NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/btAlignedObjectArray.h
/*
Bullet Continuous Collision Detection and Physics Library
Copyright (c) 2003-2006 Erwin Coumans  http://continuousphysics.com/Bullet/

This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it freely,
subject to the following restrictions:

1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/

#ifndef BT_OBJECT_ARRAY__
#define BT_OBJECT_ARRAY__

#include "btAlignedAllocator.h"
#include "btScalar.h"  // has definitions like SIMD_FORCE_INLINE

///If the platform doesn't support placement new, you can disable BT_USE_PLACEMENT_NEW;
///then the btAlignedObjectArray doesn't support objects with virtual methods, and non-trivial constructors/destructors.
///You can enable BT_USE_MEMCPY, then swapping elements in the array will use memcpy instead of operator=
///see discussion here: http://continuousphysics.com/Bullet/phpBB2/viewtopic.php?t=1231 and
///http://www.continuousphysics.com/Bullet/phpBB2/viewtopic.php?t=1240

#define BT_USE_PLACEMENT_NEW 1
//#define BT_USE_MEMCPY 1 //disabled, because it is cumbersome to find out for each platform where memcpy is defined. It can be in <memory.h> or <string.h> or otherwise...
#define BT_ALLOW_ARRAY_COPY_OPERATOR  // enabling this can accidentally perform deep copies of data if you are not careful

#ifdef BT_USE_MEMCPY
#include <memory.h>
#include <string.h>
#endif  //BT_USE_MEMCPY

#ifdef BT_USE_PLACEMENT_NEW
#include <new>  //for placement new
#endif  //BT_USE_PLACEMENT_NEW

///The btAlignedObjectArray template class uses a subset of the stl::vector interface for its methods.
///It is developed to replace stl::vector to avoid portability issues, including STL alignment issues with SIMD/SSE data.
template <typename T>
//template <class T>
class btAlignedObjectArray
{
    btAlignedAllocator<T, 16> m_allocator;

    int32_t m_size;
    int32_t m_capacity;
    T* m_data;
    //PCK: added this line
    bool m_ownsMemory;

#ifdef BT_ALLOW_ARRAY_COPY_OPERATOR
public:
    SIMD_FORCE_INLINE btAlignedObjectArray<T>& operator=(const btAlignedObjectArray<T>& other)
    {
        copyFromArray(other);
        return *this;
    }
#else   //BT_ALLOW_ARRAY_COPY_OPERATOR
private:
    SIMD_FORCE_INLINE btAlignedObjectArray<T>& operator=(const btAlignedObjectArray<T>& other);
#endif  //BT_ALLOW_ARRAY_COPY_OPERATOR

protected:
    SIMD_FORCE_INLINE int32_t allocSize(int32_t size) { return (size ? size * 2 : 1); }

    SIMD_FORCE_INLINE void copy(int32_t start, int32_t end, T* dest) const
    {
        int32_t i;
        for (i = start; i < end; ++i)
#ifdef BT_USE_PLACEMENT_NEW
            new (&dest[i]) T(m_data[i]);
#else
            dest[i] = m_data[i];
#endif  //BT_USE_PLACEMENT_NEW
    }

    SIMD_FORCE_INLINE void init()
    {
        //PCK: added this line
        m_ownsMemory = true;
        m_data = 0;
        m_size = 0;
        m_capacity = 0;
    }

    SIMD_FORCE_INLINE void destroy(int32_t first, int32_t last)
    {
        int32_t i;
        for (i = first; i < last; i++)
        {
            m_data[i].~T();
        }
    }

    SIMD_FORCE_INLINE void* allocate(int32_t size)
    {
        if (size)
            return m_allocator.allocate(size);
        return 0;
    }

    SIMD_FORCE_INLINE void deallocate()
    {
        if (m_data)
        {
            //PCK: enclosed the deallocation in this block
            if (m_ownsMemory)
            {
                m_allocator.deallocate(m_data);
            }
            m_data = 0;
        }
    }

public:
    btAlignedObjectArray() { init(); }

    ~btAlignedObjectArray() { clear(); }

    ///Generally it is best to avoid using the copy constructor of a btAlignedObjectArray, and use a (const) reference to the array instead.
    btAlignedObjectArray(const btAlignedObjectArray& otherArray)
    {
        init();
        int32_t otherSize = otherArray.size();
        resize(otherSize);
        otherArray.copy(0, otherSize, m_data);
    }

    /// return the number of elements in the array
    SIMD_FORCE_INLINE int32_t size() const { return m_size; }

    SIMD_FORCE_INLINE const T& at(int32_t n) const
    {
        btAssert(n >= 0);
        btAssert(n < size());
        return m_data[n];
    }

    SIMD_FORCE_INLINE T& at(int32_t n)
    {
        btAssert(n >= 0);
        btAssert(n < size());
        return m_data[n];
    }

    SIMD_FORCE_INLINE const T& operator[](int32_t n) const
    {
        btAssert(n >= 0);
        btAssert(n < size());
        return m_data[n];
    }

    SIMD_FORCE_INLINE T& operator[](int32_t n)
    {
        btAssert(n >= 0);
        btAssert(n < size());
        return m_data[n];
    }

    ///clear the array and deallocate memory. Generally it is better to use array.resize(0), to reduce performance overhead of run-time memory (de)allocations.
    SIMD_FORCE_INLINE void clear()
    {
        destroy(0, size());
        deallocate();
        init();
    }

    SIMD_FORCE_INLINE void pop_back()
    {
        btAssert(m_size > 0);
        m_size--;
        m_data[m_size].~T();
    }

    ///resize changes the number of elements in the array. If the new size is larger, the new elements will be constructed using the optional second argument.
    ///When the new number of elements is smaller, the destructor will be called, but memory will not be freed, to reduce performance overhead of run-time memory (de)allocations.
    SIMD_FORCE_INLINE void resize(int32_t newsize, const T& fillData = T())
    {
        int32_t curSize = size();

        if (newsize < curSize)
        {
            for (int32_t i = newsize; i < curSize; i++)
            {
                m_data[i].~T();
            }
        }
        else
        {
            if (newsize > size())
            {
                reserve(newsize);
            }
#ifdef BT_USE_PLACEMENT_NEW
            for (int32_t i = curSize; i < newsize; i++)
            {
                new (&m_data[i]) T(fillData);
            }
#endif  //BT_USE_PLACEMENT_NEW
        }

        m_size = newsize;
    }

    SIMD_FORCE_INLINE T& expandNonInitializing()
    {
        int32_t sz = size();
        if (sz == capacity())
        {
            reserve(allocSize(size()));
        }
        m_size++;

        return m_data[sz];
    }

    SIMD_FORCE_INLINE T& expand(const T& fillValue = T())
    {
        int32_t sz = size();
        if (sz == capacity())
        {
            reserve(allocSize(size()));
        }
        m_size++;
#ifdef BT_USE_PLACEMENT_NEW
        new (&m_data[sz]) T(fillValue);  //use the in-place new (not really allocating heap memory)
#endif

        return m_data[sz];
    }

    SIMD_FORCE_INLINE void push_back(const T& _Val)
    {
        int32_t sz = size();
        if (sz == capacity())
        {
            reserve(allocSize(size()));
        }

#ifdef BT_USE_PLACEMENT_NEW
        new (&m_data[m_size]) T(_Val);
#else
        m_data[size()] = _Val;
#endif  //BT_USE_PLACEMENT_NEW

        m_size++;
    }

    /// return the pre-allocated (reserved) elements; this is at least as large as the total number of elements. See size() and reserve().
    SIMD_FORCE_INLINE int32_t capacity() const { return m_capacity; }

    SIMD_FORCE_INLINE void reserve(int32_t _Count)
    {
        // determine new minimum length of allocated storage
        if (capacity() < _Count)
        {
            // not enough room, reallocate
            T* s = (T*)allocate(_Count);

            copy(0, size(), s);

            destroy(0, size());

            deallocate();

            //PCK: added this line
            m_ownsMemory = true;

            m_data = s;

            m_capacity = _Count;
        }
    }

    class less
    {
    public:
        bool operator()(const T& a, const T& b) { return (a < b); }
    };

    template <typename L>
    void quickSortInternal(const L& CompareFunc, int32_t lo, int32_t hi)
    {
        //  lo is the lower index, hi is the upper index
        //  of the region of array a that is to be sorted
        int32_t i = lo, j = hi;
        T x = m_data[(lo + hi) / 2];

        //  partition
        do
        {
            while (CompareFunc(m_data[i], x))
                i++;
            while (CompareFunc(x, m_data[j]))
                j--;
            if (i <= j)
            {
                swap(i, j);
                i++;
                j--;
            }
        } while (i <= j);

        //  recursion
        if (lo < j)
            quickSortInternal(CompareFunc, lo, j);
        if (i < hi)
            quickSortInternal(CompareFunc, i, hi);
    }

    template <typename L>
    void quickSort(const L& CompareFunc)
    {
        //don't sort 0 or 1 elements
        if (size() > 1)
        {
            quickSortInternal(CompareFunc, 0, size() - 1);
        }
    }

    ///heap sort from http://www.csse.monash.edu.au/~lloyd/tildeAlgDS/Sort/Heap/
    template <typename L>
    void downHeap(T* pArr, int32_t k, int32_t n, const L& CompareFunc)
    {
        /*  PRE: a[k+1..N] is a heap */
        /* POST:  a[k..N]  is a heap */

        T temp = pArr[k - 1];
        /* k has child(s) */
        while (k <= n / 2)
        {
            int32_t child = 2 * k;

            if ((child < n) && CompareFunc(pArr[child - 1], pArr[child]))
            {
                child++;
            }
            /* pick larger child */
            if (CompareFunc(temp, pArr[child - 1]))
            {
                /* move child up */
                pArr[k - 1] = pArr[child - 1];
                k = child;
            }
            else
            {
                break;
            }
        }
        pArr[k - 1] = temp;
    } /*downHeap*/

    void swap(int32_t index0, int32_t index1)
    {
#ifdef BT_USE_MEMCPY
        char temp[sizeof(T)];
        memcpy(temp, &m_data[index0], sizeof(T));
        memcpy(&m_data[index0], &m_data[index1], sizeof(T));
        memcpy(&m_data[index1], temp, sizeof(T));
#else
        T temp = m_data[index0];
        m_data[index0] = m_data[index1];
        m_data[index1] = temp;
#endif  //BT_USE_PLACEMENT_NEW
    }

    template <typename L>
    void heapSort(const L& CompareFunc)
    {
        /* sort a[0..N-1],  N.B. 0 to N-1 */
        int32_t k;
        int32_t n = m_size;
        for (k = n / 2; k > 0; k--)
        {
            downHeap(m_data, k, n, CompareFunc);
        }

        /* a[1..N] is now a heap */
        while (n >= 1)
        {
            swap(0, n - 1); /* largest of a[0..n-1] */

            n = n - 1;
            /* restore a[1..i-1] heap */
            downHeap(m_data, 1, n, CompareFunc);
        }
    }

    ///non-recursive binary search, assumes sorted array
    int32_t findBinarySearch(const T& key) const
    {
        int32_t first = 0;
        int32_t last = size() - 1;

        //assume sorted array
        while (first <= last)
        {
            int32_t mid = (first + last) / 2;  // compute mid point.
            if (key > m_data[mid])
                first = mid + 1;  // repeat search in top half.
            else if (key < m_data[mid])
                last = mid - 1;  // repeat search in bottom half.
            else
                return mid;  // found it. return position
        }
        return size();  // failed to find key
    }

    int32_t findLinearSearch(const T& key) const
    {
        int32_t index = size();
        int32_t i;

        for (i = 0; i < size(); i++)
        {
            if (m_data[i] == key)
            {
                index = i;
                break;
            }
        }
        return index;
    }

    void remove(const T& key)
    {
        int32_t findIndex = findLinearSearch(key);
        if (findIndex < size())
        {
            swap(findIndex, size() - 1);
            pop_back();
        }
    }

    //PCK: whole function
    void initializeFromBuffer(void* buffer, int32_t size, int32_t capacity)
    {
        clear();
        m_ownsMemory = false;
        m_data = (T*)buffer;
        m_size = size;
        m_capacity = capacity;
    }

    void copyFromArray(const btAlignedObjectArray& otherArray)
    {
        int32_t otherSize = otherArray.size();
        resize(otherSize);
        otherArray.copy(0, otherSize, m_data);
    }
};

#endif  //BT_OBJECT_ARRAY__
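Since the container above is header-only, a short usage sketch may help; it is illustrative and not part of the original file. It assumes btScalar.h and btAlignedAllocator.h are on the include path, and it supplies its own comparator with a const call operator rather than the nested less type (whose operator() is not const-qualified as declared above, so it cannot be invoked through the const L& parameter of quickSort).

#include "btAlignedObjectArray.h"
#include <cstdio>

// Comparator with a const call operator, invocable through the const L& taken by quickSort().
struct IntLess
{
    bool operator()(const int32_t& a, const int32_t& b) const { return a < b; }
};

int main()
{
    btAlignedObjectArray<int32_t> a;    // 16-byte-aligned storage via btAlignedAllocator
    for (int32_t i = 5; i > 0; --i)
        a.push_back(i);                 // capacity grows with the size*2 policy in allocSize()

    a.quickSort(IntLess());             // in-place quicksort declared above

    for (int32_t i = 0; i < a.size(); ++i)
        printf("%d ", a[i]);            // prints: 1 2 3 4 5
    printf("\n");

    a.resize(0);                        // destroys elements but keeps the allocation
    return 0;
}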
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/vhacdICHull.h
/* Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com)
 All rights reserved.

 Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

 3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission.

 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#pragma once
#ifndef VHACD_ICHULL_H
#define VHACD_ICHULL_H
#include "vhacdManifoldMesh.h"
#include "vhacdVector.h"

namespace VHACD {
//! Incremental Convex Hull algorithm (cf. http://cs.smith.edu/~orourke/books/ftp.html ).
enum ICHullError {
    ICHullErrorOK = 0,
    ICHullErrorCoplanarPoints,
    ICHullErrorNoVolume,
    ICHullErrorInconsistent,
    ICHullErrorNotEnoughPoints
};
class ICHull {
public:
    static const double sc_eps;
    //!
    bool IsFlat() { return m_isFlat; }
    //! Returns the computed mesh
    TMMesh& GetMesh() { return m_mesh; }
    //! Add one point to the convex-hull
    bool AddPoint(const Vec3<double>& point) { return AddPoints(&point, 1); }
    //! Add one point to the convex-hull
    bool AddPoint(const Vec3<double>& point, int32_t id);
    //! Add points to the convex-hull
    bool AddPoints(const Vec3<double>* points, size_t nPoints);
    //!
    ICHullError Process();
    //!
    ICHullError Process(const uint32_t nPointsCH, const double minVolume = 0.0);
    //!
    bool IsInside(const Vec3<double>& pt0, const double eps = 0.0);
    //!
    const ICHull& operator=(ICHull& rhs);

    //! Constructor
    ICHull();
    //! Destructor
    ~ICHull(void){};

private:
    //! DoubleTriangle builds the initial double triangle. It first finds 3 noncollinear points and makes two faces out of them, in opposite order. It then finds a fourth point that is not coplanar with that face. The vertices are stored in the face structure in counterclockwise order so that the volume between the face and the point is negative. Lastly, the 3 newfaces to the fourth point are constructed and the data structures are cleaned up.
    ICHullError DoubleTriangle();
    //! MakeFace creates a new face structure from three vertices (in ccw order). It returns a pointer to the face.
    CircularListElement<TMMTriangle>* MakeFace(CircularListElement<TMMVertex>* v0,
                                               CircularListElement<TMMVertex>* v1,
                                               CircularListElement<TMMVertex>* v2,
                                               CircularListElement<TMMTriangle>* fold);
    //!
    CircularListElement<TMMTriangle>* MakeConeFace(CircularListElement<TMMEdge>* e, CircularListElement<TMMVertex>* v);
    //!
    bool ProcessPoint();
    //!
    bool ComputePointVolume(double& totalVolume, bool markVisibleFaces);
    //!
    bool FindMaxVolumePoint(const double minVolume = 0.0);
    //!
    bool CleanEdges();
    //!
    bool CleanVertices(uint32_t& addedPoints);
    //!
    bool CleanTriangles();
    //!
    bool CleanUp(uint32_t& addedPoints);
    //!
    bool MakeCCW(CircularListElement<TMMTriangle>* f, CircularListElement<TMMEdge>* e, CircularListElement<TMMVertex>* v);
    void Clear();

private:
    static const int32_t sc_dummyIndex;
    TMMesh m_mesh;
    SArray<CircularListElement<TMMEdge>*> m_edgesToDelete;
    SArray<CircularListElement<TMMEdge>*> m_edgesToUpdate;
    SArray<CircularListElement<TMMTriangle>*> m_trianglesToDelete;
    Vec3<double> m_normal;
    bool m_isFlat;

    ICHull(const ICHull& rhs);
};
}
#endif // VHACD_ICHULL_H
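For orientation, a small hedged sketch of how this incremental hull class might be driven; it is not part of the original header. buildTetrahedronHull is a hypothetical helper, the sketch assumes Vec3<double> exposes an (x, y, z) constructor as defined in vhacdVector.h, and compiling it requires the rest of the VHACD source tree.

#include "vhacdICHull.h"

// Builds a hull from a unit tetrahedron; only the error path is checked here, since
// TMMesh's interface is outside the excerpt shown above.
bool buildTetrahedronHull()
{
    const VHACD::Vec3<double> pts[4] = {
        VHACD::Vec3<double>(0.0, 0.0, 0.0),
        VHACD::Vec3<double>(1.0, 0.0, 0.0),
        VHACD::Vec3<double>(0.0, 1.0, 0.0),
        VHACD::Vec3<double>(0.0, 0.0, 1.0)
    };

    VHACD::ICHull hull;
    if (!hull.AddPoints(pts, 4))        // queue the input vertices
        return false;

    // Process() incrementally inserts the queued points into the hull
    return hull.Process() == VHACD::ICHullErrorOK && !hull.IsFlat();
}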
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/public/VHACD.h
/* Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com)
 All rights reserved.

 Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

 3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission.

 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#pragma once
#ifndef VHACD_H
#define VHACD_H

#define VHACD_VERSION_MAJOR 2
#define VHACD_VERSION_MINOR 3

// Changes for version 2.3
//
// m_gamma : Has been removed. This used to control the error metric to merge convex hulls. Now it uses the 'm_maxConvexHulls' value instead.
// m_maxConvexHulls : This is the maximum number of convex hulls to produce from the merge operation; replaces 'm_gamma'.
//
// Note that decomposition depth is no longer a user provided value. It is now derived from the
// maximum number of hulls requested.
//
// As a convenience to the user, each convex hull produced now includes the volume of the hull as well as its center.
//
// This version supports a convenience method to automatically make V-HACD run asynchronously in a background thread.
// To get a fully asynchronous version, call 'CreateVHACD_ASYNC' instead of 'CreateVHACD'. You get the same interface; however,
// computing convex hulls is no longer a blocking operation. All callback messages are still returned
// in the application's thread so you don't need to worry about mutex locks or anything in that case.
// To tell if the operation is complete, the application should call 'IsReady'. This will return true if
// the last approximation operation is complete and will dispatch any pending messages.
// If you call 'Compute' while a previous operation is still running, it will automatically cancel the last request
// and begin a new one. To cancel a currently running approximation just call 'Cancel'.

#include <stdint.h>

namespace VHACD {
class IVHACD {
public:
    class IUserCallback {
    public:
        virtual ~IUserCallback(){};
        virtual void Update(const double overallProgress, const double stageProgress, const double operationProgress,
                            const char* const stage, const char* const operation) = 0;
    };

    class IUserLogger {
    public:
        virtual ~IUserLogger(){};
        virtual void Log(const char* const msg) = 0;
    };

    class ConvexHull {
    public:
        double* m_points;
        uint32_t* m_triangles;
        uint32_t m_nPoints;
        uint32_t m_nTriangles;
        double m_volume;
        double m_center[3];
    };

    class Parameters {
    public:
        Parameters(void) { Init(); }
        void Init(void)
        {
            m_resolution = 100000;
            m_concavity = 0.001;
            m_planeDownsampling = 4;
            m_convexhullDownsampling = 4;
            m_alpha = 0.05;
            m_beta = 0.05;
            m_pca = 0;
            m_mode = 0; // 0: voxel-based (recommended), 1: tetrahedron-based
            m_maxNumVerticesPerCH = 64;
            m_minVolumePerCH = 0.0001;
            m_callback = 0;
            m_logger = 0;
            m_convexhullApproximation = true;
            m_oclAcceleration = true;
            m_maxConvexHulls = 1024;
            m_projectHullVertices = true; // This will project the output convex hull vertices onto the original source mesh to increase the floating point accuracy of the results
        }
        double m_concavity;
        double m_alpha;
        double m_beta;
        double m_minVolumePerCH;
        IUserCallback* m_callback;
        IUserLogger* m_logger;
        uint32_t m_resolution;
        uint32_t m_maxNumVerticesPerCH;
        uint32_t m_planeDownsampling;
        uint32_t m_convexhullDownsampling;
        uint32_t m_pca;
        uint32_t m_mode;
        uint32_t m_convexhullApproximation;
        uint32_t m_oclAcceleration;
        uint32_t m_maxConvexHulls;
        bool m_projectHullVertices;
    };

    class Constraint {
    public:
        uint32_t mHullA;            // Convex Hull A index
        uint32_t mHullB;            // Convex Hull B index
        double mConstraintPoint[3]; // The point of intersection between the two convex hulls
    };

    virtual void Cancel() = 0;
    virtual bool Compute(const float* const points, const uint32_t countPoints, const uint32_t* const triangles,
                         const uint32_t countTriangles, const Parameters& params) = 0;
    virtual bool Compute(const double* const points, const uint32_t countPoints, const uint32_t* const triangles,
                         const uint32_t countTriangles, const Parameters& params) = 0;
    virtual uint32_t GetNConvexHulls() const = 0;
    virtual void GetConvexHull(const uint32_t index, ConvexHull& ch) const = 0;
    virtual void Clean(void) = 0;   // release internally allocated memory
    virtual void Release(void) = 0; // release IVHACD
    virtual bool OCLInit(void* const oclDevice, IUserLogger* const logger = 0) = 0;
    virtual bool OCLRelease(IUserLogger* const logger = 0) = 0;

    // Will compute the center of mass of the convex hull decomposition results and return it
    // in 'centerOfMass'. Returns false if the center of mass could not be computed.
    virtual bool ComputeCenterOfMass(double centerOfMass[3]) const = 0;

    // Will analyze the HACD results and compute the constraints solutions.
    // It will analyze the point at which any two convex hulls touch each other and
    // return the total number of constraint pairs found
    virtual uint32_t ComputeConstraints(void) = 0;

    // Returns a pointer to the constraint index; null if the index is not valid or
    // the user did not previously call 'ComputeConstraints'
    virtual const Constraint *GetConstraint(uint32_t index) const = 0;

    // In synchronous mode (non-multi-threaded) the state is always 'ready'.
    // In asynchronous mode, this returns true if the background thread is not still actively computing
    // a new solution. In an asynchronous config the 'IsReady' call will report any update or log
    // messages in the caller's current thread.
    virtual bool IsReady(void) const { return true; }

protected:
    virtual ~IVHACD(void) {}
};
IVHACD* CreateVHACD(void);
IVHACD* CreateVHACD_ASYNC(void);
}
#endif // VHACD_H
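Since the header above documents the full public interface, a hedged usage sketch may help; it is not part of the original file. decompose is a hypothetical helper name, and the parameter values are illustrative only.

#include "VHACD.h"
#include <cstdio>
#include <vector>

// Decomposes an indexed triangle mesh and reports each hull's volume and center.
// 'points' is x0,y0,z0,x1,... and 'triangles' holds three vertex indices per triangle.
uint32_t decompose(const std::vector<float>& points, const std::vector<uint32_t>& triangles)
{
    VHACD::IVHACD* vhacd = VHACD::CreateVHACD();   // or CreateVHACD_ASYNC() plus IsReady() polling

    VHACD::IVHACD::Parameters params;              // defaults come from Parameters::Init()
    params.m_maxConvexHulls = 32;                  // cap the merge-stage output
    params.m_maxNumVerticesPerCH = 64;

    uint32_t hullCount = 0;
    if (vhacd->Compute(points.data(), (uint32_t)(points.size() / 3),
                       triangles.data(), (uint32_t)(triangles.size() / 3), params))
    {
        hullCount = vhacd->GetNConvexHulls();
        for (uint32_t i = 0; i < hullCount; ++i)
        {
            VHACD::IVHACD::ConvexHull ch;
            vhacd->GetConvexHull(i, ch);           // buffers remain owned by the IVHACD instance
            printf("hull %u: %u verts, %u tris, volume %f, center (%f, %f, %f)\n",
                   i, ch.m_nPoints, ch.m_nTriangles, ch.m_volume,
                   ch.m_center[0], ch.m_center[1], ch.m_center[2]);
        }
    }

    vhacd->Clean();    // release internal buffers (invalidates ConvexHull pointers)
    vhacd->Release();  // destroy the interface
    return hullCount;
}

Copy the hull points and triangles out before calling Clean()/Release() if they are needed afterward, since GetConvexHull returns pointers into memory owned by the IVHACD instance.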
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/assetutils/NvBlastExtAssetUtils.cpp
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//  * Redistributions of source code must retain the above copyright
//    notice, this list of conditions and the following disclaimer.
//  * Redistributions in binary form must reproduce the above copyright
//    notice, this list of conditions and the following disclaimer in the
//    documentation and/or other materials provided with the distribution.
//  * Neither the name of NVIDIA CORPORATION nor the names of its
//    contributors may be used to endorse or promote products derived
//    from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved.


#include "NvBlastExtAssetUtils.h"
#include "NvBlast.h"
#include "NvBlastIndexFns.h"
#include "NvBlastMemory.h"
#include "NvBlastGlobals.h"
#include "math.h"

using namespace Nv::Blast;


/**
Fill the chunk and bond descriptors from an asset.

\param[out] chunkDescsWritten   the number of chunk descriptors written to chunkDescs
\param[out] bondDescsWritten    the number of bond descriptors written to bondDescs
\param[out] chunkDescs          user-supplied buffer of NvBlastChunkDesc. Size must be at least NvBlastAssetGetChunkCount(asset, logFn)
\param[out] bondDescs           user-supplied buffer of NvBlastBondDesc. Size must be at least NvBlastAssetGetBondCount(asset, logFn)
\param[in]  asset               asset from which to extract descriptors
*/
static void fillChunkAndBondDescriptorsFromAsset
(
    uint32_t& chunkDescsWritten,
    uint32_t& bondDescsWritten,
    NvBlastChunkDesc* chunkDescs,
    NvBlastBondDesc* bondDescs,
    const NvBlastAsset* asset
)
{
    chunkDescsWritten = 0;
    bondDescsWritten = 0;

    // Chunk descs
    const uint32_t assetChunkCount = NvBlastAssetGetChunkCount(asset, logLL);
    const NvBlastChunk* assetChunk = NvBlastAssetGetChunks(asset, logLL);
    for (uint32_t i = 0; i < assetChunkCount; ++i, ++assetChunk)
    {
        NvBlastChunkDesc& chunkDesc = chunkDescs[chunkDescsWritten++];
        memcpy(chunkDesc.centroid, assetChunk->centroid, sizeof(float) * 3);
        chunkDesc.volume = assetChunk->volume;
        chunkDesc.parentChunkDescIndex = assetChunk->parentChunkIndex;
        chunkDesc.flags = 0;    // To be filled in below
        chunkDesc.userData = assetChunk->userData;
    }

    // Bond descs
    const uint32_t assetBondCount = NvBlastAssetGetBondCount(asset, logLL);
    const NvBlastBond* assetBond = NvBlastAssetGetBonds(asset, logLL);
    for (uint32_t i = 0; i < assetBondCount; ++i, ++assetBond)
    {
        NvBlastBondDesc& bondDesc = bondDescs[bondDescsWritten++];
        memcpy(&bondDesc.bond, assetBond, sizeof(NvBlastBond));
    }

    // Walk the graph and restore connection descriptors
    const NvBlastSupportGraph graph = NvBlastAssetGetSupportGraph(asset, logLL);
    for (uint32_t i = 0; i < graph.nodeCount; ++i)
    {
        const int32_t currentChunk = graph.chunkIndices[i];
        if (isInvalidIndex(currentChunk))
        {
            continue;
        }
        chunkDescs[currentChunk].flags |= NvBlastChunkDesc::SupportFlag;    // Filling in chunk flags here
        for (uint32_t j = graph.adjacencyPartition[i]; j < graph.adjacencyPartition[i + 1]; ++j)
        {
            NvBlastBondDesc& bondDesc = bondDescs[graph.adjacentBondIndices[j]];
            bondDesc.chunkIndices[0] = currentChunk;
            const uint32_t adjacentChunkIndex = graph.chunkIndices[graph.adjacentNodeIndices[j]];
            bondDesc.chunkIndices[1] = adjacentChunkIndex;
        }
    }
}


/**
Scale a 3-vector v in-place.

\param[in,out]  v   The vector to scale.
\param[in]      s   The scale. Represents the diagonal elements of a diagonal matrix. The result will be v <- s*v.
*/
static inline void scale(NvcVec3& v, const NvcVec3& s)
{
    v.x *= s.x;
    v.y *= s.y;
    v.z *= s.z;
}


/**
Rotate a 3-vector v in-place using a rotation represented by a quaternion q.

\param[in,out]  v   The vector to rotate.
\param[in]      q   The quaternion representing the rotation. The format of q is { x, y, z, w } where (x,y,z) is the vector part and w is the scalar part.
                    The quaternion q MUST be normalized.
*/
static inline void rotate(NvcVec3& v, const NvcQuat& q)
{
    const float vx = 2.0f * v.x;
    const float vy = 2.0f * v.y;
    const float vz = 2.0f * v.z;
    const float w2 = q.w * q.w - 0.5f;
    const float dot2 = (q.x * vx + q.y * vy + q.z * vz);
    v.x = vx * w2 + (q.y * vz - q.z * vy) * q.w + q.x * dot2;
    v.y = vy * w2 + (q.z * vx - q.x * vz) * q.w + q.y * dot2;
    v.z = vz * w2 + (q.x * vy - q.y * vx) * q.w + q.z * dot2;
}


/**
Translate a 3-vector v in-place.

\param[in,out]  v   The vector to translate.
\param[in]      t   The translation. The result will be v <- v+t.
*/
static inline void translate(NvcVec3& v, const NvcVec3& t)
{
    v.x += t.x;
    v.y += t.y;
    v.z += t.z;
}


NvBlastAsset* NvBlastExtAssetUtilsAddExternalBonds
(
    const NvBlastAsset* asset,
    const uint32_t* externalBoundChunks,
    uint32_t externalBoundChunkCount,
    const NvcVec3* bondDirections,
    const uint32_t* bondUserData
)
{
    const uint32_t chunkCount = NvBlastAssetGetChunkCount(asset, logLL);
    const uint32_t oldBondCount = NvBlastAssetGetBondCount(asset, logLL);
    const uint32_t newBondCount = oldBondCount + externalBoundChunkCount;

    NvBlastChunkDesc* chunkDescs = static_cast<NvBlastChunkDesc*>(NVBLAST_ALLOC(chunkCount * sizeof(NvBlastChunkDesc)));
    NvBlastBondDesc* bondDescs = static_cast<NvBlastBondDesc*>(NVBLAST_ALLOC(newBondCount * sizeof(NvBlastBondDesc)));

    // Create chunk descs
    uint32_t chunkDescsWritten;
    uint32_t bondDescsWritten;
    fillChunkAndBondDescriptorsFromAsset(chunkDescsWritten, bondDescsWritten, chunkDescs, bondDescs, asset);

    // Add world bonds
    uint32_t bondCount = oldBondCount;
    for (uint32_t i = 0; i < externalBoundChunkCount; i++)
    {
        NvBlastBondDesc& bondDesc = bondDescs[bondCount++];
        const uint32_t chunkIndex = externalBoundChunks[i];
        bondDesc.chunkIndices[0] = chunkIndex;
        bondDesc.chunkIndices[1] = invalidIndex<uint32_t>();
        memcpy(&bondDesc.bond.normal, bondDirections + i, sizeof(float) * 3);
        bondDesc.bond.area = 1.0f;  // Should be set by user
        memcpy(&bondDesc.bond.centroid, chunkDescs[chunkIndex].centroid, sizeof(float) * 3);
        bondDesc.bond.userData = bondUserData != nullptr ? bondUserData[i] : 0;
    }

    // Create new asset
    NvBlastAssetDesc assetDesc;
    assetDesc.chunkCount = chunkCount;
    assetDesc.chunkDescs = chunkDescs;
    assetDesc.bondCount = bondCount;
    assetDesc.bondDescs = bondDescs;
    void* scratch = NVBLAST_ALLOC(NvBlastGetRequiredScratchForCreateAsset(&assetDesc, logLL));
    NvBlastAsset* newAsset = NvBlastCreateAsset(NVBLAST_ALLOC(NvBlastGetAssetMemorySize(&assetDesc, logLL)), &assetDesc, scratch, logLL);

    // Free buffers
    NVBLAST_FREE(scratch);
    NVBLAST_FREE(bondDescs);
    NVBLAST_FREE(chunkDescs);

    return newAsset;
}


NvBlastAssetDesc NvBlastExtAssetUtilsCreateDesc(const NvBlastAsset* asset)
{
    return NvBlastExtAssetUtilsMergeAssets(&asset, nullptr, nullptr, nullptr, 1, nullptr, 0, nullptr, nullptr, 0);
}


NvBlastAssetDesc NvBlastExtAssetUtilsMergeAssets
(
    const NvBlastAsset** components,
    const NvcVec3* scales,
    const NvcQuat* rotations,
    const NvcVec3* translations,
    uint32_t componentCount,
    const NvBlastExtAssetUtilsBondDesc* newBondDescs,
    uint32_t newBondCount,
    uint32_t* chunkIndexOffsets,
    uint32_t* chunkReorderMap,
    uint32_t chunkReorderMapSize
)
{
    // Count the total number of chunks and bonds in the new asset
    uint32_t totalChunkCount = 0;
    uint32_t totalBondCount = newBondCount;
    for (uint32_t c = 0; c < componentCount; ++c)
    {
        totalChunkCount += NvBlastAssetGetChunkCount(components[c], logLL);
        totalBondCount += NvBlastAssetGetBondCount(components[c], logLL);
    }

    // Allocate space for chunk and bond descriptors
    NvBlastChunkDesc* chunkDescs = static_cast<NvBlastChunkDesc*>(NVBLAST_ALLOC(totalChunkCount * sizeof(NvBlastChunkDesc)));
    NvBlastBondDesc* bondDescs = static_cast<NvBlastBondDesc*>(NVBLAST_ALLOC(totalBondCount * sizeof(NvBlastBondDesc)));

    // Create a list of chunk index offsets per component
    uint32_t* offsetStackAlloc = static_cast<uint32_t*>(NvBlastAlloca(componentCount * sizeof(uint32_t)));
    if (chunkIndexOffsets == nullptr)
    {
        chunkIndexOffsets = offsetStackAlloc;   // Use local stack alloc if no array is provided
    }

    // Fill the chunk and bond descriptors from the components
    uint32_t chunkCount = 0;
    uint32_t bondCount = 0;
    for (uint32_t c = 0; c < componentCount; ++c)
    {
        chunkIndexOffsets[c] = chunkCount;
        uint32_t componentChunkCount;
        uint32_t componentBondCount;
        fillChunkAndBondDescriptorsFromAsset(componentChunkCount, componentBondCount, chunkDescs + chunkCount, bondDescs + bondCount, components[c]);

        // Fix chunks' parent indices
        for (uint32_t i = 0; i < componentChunkCount; ++i)
        {
            if (!isInvalidIndex(chunkDescs[chunkCount + i].parentChunkDescIndex))
            {
                chunkDescs[chunkCount + i].parentChunkDescIndex += chunkCount;
            }
        }

        // Fix bonds' chunk indices
        for (uint32_t i = 0; i < componentBondCount; ++i)
        {
            NvBlastBondDesc& bondDesc = bondDescs[bondCount + i];
            for (int j = 0; j < 2; ++j)
            {
                if (!isInvalidIndex(bondDesc.chunkIndices[j]))
                {
                    bondDesc.chunkIndices[j] += chunkCount;
                }
            }
        }

        // Transform geometric data
        if (scales != nullptr)
        {
            const NvcVec3& S = scales[c];
            NvcVec3 cofS = { S.y * S.z, S.z * S.x, S.x * S.y };
            float absDetS = S.x * S.y * S.z;
            const float sgnDetS = absDetS < 0.0f ? -1.0f : 1.0f;
            absDetS *= sgnDetS;
            for (uint32_t i = 0; i < componentChunkCount; ++i)
            {
                scale(reinterpret_cast<NvcVec3&>(chunkDescs[chunkCount + i].centroid), S);
                chunkDescs[chunkCount + i].volume *= absDetS;
            }
            for (uint32_t i = 0; i < componentBondCount; ++i)
            {
                NvBlastBond& bond = bondDescs[bondCount + i].bond;
                scale(reinterpret_cast<NvcVec3&>(bond.normal), cofS);
                float renorm = sqrtf(bond.normal[0] * bond.normal[0] + bond.normal[1] * bond.normal[1] + bond.normal[2] * bond.normal[2]);
                bond.area *= renorm;
                if (renorm != 0)
                {
                    renorm = sgnDetS / renorm;
                    bond.normal[0] *= renorm;
                    bond.normal[1] *= renorm;
                    bond.normal[2] *= renorm;
                }
                scale(reinterpret_cast<NvcVec3&>(bond.centroid), S);
            }
        }
        if (rotations != nullptr)
        {
            for (uint32_t i = 0; i < componentChunkCount; ++i)
            {
                rotate(reinterpret_cast<NvcVec3&>(chunkDescs[chunkCount + i].centroid), rotations[c]);
            }
            for (uint32_t i = 0; i < componentBondCount; ++i)
            {
                NvBlastBond& bond = bondDescs[bondCount + i].bond;
                rotate(reinterpret_cast<NvcVec3&>(bond.normal), rotations[c]);  // Normal can be transformed this way since we aren't scaling
                rotate(reinterpret_cast<NvcVec3&>(bond.centroid), rotations[c]);
            }
        }
        if (translations != nullptr)
        {
            for (uint32_t i = 0; i < componentChunkCount; ++i)
            {
                translate(reinterpret_cast<NvcVec3&>(chunkDescs[chunkCount + i].centroid), translations[c]);
            }
            for (uint32_t i = 0; i < componentBondCount; ++i)
            {
                translate(reinterpret_cast<NvcVec3&>(bondDescs[bondCount + i].bond.centroid), translations[c]);
            }
        }
        chunkCount += componentChunkCount;
        bondCount += componentBondCount;
    }

    // Fill the bond descriptors from the new bond descs
    for (uint32_t b = 0; b < newBondCount; ++b)
    {
        const NvBlastExtAssetUtilsBondDesc& newBondDesc = newBondDescs[b];
        NvBlastBondDesc& bondDesc = bondDescs[bondCount++];
        memcpy(&bondDesc.bond, &newBondDesc.bond, sizeof(NvBlastBond));
        bondDesc.chunkIndices[0] = !isInvalidIndex(newBondDesc.chunkIndices[0]) ?
            newBondDesc.chunkIndices[0] + chunkIndexOffsets[newBondDesc.componentIndices[0]] : invalidIndex<uint32_t>();
        bondDesc.chunkIndices[1] = !isInvalidIndex(newBondDesc.chunkIndices[1]) ?
            newBondDesc.chunkIndices[1] + chunkIndexOffsets[newBondDesc.componentIndices[1]] : invalidIndex<uint32_t>();
    }

    // Create new asset descriptor
    NvBlastAssetDesc assetDesc;
    assetDesc.chunkCount = chunkCount;
    assetDesc.chunkDescs = chunkDescs;
    assetDesc.bondCount = bondCount;
    assetDesc.bondDescs = bondDescs;

    // Massage the descriptors so that they are valid for asset creation
    void* scratch = NVBLAST_ALLOC(chunkCount * sizeof(NvBlastChunkDesc)); // Enough for NvBlastEnsureAssetExactSupportCoverage and NvBlastReorderAssetDescChunks
    NvBlastEnsureAssetExactSupportCoverage(chunkDescs, chunkCount, scratch, logLL);
    if (chunkReorderMapSize < chunkCount)
    {
        if (chunkReorderMap != nullptr)
        {
            // Chunk reorder map is not large enough. Fill it with invalid indices and don't use it.
            memset(chunkReorderMap, 0xFF, chunkReorderMapSize * sizeof(uint32_t));
            NVBLAST_LOG_WARNING("NvBlastExtAssetUtilsMergeAssets: insufficient chunkReorderMap array passed in. NvBlastReorderAssetDescChunks will not be used.");
        }
        chunkReorderMap = nullptr;  // Don't use
    }
    if (chunkReorderMap != nullptr)
    {
        NvBlastReorderAssetDescChunks(chunkDescs, chunkCount, bondDescs, bondCount, chunkReorderMap, true, scratch, logLL);
    }
    NVBLAST_FREE(scratch);

    return assetDesc;
}


/**
Multiply a 3-vector v in-place by a scalar value.

\param[in,out]  v       The vector to multiply.
\param[in]      value   The scalar multiplier.
*/
static inline void multiply(NvcVec3& v, float value)
{
    v.x *= value;
    v.y *= value;
    v.z *= value;
}


/**
Get Vec3 length
*/
static inline float length(const NvcVec3& p)
{
    return sqrtf(p.x * p.x + p.y * p.y + p.z * p.z);
}


/**
Transform a point in-place: scale, rotate, then translate

\param[in,out]  p   The point to transform.
\param[in]      S   The diagonal elements of a diagonal scale matrix.
\param[in]      R   A quaternion representing the rotation. Must be normalized.
\param[in]      T   The translation vector.
*/
static inline void transform(NvcVec3& p, const NvcVec3& S, const NvcQuat& R, const NvcVec3& T)
{
    scale(p, S);
    rotate(p, R);
    translate(p, T);
}


/**
Transform a vector in-place: scale, then rotate

\param[in,out]  v   The vector to transform.
\param[in]      S   The diagonal elements of a diagonal scale matrix.
\param[in]      R   A quaternion representing the rotation. Must be normalized.
*/
static inline void transform(NvcVec3& v, const NvcVec3& S, const NvcQuat& R)
{
    scale(v, S);
    rotate(v, R);
}


void NvBlastExtAssetTransformInPlace(NvBlastAsset* asset, const NvcVec3* scaling, const NvcQuat* rotation, const NvcVec3* translation)
{
    // Local copies of scaling (S), rotation (R), and translation (T)
    NvcVec3 S = { 1, 1, 1 };
    NvcQuat R = { 0, 0, 0, 1 };
    NvcVec3 T = { 0, 0, 0 };

    NvcVec3 cofS = { 1, 1, 1 };
    float absDetS = 1;
    float sgnDetS = 1;

    {
        if (rotation)
        {
            R = *rotation;
        }

        if (scaling)
        {
            S = *scaling;
            cofS.x = S.y * S.z;
            cofS.y = S.z * S.x;
            cofS.z = S.x * S.y;
            absDetS = S.x * S.y * S.z;
            sgnDetS = absDetS < 0.0f ? -1.0f : 1.0f;
            absDetS *= sgnDetS;
        }

        if (translation)
        {
            T = *translation;
        }
    }

    // Chunk descs
    const uint32_t assetChunkCount = NvBlastAssetGetChunkCount(asset, logLL);
    NvBlastChunk* assetChunk = const_cast<NvBlastChunk*>(NvBlastAssetGetChunks(asset, logLL));
    for (uint32_t i = 0; i < assetChunkCount; ++i, ++assetChunk)
    {
        transform(reinterpret_cast<NvcVec3&>(assetChunk->centroid), S, R, T);
        assetChunk->volume *= absDetS;  // Use |detS| to keep the volume positive
    }

    // Bond descs
    const uint32_t assetBondCount = NvBlastAssetGetBondCount(asset, logLL);
    NvBlastBond* assetBond = const_cast<NvBlastBond*>(NvBlastAssetGetBonds(asset, logLL));
    for (uint32_t i = 0; i < assetBondCount; ++i, ++assetBond)
    {
        transform(reinterpret_cast<NvcVec3&>(assetBond->centroid), S, R, T);
        NvcVec3& normal = reinterpret_cast<NvcVec3&>(assetBond->normal);
        transform(normal, cofS, R);
        const float l = length(normal);
        assetBond->area *= l;
        multiply(normal, l > 0.f ? sgnDetS / l : 1.f);
    }
}
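A hedged usage sketch for the external-bond helper implemented above; it is not part of the original file. addWorldAnchors is a hypothetical name, and the chunk indices, bond directions, and user-data values are placeholders.

#include "NvBlastExtAssetUtils.h"

// Binds two support chunks of an existing asset to the "world" with upward-facing bonds.
// 'asset' is assumed to be a valid NvBlastAsset created elsewhere; chunk indices 1 and 2
// stand in for whichever chunks should anchor the destructible.
NvBlastAsset* addWorldAnchors(const NvBlastAsset* asset)
{
    const uint32_t anchorChunks[2] = { 1, 2 };
    const NvcVec3 bondDirections[2] = { { 0.0f, 0.0f, 1.0f }, { 0.0f, 0.0f, 1.0f } };
    const uint32_t bondUserData[2] = { 100, 101 };

    // Returns a newly created asset; the bond area is left at 1.0f, which the
    // implementation notes "Should be set by user".
    return NvBlastExtAssetUtilsAddExternalBonds(asset, anchorChunks, 2, bondDirections, bondUserData);
}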
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/stress/NvBlastExtStressSolver.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastExtStressSolver.h" #include "NvBlast.h" #include "NvBlastGlobals.h" #include "NvBlastArray.h" #include "NvBlastHashMap.h" #include "NvBlastHashSet.h" #include "NvBlastAssert.h" #include "NvBlastIndexFns.h" #include "NsFPU.h" #include "NvBlastNvSharedHelpers.h" #include "NvCMath.h" #include "stress.h" #include "buffer.h" #include "simd/simd_device_query.h" #include <algorithm> #define USE_SCALAR_IMPL 0 #define WARM_START 1 #define GRAPH_INTERGRIRY_CHECK 0 #if GRAPH_INTERGRIRY_CHECK #include <set> #endif namespace Nv { namespace Blast { using namespace nvidia; static_assert(sizeof(NvVec3) == sizeof(NvcVec3), "sizeof(NvVec3) must equal sizeof(NvcVec3)."); static_assert(offsetof(NvVec3, x) == offsetof(NvcVec3, x) && offsetof(NvVec3, y) == offsetof(NvcVec3, y) && offsetof(NvVec3, z) == offsetof(NvcVec3, z), "Elements of NvVec3 and NvcVec3 must have the same struct offset."); /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Conjugate Gradient Solver /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// class ConjugateGradientImpulseSolver { public: ConjugateGradientImpulseSolver(uint32_t nodeCount, uint32_t maxBondCount) { m_bonds.reserve(maxBondCount); m_impulses.reserve(maxBondCount); reset(nodeCount); } void getBondImpulses(uint32_t bond, NvVec3& impulseLinear, NvVec3& impulseAngular) const { NVBLAST_ASSERT(bond < m_impulses.size()); const AngLin6& f = m_impulses[bond]; *(NvcVec3*)&impulseAngular = f.ang; *(NvcVec3*)&impulseLinear = f.lin; } void getBondNodes(uint32_t bond, uint32_t& node0, uint32_t& node1) const { NVBLAST_ASSERT(bond < m_bonds.size()); const SolverBond& b = m_bonds[bond]; node0 = b.nodes[0]; node1 = b.nodes[1]; } uint32_t getBondCount() const { return m_bonds.size(); } uint32_t getNodeCount() const { return m_nodes.size(); } void setNodeMassInfo(uint32_t node, const NvVec3& CoM, float mass, float inertia) { 
NVBLAST_ASSERT(node < m_nodes.size()); SolverNodeS& n = m_nodes[node]; n.CoM = { CoM.x, CoM.y, CoM.z }; n.mass = std::max(mass, 0.0f); // No negative masses, but 0 is meaningful (== infinite) n.inertia = std::max(inertia, 0.0f); // Ditto for inertia m_forceColdStart = true; } void initialize() { StressProcessor::DataParams params; params.centerBonds = true; params.equalizeMasses = true; m_stressProcessor.prepare(m_nodes.begin(), m_nodes.size(), m_bonds.begin(), m_bonds.size(), params); } void setNodeVelocities(uint32_t node, const NvVec3& velocityLinear, const NvVec3& velocityAngular) { NVBLAST_ASSERT(node < m_velocities.size()); AngLin6& v = m_velocities[node]; v.ang = { velocityAngular.x, velocityAngular.y, velocityAngular.z }; v.lin = { velocityLinear.x, velocityLinear.y, velocityLinear.z }; m_inputsChanged = true; } uint32_t addBond(uint32_t node0, uint32_t node1, const NvVec3& bondCentroid) { SolverBond b; b.nodes[0] = node0; b.nodes[1] = node1; b.centroid = { bondCentroid.x, bondCentroid.y, bondCentroid.z }; m_bonds.pushBack(b); m_impulses.push_back({{0,0,0},{0,0,0}}); m_forceColdStart = true; return m_bonds.size() - 1; } void replaceWithLast(uint32_t bondIndex) { m_bonds.replaceWithLast(bondIndex); if ((size_t)bondIndex + 2 < m_impulses.size()) { m_impulses[bondIndex] = m_impulses.back(); m_impulses.resize(m_impulses.size() - 1); } m_stressProcessor.removeBond(bondIndex); } void reset(uint32_t nodeCount) { m_nodes.resize(nodeCount); memset(m_nodes.begin(), 0, sizeof(SolverNodeS)*nodeCount); m_velocities.resize(nodeCount); memset(m_velocities.data(), 0, sizeof(AngLin6)*nodeCount); clearBonds(); m_error_sq = {FLT_MAX, FLT_MAX}; m_converged = false; m_forceColdStart = true; m_inputsChanged = true; } void clearBonds() { m_bonds.clear(); m_impulses.resize(0); m_forceColdStart = true; } void solve(uint32_t iterationCount, bool warmStart = true) { StressProcessor::SolverParams params; params.maxIter = iterationCount; params.tolerance = 0.001f; params.warmStart = warmStart && !m_forceColdStart; m_converged = (m_stressProcessor.solve(m_impulses.data(), m_velocities.data(), params, &m_error_sq) >= 0); m_forceColdStart = false; m_inputsChanged = false; } bool calcError(float& linear, float& angular) const { linear = sqrtf(m_error_sq.lin); angular = sqrtf(m_error_sq.ang); return m_converged; } private: Array<SolverNodeS>::type m_nodes; Array<SolverBond>::type m_bonds; StressProcessor m_stressProcessor; POD_Buffer<AngLin6> m_velocities; POD_Buffer<AngLin6> m_impulses; AngLin6ErrorSq m_error_sq; bool m_converged; bool m_forceColdStart; bool m_inputsChanged; }; /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Graph Processor /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// #if GRAPH_INTERGRIRY_CHECK #define CHECK_GRAPH_INTEGRITY checkGraphIntegrity() #else #define CHECK_GRAPH_INTEGRITY ((void)0) #endif class SupportGraphProcessor { public: struct BondData { uint32_t node0; uint32_t node1; uint32_t blastBondIndex; // linear stresses float stressNormal; // negative values represent compression pressure, positive represent tension float stressShear; // The normal used to compute stress values // Can be different than the bond normal if graph reduction is used // and multiple bonds are grouped together nvidia::NvVec3 normal; // Centroid used to compute node offsets, instead of assuming the bond is halfway between node positions. 
// This also allows the bonds to the world node to be drawn nvidia::NvVec3 centroid; }; struct NodeData { float mass; float volume; NvVec3 localPos; NvVec3 localVel; uint32_t solverNode; uint32_t neighborsCount; }; struct SolverNodeData { uint32_t supportNodesCount; NvVec3 localPos; union { float mass; int32_t indexShift; }; float volume; }; struct SolverBondData { InlineArray<uint32_t, 8>::type blastBondIndices; }; SupportGraphProcessor(uint32_t nodeCount, uint32_t maxBondCount) : m_solver(nodeCount, maxBondCount), m_nodesDirty(true), m_bondsDirty(true) { m_nodesData.resize(nodeCount); m_bondsData.reserve(maxBondCount); m_solverNodesData.resize(nodeCount); m_solverBondsData.reserve(maxBondCount); m_solverBondsMap.reserve(maxBondCount); m_blastBondIndexMap.resize(maxBondCount); memset(m_blastBondIndexMap.begin(), 0xFF, m_blastBondIndexMap.size() * sizeof(uint32_t)); resetVelocities(); } const NodeData& getNodeData(uint32_t node) const { return m_nodesData[node]; } const BondData& getBondData(uint32_t bond) const { return m_bondsData[bond]; } const SolverNodeData& getSolverNodeData(uint32_t node) const { return m_solverNodesData[node]; } const SolverBondData& getSolverBondData(uint32_t bond) const { return m_solverBondsData[bond]; } void getSolverInternalBondImpulses(uint32_t bond, NvVec3& impulseLinear, NvVec3& impulseAngular) const { m_solver.getBondImpulses(bond, impulseLinear, impulseAngular); } void getSolverInternalBondNodes(uint32_t bond, uint32_t& node0, uint32_t& node1) const { m_solver.getBondNodes(bond, node0, node1); } uint32_t getBondCount() const { return m_bondsData.size(); } uint32_t getNodeCount() const { return m_nodesData.size();; } uint32_t getSolverBondCount() const { return m_solverBondsData.size(); } uint32_t getSolverNodeCount() const { return m_solverNodesData.size();; } uint32_t getOverstressedBondCount() const { return m_overstressedBondCount; } void calcSolverBondStresses( uint32_t bondIdx, float bondArea, float nodeDist, const nvidia::NvVec3& bondNormal, float& stressNormal, float& stressShear) const { if (!canTakeDamage(bondArea)) { stressNormal = stressShear = 0.0f; return; } // impulseLinear in the direction of the bond normal is stressNormal, perpendicular is stressShear // ignore impulseAngular for now, not sure how to account for that // convert to pressure to factor out area NvVec3 impulseLinear, impulseAngular; getSolverInternalBondImpulses(bondIdx, impulseLinear, impulseAngular); const float normalComponentLinear = impulseLinear.dot(bondNormal); stressNormal = normalComponentLinear / bondArea; const float impulseLinearMagSqr = impulseLinear.magnitudeSquared(); stressShear = sqrtf(impulseLinearMagSqr - normalComponentLinear * normalComponentLinear) / bondArea; // impulseAngular in the direction of the bond normal is twist, perpendicular is bend // take abs() of the dot product because only the magnitude of the twist matters, not direction const float normalComponentAngular = abs(impulseAngular.dot(bondNormal)); const float twist = normalComponentAngular / bondArea; const float impulseAngularMagSqr = impulseAngular.magnitudeSquared(); const float bend = sqrtf(impulseAngularMagSqr - normalComponentAngular * normalComponentAngular) / bondArea; // interpret angular pressure as a composition of linear pressures // dividing by nodeDist for scaling const float twistContribution = twist * 2.0f / nodeDist; stressShear += twistContribution; const float bendContribution = bend * 2.0f / nodeDist; stressNormal += copysignf(bendContribution, stressNormal); } float 
mapStressToRange(float stress, float elasticLimit, float fatalLimit) const { if (stress < elasticLimit) { return 0.5f * stress / elasticLimit; } else { return fatalLimit > elasticLimit ? 0.5f + 0.5f * (stress - elasticLimit) / (fatalLimit - elasticLimit) : 1.0f; } } float getSolverBondStressPct(uint32_t bondIdx, const float* bondHealths, const ExtStressSolverSettings& settings, ExtStressSolver::DebugRenderMode mode) const { // sum up the stress of all underlying bonds involved in this stress solver bond float compressionStress, tensionStress, shearStress; float stress = -1.0f; const auto& blastBondIndices = m_solverBondsData[bondIdx].blastBondIndices; for (const auto blastBondIndex : blastBondIndices) { // only consider the stress values on bonds that are intact if (bondHealths[blastBondIndex] > 0.0f && getBondStress(blastBondIndex, compressionStress, tensionStress, shearStress)) { if (mode == ExtStressSolver::STRESS_PCT_COMPRESSION || mode == ExtStressSolver::STRESS_PCT_MAX) { compressionStress = mapStressToRange(compressionStress, settings.compressionElasticLimit, settings.compressionFatalLimit); stress = std::max(compressionStress, stress); } if (mode == ExtStressSolver::STRESS_PCT_TENSION || mode == ExtStressSolver::STRESS_PCT_MAX) { tensionStress = mapStressToRange(tensionStress, settings.tensionElasticLimit, settings.tensionFatalLimit); stress = std::max(tensionStress, stress); } if (mode == ExtStressSolver::STRESS_PCT_SHEAR || mode == ExtStressSolver::STRESS_PCT_MAX) { shearStress = mapStressToRange(shearStress, settings.shearElasticLimit, settings.shearFatalLimit); stress = std::max(shearStress, stress); } // all bonds in the group share the same stress values, no need to keep iterating break; } } // return a value < 0.0f if all bonds are broken return stress; } void setNodeInfo(uint32_t node, float mass, float volume, NvVec3 localPos) { m_nodesData[node].mass = mass; m_nodesData[node].volume = volume; m_nodesData[node].localPos = localPos; m_nodesDirty = true; } void setNodeNeighborsCount(uint32_t node, uint32_t neighborsCount) { // neighbors count is expected to be the number of nodes on 1 island/actor. m_nodesData[node].neighborsCount = neighborsCount; // check for too huge aggregates (happens after island's split) if (!m_nodesDirty) { m_nodesDirty |= (m_solverNodesData[m_nodesData[node].solverNode].supportNodesCount > neighborsCount / 2); } } void addNodeForce(uint32_t node, const NvVec3& force, ExtForceMode::Enum mode) { const float mass = m_nodesData[node].mass; if (mass > 0) { // NOTE - passing in acceleration as velocity. The impulse solver's output will be interpreted as force. m_nodesData[node].localVel += (mode == ExtForceMode::FORCE) ? 
force/mass : force; } } void addBond(uint32_t node0, uint32_t node1, uint32_t blastBondIndex) { if (isInvalidIndex(m_blastBondIndexMap[blastBondIndex])) { const BondData data = { node0, node1, blastBondIndex, 0.0f }; m_bondsData.pushBack(data); m_blastBondIndexMap[blastBondIndex] = m_bondsData.size() - 1; } } void removeBondIfExists(uint32_t blastBondIndex) { const uint32_t bondIndex = m_blastBondIndexMap[blastBondIndex]; if (!isInvalidIndex(bondIndex)) { const BondData& bond = m_bondsData[bondIndex]; const uint32_t solverNode0 = m_nodesData[bond.node0].solverNode; const uint32_t solverNode1 = m_nodesData[bond.node1].solverNode; bool isBondInternal = (solverNode0 == solverNode1); if (isBondInternal) { // internal bond sadly requires graph resync (it never happens on reduction level '0') m_nodesDirty = true; } else if (!m_nodesDirty) { // otherwise it's external bond, we can remove it manually and keep graph synced // we don't need to spend time there if (m_nodesDirty == true), graph will be resynced anyways BondKey solverBondKey(solverNode0, solverNode1); auto entry = m_solverBondsMap.find(solverBondKey); if (entry) { const uint32_t solverBondIndex = entry->second; auto& blastBondIndices = m_solverBondsData[solverBondIndex].blastBondIndices; blastBondIndices.findAndReplaceWithLast(blastBondIndex); if (blastBondIndices.empty()) { // all bonds associated with this solver bond were removed, so let's remove solver bond m_solverBondsData.replaceWithLast(solverBondIndex); m_solver.replaceWithLast(solverBondIndex); if (m_solver.getBondCount() > 0) { // update 'previously last' solver bond mapping uint32_t node0, node1; m_solver.getBondNodes(solverBondIndex, node0, node1); m_solverBondsMap[BondKey(node0, node1)] = solverBondIndex; } m_solverBondsMap.erase(solverBondKey); } } CHECK_GRAPH_INTEGRITY; } // remove bond from graph processor's list m_blastBondIndexMap[blastBondIndex] = invalidIndex<uint32_t>(); m_bondsData.replaceWithLast(bondIndex); m_blastBondIndexMap[m_bondsData[bondIndex].blastBondIndex] = m_bondsData.size() > bondIndex ? 
bondIndex : invalidIndex<uint32_t>(); } } void setGraphReductionLevel(uint32_t level) { m_graphReductionLevel = level; m_nodesDirty = true; } uint32_t getGraphReductionLevel() const { return m_graphReductionLevel; } void solve(const ExtStressSolverSettings& settings, const float* bondHealth, const NvBlastBond* bonds, bool warmStart = true) { sync(bonds); for (const NodeData& node : m_nodesData) { m_solver.setNodeVelocities(node.solverNode, node.localVel, NvVec3(NvZero)); } m_solver.solve(settings.maxSolverIterationsPerFrame, warmStart); resetVelocities(); updateBondStress(settings, bondHealth, bonds); } bool calcError(float& linear, float& angular) const { return m_solver.calcError(linear, angular); } bool getBondStress(uint32_t blastBondIndex, float& compression, float& tension, float& shear) const { const uint32_t bondIndex = m_blastBondIndexMap[blastBondIndex]; if (isInvalidIndex(bondIndex)) { return false; } // compression and tension are mutually exclusive since they operate in opposite directions // they both measure stress parallel to the bond normal direction // compression is the force resisting two nodes being pushed together (it pushes them apart) // tension is the force resisting two nodes being pulled apart (it pulls them together) if (m_bondsData[bondIndex].stressNormal <= 0.0f) { compression = -m_bondsData[bondIndex].stressNormal; tension = 0.0f; } else { compression = 0.0f; tension = m_bondsData[bondIndex].stressNormal; } // shear is independent and can co-exist with compression and tension shear = m_bondsData[bondIndex].stressShear; // the force perpendicular to the bond normal direction return true; } // Convert from Blast bond index to internal stress solver bond index // Will be InvalidIndex if the internal bond was removed from the stress solver uint32_t getInternalBondIndex(uint32_t blastBondIndex) { return m_blastBondIndexMap[blastBondIndex]; } private: void resetVelocities() { for (auto& node : m_nodesData) { node.localVel = NvVec3(NvZero); } } void updateBondStress(const ExtStressSolverSettings& settings, const float* bondHealth, const NvBlastBond* bonds) { m_overstressedBondCount = 0; Array<uint32_t>::type bondIndicesToRemove; bondIndicesToRemove.reserve(getBondCount()); for (uint32_t i = 0; i < m_solverBondsData.size(); ++i) { // calculate the total area of all bonds involved so pressure can be calculated float totalArea = 0.0f; // calculate an average normal and centroid for all bonds as well, weighted by their area nvidia::NvVec3 bondNormal(NvZero); nvidia::NvVec3 bondCentroid(NvZero); nvidia::NvVec3 averageNodeDisp(NvZero); const auto& blastBondIndices = m_solverBondsData[i].blastBondIndices; for (auto blastBondIndex : blastBondIndices) { if (bondHealth[blastBondIndex] > 0.0f) { const uint32_t bondIndex = m_blastBondIndexMap[blastBondIndex]; const BondData& bond = m_bondsData[bondIndex]; const nvidia::NvVec3 nodeDisp = m_nodesData[bond.node1].localPos - m_nodesData[bond.node0].localPos; // the current health of a bond is the effective area remaining const float remainingArea = bondHealth[blastBondIndex]; const NvBlastBond& blastBond = bonds[blastBondIndex]; // Align normal(s) with node displacement, so that compressive/tensile distinction is correct const nvidia::NvVec3 assetBondNormal(blastBond.normal[0], blastBond.normal[1], blastBond.normal[2]); const nvidia::NvVec3 blastBondNormal = std::copysignf(1.0f, assetBondNormal.dot(nodeDisp))*assetBondNormal; const nvidia::NvVec3 blastBondCentroid(blastBond.centroid[0], blastBond.centroid[1], 
blastBond.centroid[2]); if (!canTakeDamage(remainingArea)) // Check unbreakable limit { totalArea = kUnbreakableLimit; // Don't add this in, in case of overflow bondNormal = blastBondNormal; bondCentroid = blastBondCentroid; averageNodeDisp = nodeDisp; break; } bondNormal += blastBondNormal*remainingArea; bondCentroid += blastBondCentroid*remainingArea; averageNodeDisp += nodeDisp*remainingArea; totalArea += remainingArea; } else { // if the bond is broken, try to remove it after processing is complete bondIndicesToRemove.pushBack(blastBondIndex); } } if (totalArea == 0.0f) { continue; } // normalized the aggregate normal now that all contributing bonds have been combined bondNormal.normalizeSafe(); // divide by total area for the weighted position, if the area is valid if (canTakeDamage(totalArea)) { bondCentroid /= totalArea; averageNodeDisp /= totalArea; } // bonds are looked at as a whole group, // so regardless of the current health of an individual one they are either all over stressed or none are float stressNormal, stressShear; calcSolverBondStresses(i, totalArea, averageNodeDisp.magnitude(), bondNormal, stressNormal, stressShear); NVBLAST_ASSERT(!std::isnan(stressNormal) && !std::isnan(stressShear)); if ( -stressNormal > settings.compressionElasticLimit || stressNormal > settings.tensionElasticLimit || stressShear > settings.shearElasticLimit ) { m_overstressedBondCount += blastBondIndices.size(); } // store the stress values for all the bonds involved for (auto blastBondIndex : blastBondIndices) { const uint32_t bondIndex = m_blastBondIndexMap[blastBondIndex]; if (!isInvalidIndex(bondIndex) && bondHealth[blastBondIndex] > 0.0f) { BondData& bond = m_bondsData[bondIndex]; NVBLAST_ASSERT(getNodeData(bond.node0).solverNode != getNodeData(bond.node1).solverNode); NVBLAST_ASSERT(bond.blastBondIndex == blastBondIndex); bond.stressNormal = stressNormal; bond.stressShear = stressShear; // store the normal used to calc stresses so it can be used later to determine forces bond.normal = bondNormal; // store the bond centroid bond.centroid = bondCentroid; } } } // now that processing is done, remove any dead bonds for (uint32_t bondIndex : bondIndicesToRemove) { removeBondIfExists(bondIndex); } } void sync(const NvBlastBond* bonds) { if (m_nodesDirty) { syncNodes(bonds); m_solver.initialize(); } if (m_bondsDirty) { syncBonds(bonds); } CHECK_GRAPH_INTEGRITY; } void syncNodes(const NvBlastBond* bonds) { // init with 1<->1 blast nodes to solver nodes mapping m_solverNodesData.resize(m_nodesData.size()); for (uint32_t i = 0; i < m_nodesData.size(); ++i) { m_nodesData[i].solverNode = i; m_solverNodesData[i].supportNodesCount = 1; m_solverNodesData[i].indexShift = 0; } // for static nodes aggregate size per graph reduction level is lower, it // falls behind on few levels. (can be made as parameter) const uint32_t STATIC_NODES_COUNT_PENALTY = 2 << 2; // reducing graph by aggregating nodes level by level // NOTE (@anovoselov): Recently, I found a flow in the algorithm below. In very rare situations aggregate (solver node) // can contain more then one connected component. I didn't notice it to produce any visual artifacts and it's // unlikely to influence stress solvement a lot. Possible solution is to merge *whole* solver nodes, that // will raise complexity a bit (at least will add another loop on nodes for every reduction level. 
for (uint32_t k = 0; k < m_graphReductionLevel; k++) { const uint32_t maxAggregateSize = 1 << (k + 1); for (const BondData& bond : m_bondsData) { NodeData& node0 = m_nodesData[bond.node0]; NodeData& node1 = m_nodesData[bond.node1]; if (node0.solverNode == node1.solverNode) continue; SolverNodeData& solverNode0 = m_solverNodesData[node0.solverNode]; SolverNodeData& solverNode1 = m_solverNodesData[node1.solverNode]; const int countPenalty = 1; // This was being set to STATIC_NODES_COUNT_PENALTY for static nodes, may want to revisit const uint32_t aggregateSize = std::min<uint32_t>(maxAggregateSize, node0.neighborsCount / 2); if (solverNode0.supportNodesCount * countPenalty >= aggregateSize) continue; if (solverNode1.supportNodesCount * countPenalty >= aggregateSize) continue; if (solverNode0.supportNodesCount >= solverNode1.supportNodesCount) { solverNode1.supportNodesCount--; solverNode0.supportNodesCount++; node1.solverNode = node0.solverNode; } else if (solverNode1.supportNodesCount >= solverNode0.supportNodesCount) { solverNode1.supportNodesCount++; solverNode0.supportNodesCount--; node0.solverNode = node1.solverNode; } } } // Solver Nodes now sparse, a lot of empty ones. Rearrange them by moving all non-empty to the front // 2 passes used for that { uint32_t currentNode = 0; for (; currentNode < m_solverNodesData.size(); ++currentNode) { if (m_solverNodesData[currentNode].supportNodesCount > 0) continue; // 'currentNode' is free // search next occupied node uint32_t k = currentNode + 1; for (; k < m_solverNodesData.size(); ++k) { if (m_solverNodesData[k].supportNodesCount > 0) { // replace currentNode and keep indexShift m_solverNodesData[currentNode].supportNodesCount = m_solverNodesData[k].supportNodesCount; m_solverNodesData[k].indexShift = k - currentNode; m_solverNodesData[k].supportNodesCount = 0; break; } } if (k == m_solverNodesData.size()) { break; } } for (auto& node : m_nodesData) { node.solverNode -= m_solverNodesData[node.solverNode].indexShift; } // now, we know total solver nodes count and which nodes are aggregated into them m_solverNodesData.resize(currentNode); } // calculate all needed data for (SolverNodeData& solverNode : m_solverNodesData) { solverNode.supportNodesCount = 0; solverNode.localPos = NvVec3(NvZero); solverNode.mass = 0.0f; solverNode.volume = 0.0f; } for (NodeData& node : m_nodesData) { SolverNodeData& solverNode = m_solverNodesData[node.solverNode]; solverNode.supportNodesCount++; solverNode.localPos += node.localPos; solverNode.mass += node.mass; solverNode.volume += node.volume; } for (SolverNodeData& solverNode : m_solverNodesData) { solverNode.localPos /= (float)solverNode.supportNodesCount; } m_solver.reset(m_solverNodesData.size()); for (uint32_t nodeIndex = 0; nodeIndex < m_solverNodesData.size(); ++nodeIndex) { const SolverNodeData& solverNode = m_solverNodesData[nodeIndex]; const float R = NvPow(solverNode.volume * 3.0f * NvInvPi / 4.0f, 1.0f / 3.0f); // sphere volume approximation const float inertia = solverNode.mass * (R * R * 0.4f); // sphere inertia tensor approximation: I = 2/5 * M * R^2 ; invI = 1 / I; m_solver.setNodeMassInfo(nodeIndex, solverNode.localPos, solverNode.mass, inertia); } m_nodesDirty = false; syncBonds(bonds); } void syncBonds(const NvBlastBond* bonds) { // traverse all blast bonds and aggregate m_solver.clearBonds(); m_solverBondsMap.clear(); m_solverBondsData.clear(); for (BondData& bond : m_bondsData) { const NodeData& node0 = m_nodesData[bond.node0]; const NodeData& node1 = m_nodesData[bond.node1]; // reset stress, 
bond structure changed and internal bonds stress won't be updated during updateBondStress() bond.stressNormal = 0.0f; bond.stressShear = 0.0f; // initialize normal and centroid using blast values bond.normal = *(NvVec3*)bonds[bond.blastBondIndex].normal; bond.centroid = *(NvVec3*)bonds[bond.blastBondIndex].centroid; // fix normal direction to point from node0 to node1 bond.normal *= std::copysignf(1.0f, bond.normal.dot(node1.localPos - node0.localPos)); if (node0.solverNode == node1.solverNode) continue; // skip (internal) BondKey key(node0.solverNode, node1.solverNode); auto entry = m_solverBondsMap.find(key); SolverBondData* data; if (!entry) { m_solverBondsData.pushBack(SolverBondData()); data = &m_solverBondsData.back(); m_solverBondsMap[key] = m_solverBondsData.size() - 1; m_solver.addBond(node0.solverNode, node1.solverNode, bond.centroid); } else { data = &m_solverBondsData[entry->second]; } data->blastBondIndices.pushBack(bond.blastBondIndex); } m_bondsDirty = false; } #if GRAPH_INTERGRIRY_CHECK void checkGraphIntegrity() { NVBLAST_ASSERT(m_solver.getBondCount() == m_solverBondsData.size()); NVBLAST_ASSERT(m_solver.getNodeCount() == m_solverNodesData.size()); std::set<uint64_t> solverBonds; for (uint32_t i = 0; i < m_solverBondsData.size(); ++i) { const auto& bondData = m_solver.getBondData(i); BondKey key(bondData.node0, bondData.node1); NVBLAST_ASSERT(solverBonds.find(key) == solverBonds.end()); solverBonds.emplace(key); auto entry = m_solverBondsMap.find(key); NVBLAST_ASSERT(entry != nullptr); const auto& solverBond = m_solverBondsData[entry->second]; for (auto& blastBondIndex : solverBond.blastBondIndices) { if (!isInvalidIndex(m_blastBondIndexMap[blastBondIndex])) { auto& b = m_bondsData[m_blastBondIndexMap[blastBondIndex]]; BondKey key2(m_nodesData[b.node0].solverNode, m_nodesData[b.node1].solverNode); NVBLAST_ASSERT(key2 == key); } } } for (auto& solverBond : m_solverBondsData) { for (auto& blastBondIndex : solverBond.blastBondIndices) { if (!isInvalidIndex(m_blastBondIndexMap[blastBondIndex])) { auto& b = m_bondsData[m_blastBondIndexMap[blastBondIndex]]; NVBLAST_ASSERT(m_nodesData[b.node0].solverNode != m_nodesData[b.node1].solverNode); } } } uint32_t mappedBondCount = 0; for (uint32_t i = 0; i < m_blastBondIndexMap.size(); i++) { const auto& bondIndex = m_blastBondIndexMap[i]; if (!isInvalidIndex(bondIndex)) { mappedBondCount++; NVBLAST_ASSERT(m_bondsData[bondIndex].blastBondIndex == i); } } NVBLAST_ASSERT(m_bondsData.size() == mappedBondCount); } #endif struct BondKey { uint32_t node0; uint32_t node1; BondKey(uint32_t n0, uint32_t n1) : node0(n0), node1(n1) {} operator uint64_t() const { // Szudzik's function return node0 >= node1 ?
(uint64_t)node0 * node0 + node0 + node1 : (uint64_t)node1 * node1 + node0; } }; ConjugateGradientImpulseSolver m_solver; Array<SolverNodeData>::type m_solverNodesData; Array<SolverBondData>::type m_solverBondsData; uint32_t m_graphReductionLevel; bool m_nodesDirty; bool m_bondsDirty; uint32_t m_overstressedBondCount; HashMap<BondKey, uint32_t>::type m_solverBondsMap; Array<uint32_t>::type m_blastBondIndexMap; Array<BondData>::type m_bondsData; Array<NodeData>::type m_nodesData; }; /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // ExtStressSolver /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /** */ class ExtStressSolverImpl final : public ExtStressSolver { NV_NOCOPY(ExtStressSolverImpl) public: ExtStressSolverImpl(const NvBlastFamily& family, const ExtStressSolverSettings& settings); virtual void release() override; //////// ExtStressSolverImpl interface //////// virtual void setAllNodesInfoFromLL(float density = 1.0f) override; virtual void setNodeInfo(uint32_t graphNode, float mass, float volume, NvcVec3 localPos) override; virtual void setSettings(const ExtStressSolverSettings& settings) override { m_settings = settings; inheritSettingsLimits(); } virtual const ExtStressSolverSettings& getSettings() const override { return m_settings; } virtual bool addForce(const NvBlastActor& actor, NvcVec3 localPosition, NvcVec3 localForce, ExtForceMode::Enum mode) override; virtual void addForce(uint32_t graphNode, NvcVec3 localForce, ExtForceMode::Enum mode) override; virtual bool addGravity(const NvBlastActor& actor, NvcVec3 localGravity) override; virtual bool addCentrifugalAcceleration(const NvBlastActor& actor, NvcVec3 localCenterMass, NvcVec3 localAngularVelocity) override; virtual void update() override; virtual uint32_t getOverstressedBondCount() const override { return m_graphProcessor->getOverstressedBondCount(); } virtual void generateFractureCommands(const NvBlastActor& actor, NvBlastFractureBuffers& commands) override; virtual uint32_t generateFractureCommandsPerActor(const NvBlastActor** actorBuffer, NvBlastFractureBuffers* commandsBuffer, uint32_t bufferSize) override; void reset() override { m_reset = true; } virtual float getStressErrorLinear() const override { return m_errorLinear; } virtual float getStressErrorAngular() const override { return m_errorAngular; } virtual bool converged() const override { return m_converged; } virtual uint32_t getFrameCount() const override { return m_framesCount; } virtual uint32_t getBondCount() const override { return m_graphProcessor->getSolverBondCount(); } virtual bool getExcessForces(uint32_t actorIndex, const NvcVec3& com, NvcVec3& force, NvcVec3& torque) override; virtual bool notifyActorCreated(const NvBlastActor& actor) override; virtual void notifyActorDestroyed(const NvBlastActor& actor) override; virtual const DebugBuffer fillDebugRender(const uint32_t* nodes, uint32_t nodeCount, DebugRenderMode mode, float scale) override; private: ~ExtStressSolverImpl(); //////// private methods //////// void solve(); void fillFractureCommands(const NvBlastActor& actor, NvBlastFractureBuffers& commands); void initialize(); void iterate(); void removeBrokenBonds(); template<class T> T* getScratchArray(uint32_t size); bool generateStressDamage(const NvBlastActor& actor, uint32_t bondIndex, uint32_t node0, uint32_t node1); void inheritSettingsLimits() { NVBLAST_ASSERT(m_settings.compressionElasticLimit >= 0.0f 
&& m_settings.compressionFatalLimit >= 0.0f); // check if any optional limits need to inherit from the compression values if (m_settings.tensionElasticLimit < 0.0f) { m_settings.tensionElasticLimit = m_settings.compressionElasticLimit; } if (m_settings.tensionFatalLimit < 0.0f) { m_settings.tensionFatalLimit = m_settings.compressionFatalLimit; } if (m_settings.shearElasticLimit < 0.0f) { m_settings.shearElasticLimit = m_settings.compressionElasticLimit; } if (m_settings.shearFatalLimit < 0.0f) { m_settings.shearFatalLimit = m_settings.compressionFatalLimit; } } //////// data //////// const NvBlastFamily& m_family; HashSet<const NvBlastActor*>::type m_activeActors; ExtStressSolverSettings m_settings; NvBlastSupportGraph m_graph; bool m_isDirty; bool m_reset; const float* m_bondHealths; const float* m_cachedBondHealths; const NvBlastBond* m_bonds; SupportGraphProcessor* m_graphProcessor; float m_errorAngular; float m_errorLinear; bool m_converged; uint32_t m_framesCount; Array<NvBlastBondFractureData>::type m_bondFractureBuffer; Array<uint8_t>::type m_scratch; Array<DebugLine>::type m_debugLineBuffer; }; template<class T> NV_INLINE T* ExtStressSolverImpl::getScratchArray(uint32_t size) { const uint32_t scratchSize = sizeof(T) * size; if (m_scratch.size() < scratchSize) { m_scratch.resize(scratchSize); } return reinterpret_cast<T*>(m_scratch.begin()); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Creation /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// ExtStressSolverImpl::ExtStressSolverImpl(const NvBlastFamily& family, const ExtStressSolverSettings& settings) : m_family(family), m_settings(settings), m_isDirty(false), m_reset(false), m_errorAngular(std::numeric_limits<float>::max()), m_errorLinear(std::numeric_limits<float>::max()), m_converged(false), m_framesCount(0) { // this needs to be called any time settings change, including when they are first set inheritSettingsLimits(); const NvBlastAsset* asset = NvBlastFamilyGetAsset(&m_family, logLL); NVBLAST_ASSERT(asset); m_graph = NvBlastAssetGetSupportGraph(asset, logLL); const uint32_t bondCount = NvBlastAssetGetBondCount(asset, logLL); m_bondFractureBuffer.reserve(bondCount); { NvBlastActor* actor; NvBlastFamilyGetActors(&actor, 1, &family, logLL); m_bondHealths = NvBlastActorGetBondHealths(actor, logLL); m_cachedBondHealths = NvBlastActorGetCachedBondHeaths(actor, logLL); m_bonds = NvBlastAssetGetBonds(asset, logLL); } m_graphProcessor = NVBLAST_NEW(SupportGraphProcessor)(m_graph.nodeCount, bondCount); // traverse graph and fill bond info for (uint32_t node0 = 0; node0 < m_graph.nodeCount; ++node0) { for (uint32_t adjacencyIndex = m_graph.adjacencyPartition[node0]; adjacencyIndex < m_graph.adjacencyPartition[node0 + 1]; adjacencyIndex++) { uint32_t bondIndex = m_graph.adjacentBondIndices[adjacencyIndex]; if (m_bondHealths[bondIndex] <= 0.0f) continue; uint32_t node1 = m_graph.adjacentNodeIndices[adjacencyIndex]; if (node0 < node1) { m_graphProcessor->addBond(node0, node1, bondIndex); } } } } ExtStressSolverImpl::~ExtStressSolverImpl() { NVBLAST_DELETE(m_graphProcessor, SupportGraphProcessor); } ExtStressSolver* ExtStressSolver::create(const NvBlastFamily& family, const ExtStressSolverSettings& settings) { return NVBLAST_NEW(ExtStressSolverImpl) (family, settings); } void ExtStressSolverImpl::release() { NVBLAST_DELETE(this, ExtStressSolverImpl); } 
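// Worked example of how these limits interact (illustrative, based on inheritSettingsLimits() above and generateStressDamage() further below): if compressionElasticLimit = 10 and compressionFatalLimit = 30 while the tension and shear limits are left negative, tension and shear inherit those same values. A bond seeing a compressive stress of 20 then gets stressMultiplier = (20 - 10) / (30 - 10) = 0.5, so half of its remaining health is requested as bond fracture damage that frame.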
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Actors & Graph Data /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void ExtStressSolverImpl::setAllNodesInfoFromLL(float density) { const NvBlastAsset* asset = NvBlastFamilyGetAsset(&m_family, logLL); NVBLAST_ASSERT(asset); const uint32_t chunkCount = NvBlastAssetGetChunkCount(asset, logLL); const NvBlastChunk* chunks = NvBlastAssetGetChunks(asset, logLL); // traverse graph and fill node info for (uint32_t node0 = 0; node0 < m_graph.nodeCount; ++node0) { const uint32_t chunkIndex0 = m_graph.chunkIndices[node0]; if (chunkIndex0 >= chunkCount) { // chunkIndex is invalid means it is static node (represents world) m_graphProcessor->setNodeInfo(node0, 0.0f, 0.0f, NvVec3(NvZero)); } else { // fill node info const NvBlastChunk& chunk = chunks[chunkIndex0]; const float volume = chunk.volume; const float mass = volume * density; const NvVec3 localPos = *reinterpret_cast<const NvVec3*>(chunk.centroid); m_graphProcessor->setNodeInfo(node0, mass, volume, localPos); } } } void ExtStressSolverImpl::setNodeInfo(uint32_t graphNode, float mass, float volume, NvcVec3 localPos) { m_graphProcessor->setNodeInfo(graphNode, mass, volume, toNvShared(localPos)); } bool ExtStressSolverImpl::getExcessForces(uint32_t actorIndex, const NvcVec3& com, NvcVec3& force, NvcVec3& torque) { // otherwise allocate enough space and query the Blast SDK const NvBlastActor* actor = NvBlastFamilyGetActorByIndex(&m_family, actorIndex, logLL); if (actor == nullptr) { return false; } const uint32_t nodeCount = NvBlastActorGetGraphNodeCount(actor, logLL); uint32_t* nodeIndices = getScratchArray<uint32_t>(nodeCount); const uint32_t retCount = NvBlastActorGetGraphNodeIndices(nodeIndices, nodeCount, actor, logLL); NVBLAST_ASSERT(retCount == nodeCount); // get the mapping between support chunks and actor indices // this is the fastest way to tell if two node/chunks are part of the same actor const uint32_t* actorIndices = NvBlastFamilyGetChunkActorIndices(&m_family, logLL); if (actorIndices == nullptr) { return false; } // walk the visible nodes for the actor looking for bonds that broke this frame nvidia::NvVec3 totalForce(0.0f); nvidia::NvVec3 totalTorque(0.0f); for (uint32_t n = 0; n < nodeCount; n++) { // find bonds that broke this frame (health <= 0 but internal stress bond index is still valid) const uint32_t nodeIdx = nodeIndices[n]; for (uint32_t i = m_graph.adjacencyPartition[nodeIdx]; i < m_graph.adjacencyPartition[nodeIdx + 1]; i++) { // check if the bond is broken first of all const uint32_t blastBondIndex = m_graph.adjacentBondIndices[i]; if (m_bondHealths[blastBondIndex] > 0.0f) { continue; } // broken bonds that have invalid internal indices broke before this frame const uint32_t internalBondIndex = m_graphProcessor->getInternalBondIndex(blastBondIndex); if (isInvalidIndex(internalBondIndex)) { continue; } // make sure the other node in the bond isn't part of the same actor // forces should only be applied due to bonds breaking between actors, not within const uint32_t chunkIdx = m_graph.chunkIndices[nodeIdx]; const uint32_t otherNodeIdx = m_graph.adjacentNodeIndices[i]; const uint32_t otherChunkIdx = m_graph.chunkIndices[otherNodeIdx]; if (!isInvalidIndex(chunkIdx) && !isInvalidIndex(otherChunkIdx) && actorIndices[chunkIdx] == actorIndices[otherChunkIdx]) { continue; } // this bond should contribute forces to the output const auto 
bondData = m_graphProcessor->getBondData(internalBondIndex); NVBLAST_ASSERT(blastBondIndex == bondData.blastBondIndex); uint32_t node0, node1; m_graphProcessor->getSolverInternalBondNodes(internalBondIndex, node0, node1); NVBLAST_ASSERT(bondData.node0 == internalBondData.node0 && bondData.node1 == internalBondData.node1); // accumulators for forces just from this bond nvidia::NvVec3 nvLinearPressure(0.0f); nvidia::NvVec3 nvAngularPressure(0.0f); // deal with linear forces const float excessCompression = bondData.stressNormal + m_settings.compressionFatalLimit; const float excessTension = bondData.stressNormal - m_settings.tensionFatalLimit; if (excessCompression < 0.0f) { nvLinearPressure += excessCompression * bondData.normal; } else if (excessTension > 0.0f) { // tension is in the negative direction of the linear impulse nvLinearPressure += excessTension * bondData.normal; } const float excessShear = bondData.stressShear - m_settings.shearFatalLimit; if (excessShear > 0.0f) { NvVec3 impulseLinear, impulseAngular; m_graphProcessor->getSolverInternalBondImpulses(internalBondIndex, impulseLinear, impulseAngular); const nvidia::NvVec3 shearDir = impulseLinear - impulseLinear.dot(bondData.normal)*bondData.normal; nvLinearPressure += excessShear * shearDir.getNormalized(); } if (nvLinearPressure.magnitudeSquared() > FLT_EPSILON) { const float* bondCenter = m_bonds[blastBondIndex].centroid; const nvidia::NvVec3 forceOffset = nvidia::NvVec3(bondCenter[0], bondCenter[1], bondCenter[2]) - toNvShared(com); const nvidia::NvVec3 torqueFromForce = forceOffset.cross(nvLinearPressure); nvAngularPressure += torqueFromForce; } // add the contributions from this bond to the total forces for the actor // multiply by the area to convert back to force from pressure const float bondRemainingArea = m_cachedBondHealths[blastBondIndex]; NVBLAST_ASSERT(bondRemainingArea <= m_bonds[blastBondIndex].area); const float sign = otherNodeIdx > nodeIdx ?
1.0f : -1.0f; totalForce += nvLinearPressure * (sign*bondRemainingArea); totalTorque += nvAngularPressure * (sign*bondRemainingArea); } } // convert to the output format and return true if non-zero forces were accumulated force = fromNvShared(totalForce); torque = fromNvShared(totalTorque); return (totalForce.magnitudeSquared() + totalTorque.magnitudeSquared()) > 0.0f; } bool ExtStressSolverImpl::notifyActorCreated(const NvBlastActor& actor) { const uint32_t graphNodeCount = NvBlastActorGetGraphNodeCount(&actor, logLL); if (graphNodeCount > 1) { // update neighbors { uint32_t* graphNodeIndices = getScratchArray<uint32_t>(graphNodeCount); const uint32_t nodeCount = NvBlastActorGetGraphNodeIndices(graphNodeIndices, graphNodeCount, &actor, logLL); for (uint32_t i = 0; i < nodeCount; ++i) { m_graphProcessor->setNodeNeighborsCount(graphNodeIndices[i], nodeCount); } } m_activeActors.insert(&actor); m_isDirty = true; return true; } return false; } void ExtStressSolverImpl::notifyActorDestroyed(const NvBlastActor& actor) { if (m_activeActors.erase(&actor)) { m_isDirty = true; } } void ExtStressSolverImpl::removeBrokenBonds() { // traverse graph and remove dead bonds for (uint32_t node0 = 0; node0 < m_graph.nodeCount; ++node0) { for (uint32_t adjacencyIndex = m_graph.adjacencyPartition[node0]; adjacencyIndex < m_graph.adjacencyPartition[node0 + 1]; adjacencyIndex++) { uint32_t node1 = m_graph.adjacentNodeIndices[adjacencyIndex]; if (node0 < node1) { uint32_t bondIndex = m_graph.adjacentBondIndices[adjacencyIndex]; if (m_bondHealths[bondIndex] <= 0.0f) { m_graphProcessor->removeBondIfExists(bondIndex); } } } } m_isDirty = false; } void ExtStressSolverImpl::initialize() { if (m_reset) { m_framesCount = 0; } if (m_isDirty) { removeBrokenBonds(); } if (m_settings.graphReductionLevel != m_graphProcessor->getGraphReductionLevel()) { m_graphProcessor->setGraphReductionLevel(m_settings.graphReductionLevel); } } bool ExtStressSolverImpl::addForce(const NvBlastActor& actor, NvcVec3 localPosition, NvcVec3 localForce, ExtForceMode::Enum mode) { float bestDist = FLT_MAX; uint32_t bestNode = invalidIndex<uint32_t>(); const uint32_t graphNodeCount = NvBlastActorGetGraphNodeCount(&actor, logLL); if (graphNodeCount > 1) { uint32_t* graphNodeIndices = getScratchArray<uint32_t>(graphNodeCount); const uint32_t nodeCount = NvBlastActorGetGraphNodeIndices(graphNodeIndices, graphNodeCount, &actor, logLL); for (uint32_t i = 0; i < nodeCount; ++i) { const uint32_t node = graphNodeIndices[i]; const float sqrDist = (toNvShared(localPosition) - m_graphProcessor->getNodeData(node).localPos).magnitudeSquared(); if (sqrDist < bestDist) { bestDist = sqrDist; bestNode = node; } } if (!isInvalidIndex(bestNode)) { m_graphProcessor->addNodeForce(bestNode, toNvShared(localForce), mode); return true; } } return false; } void ExtStressSolverImpl::addForce(uint32_t graphNode, NvcVec3 localForce, ExtForceMode::Enum mode) { m_graphProcessor->addNodeForce(graphNode, toNvShared(localForce), mode); } bool ExtStressSolverImpl::addGravity(const NvBlastActor& actor, NvcVec3 localGravity) { const uint32_t graphNodeCount = NvBlastActorGetGraphNodeCount(&actor, logLL); if (graphNodeCount > 1) { uint32_t* graphNodeIndices = getScratchArray<uint32_t>(graphNodeCount); const uint32_t nodeCount = NvBlastActorGetGraphNodeIndices(graphNodeIndices, graphNodeCount, &actor, logLL); for (uint32_t i = 0; i < nodeCount; ++i) { const uint32_t node = graphNodeIndices[i]; m_graphProcessor->addNodeForce(node, toNvShared(localGravity), ExtForceMode::ACCELERATION); } 
return true; } return false; } bool ExtStressSolverImpl::addCentrifugalAcceleration(const NvBlastActor& actor, NvcVec3 localCenterMass, NvcVec3 localAngularVelocity) { const uint32_t graphNodeCount = NvBlastActorGetGraphNodeCount(&actor, logLL); if (graphNodeCount > 1) { uint32_t* graphNodeIndices = getScratchArray<uint32_t>(graphNodeCount); const uint32_t nodeCount = NvBlastActorGetGraphNodeIndices(graphNodeIndices, graphNodeCount, &actor, logLL); // Apply centrifugal force for (uint32_t i = 0; i < nodeCount; ++i) { const uint32_t node = graphNodeIndices[i]; const auto& localPos = m_graphProcessor->getNodeData(node).localPos; // a = w x (w x r) const NvVec3 centrifugalAcceleration = toNvShared(localAngularVelocity) .cross(toNvShared(localAngularVelocity).cross(localPos - toNvShared(localCenterMass))); m_graphProcessor->addNodeForce(node, centrifugalAcceleration, ExtForceMode::ACCELERATION); } return true; } return false; } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Update /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void ExtStressSolverImpl::update() { initialize(); solve(); m_framesCount++; } void ExtStressSolverImpl::solve() { NV_SIMD_GUARD; m_graphProcessor->solve(m_settings, m_bondHealths, m_bonds, WARM_START && !m_reset); m_reset = false; m_converged = m_graphProcessor->calcError(m_errorLinear, m_errorAngular); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Damage /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // check if this bond is over stressed in any way and generate a fracture command if it is bool ExtStressSolverImpl::generateStressDamage(const NvBlastActor& actor, uint32_t bondIndex, uint32_t node0, uint32_t node1) { const float bondHealth = m_bondHealths[bondIndex]; float stressCompression, stressTension, stressShear; if (bondHealth > 0.0f && m_graphProcessor->getBondStress(bondIndex, stressCompression, stressTension, stressShear)) { // compression and tension are mutually exclusive, only one can be positive at a time since they act in opposite directions float stressMultiplier = 0.0f; if (stressCompression > m_settings.compressionElasticLimit) { const float excessStress = stressCompression - m_settings.compressionElasticLimit; const float compressionDenom = m_settings.compressionFatalLimit - m_settings.compressionElasticLimit; const float compressionMultiplier = excessStress / (compressionDenom > 0.0f ? compressionDenom : 1.0f); stressMultiplier += compressionMultiplier; } else if (stressTension > m_settings.tensionElasticLimit) { const float excessStress = stressTension - m_settings.tensionElasticLimit; const float tensionDenom = m_settings.tensionFatalLimit - m_settings.tensionElasticLimit; const float tensionMultiplier = excessStress / (tensionDenom > 0.0f ? tensionDenom : 1.0f); stressMultiplier += tensionMultiplier; } // shear can co-exist with either compression or tension so must be accounted for independently of them if (stressShear > m_settings.shearElasticLimit) { const float excessStress = stressShear - m_settings.shearElasticLimit; const float shearDenom = m_settings.shearFatalLimit - m_settings.shearElasticLimit; const float shearMultiplier = excessStress / (shearDenom > 0.0f ? 
shearDenom : 1.0f); stressMultiplier += shearMultiplier; } if (stressMultiplier > 0.0f) { // bond health/area is reduced by excess pressure to approximate micro bonds in the material breaking const float bondDamage = bondHealth * stressMultiplier; const NvBlastBondFractureData data = { 0, node0, node1, bondDamage }; m_bondFractureBuffer.pushBack(data); // cache off the current health value for this bond // so it can be used to calculate forces to apply if it breaks later NvBlastActorCacheBondHeath(&actor, bondIndex, logLL); return true; } } return false; } void ExtStressSolverImpl::fillFractureCommands(const NvBlastActor& actor, NvBlastFractureBuffers& commands) { const uint32_t graphNodeCount = NvBlastActorGetGraphNodeCount(&actor, logLL); uint32_t commandCount = 0; if (graphNodeCount > 1 && m_graphProcessor->getOverstressedBondCount() > 0) { uint32_t* graphNodeIndices = getScratchArray<uint32_t>(graphNodeCount); const uint32_t nodeCount = NvBlastActorGetGraphNodeIndices(graphNodeIndices, graphNodeCount, &actor, logLL); for (uint32_t i = 0; i < nodeCount; ++i) { const uint32_t node0 = graphNodeIndices[i]; for (uint32_t adjacencyIndex = m_graph.adjacencyPartition[node0]; adjacencyIndex < m_graph.adjacencyPartition[node0 + 1]; adjacencyIndex++) { const uint32_t node1 = m_graph.adjacentNodeIndices[adjacencyIndex]; if (node0 < node1) { const uint32_t bondIndex = m_graph.adjacentBondIndices[adjacencyIndex]; if (generateStressDamage(actor, bondIndex, node0, node1)) { commandCount++; } } } } } commands.chunkFractureCount = 0; commands.chunkFractures = nullptr; commands.bondFractureCount = commandCount; commands.bondFractures = commandCount > 0 ? m_bondFractureBuffer.end() - commandCount : nullptr; } void ExtStressSolverImpl::generateFractureCommands(const NvBlastActor& actor, NvBlastFractureBuffers& commands) { m_bondFractureBuffer.clear(); fillFractureCommands(actor, commands); } uint32_t ExtStressSolverImpl::generateFractureCommandsPerActor(const NvBlastActor** actorBuffer, NvBlastFractureBuffers* commandsBuffer, uint32_t bufferSize) { if (m_graphProcessor->getOverstressedBondCount() == 0) return 0; m_bondFractureBuffer.clear(); uint32_t index = 0; for (auto it = m_activeActors.getIterator(); !it.done() && index < bufferSize; ++it) { const NvBlastActor* actor = *it; NvBlastFractureBuffers& nextCommand = commandsBuffer[index]; fillFractureCommands(*actor, nextCommand); if (nextCommand.bondFractureCount > 0) { actorBuffer[index] = actor; index++; } } return index; } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Debug Render /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// inline uint32_t NvVec4ToU32Color(const NvVec4& color) { return ((uint32_t)(color.w * 255) << 24) | // A ((uint32_t)(color.x * 255) << 16) | // R ((uint32_t)(color.y * 255) << 8) | // G ((uint32_t)(color.z * 255)); // B } static float Lerp(float v0, float v1, float val) { return v0 * (1 - val) + v1 * val; } inline float clamp01(float v) { return v < 0.0f ? 0.0f : (v > 1.0f ? 
1.0f : v); } inline NvVec4 colorConvertHSVAtoRGBA(float h, float s, float v, float a) { const float t = 6.0f * (h - std::floor(h)); const int n = (int)t; const float m = t - (float)n; const float c = 1.0f - s; const float b[6] = { 1.0f, 1.0f - s * m, c, c, 1.0f - s * (1.0f - m), 1.0f }; return NvVec4(v * b[n % 6], v * b[(n + 4) % 6], v * b[(n + 2) % 6], a); // n % 6 protects against roundoff errors } inline uint32_t bondHealthColor(float stressPct) { stressPct = clamp01(stressPct); constexpr float BOND_HEALTHY_HUE = 1.0f/3.0f; // Green constexpr float BOND_ELASTIC_HUE = 0.0f; // Red constexpr float BOND_STRESSED_HUE = 2.0f/3.0f; // Blue constexpr float BOND_FATAL_HUE = 5.0f/6.0f; // Magenta const float hue = stressPct < 0.5f ? Lerp(BOND_HEALTHY_HUE, BOND_ELASTIC_HUE, 2.0f * stressPct) : Lerp(BOND_STRESSED_HUE, BOND_FATAL_HUE, 2.0f * stressPct - 1.0f); return NvVec4ToU32Color(colorConvertHSVAtoRGBA(hue, 1.0f, 1.0f, 1.0f)); } const ExtStressSolver::DebugBuffer ExtStressSolverImpl::fillDebugRender(const uint32_t* nodes, uint32_t nodeCount, DebugRenderMode mode, float scale) { NV_UNUSED(scale); const uint32_t BOND_UNBREAKABLE_COLOR = NvVec4ToU32Color(NvVec4(0.0f, 0.682f, 1.0f, 1.0f)); ExtStressSolver::DebugBuffer debugBuffer = { nullptr, 0 }; if (m_isDirty) return debugBuffer; m_debugLineBuffer.clear(); Array<uint8_t>::type& nodesSet = m_scratch; nodesSet.resize(m_graphProcessor->getSolverNodeCount()); memset(nodesSet.begin(), 0, nodesSet.size() * sizeof(uint8_t)); for (uint32_t i = 0; i < nodeCount; ++i) { NVBLAST_ASSERT(m_graphProcessor->getNodeData(nodes[i]).solverNode < nodesSet.size()); nodesSet[m_graphProcessor->getNodeData(nodes[i]).solverNode] = 1; } const uint32_t bondCount = m_graphProcessor->getSolverBondCount(); for (uint32_t i = 0; i < bondCount; ++i) { const auto& bondData = m_graphProcessor->getBondData(i); uint32_t node0, node1; m_graphProcessor->getSolverInternalBondNodes(i, node0, node1); if (nodesSet[node0] != 0) { //NVBLAST_ASSERT(nodesSet[node1] != 0); const auto& solverNode0 = m_graphProcessor->getSolverNodeData(node0); const auto& solverNode1 = m_graphProcessor->getSolverNodeData(node1); const NvcVec3 p0 = fromNvShared(solverNode0.mass > 0.0f ? solverNode0.localPos : bondData.centroid); const NvcVec3 p1 = fromNvShared(solverNode1.mass > 0.0f ? solverNode1.localPos : bondData.centroid); // don't render lines for broken bonds const float stressPct = m_graphProcessor->getSolverBondStressPct(i, m_bondHealths, m_settings, mode); if (stressPct >= 0.0f) { const uint32_t color = canTakeDamage(m_bondHealths[bondData.blastBondIndex]) ? bondHealthColor(stressPct) : BOND_UNBREAKABLE_COLOR; m_debugLineBuffer.pushBack(DebugLine(p0, p1, color)); } } } debugBuffer.lines = m_debugLineBuffer.begin(); debugBuffer.lineCount = m_debugLineBuffer.size(); return debugBuffer; } } // namespace Blast } // namespace Nv
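// Hedged usage sketch for the solver above. Names such as `family`, `actor` and the density value are placeholders, and applying the generated commands through the low-level fracture API is only indicated, not shown:
//
//     ExtStressSolverSettings settings; // elastic/fatal limits, graphReductionLevel, iteration count
//     ExtStressSolver* solver = ExtStressSolver::create(family, settings);
//     solver->setAllNodesInfoFromLL(1000.0f); // mass/volume/centroid derived from asset chunks
//     solver->notifyActorCreated(*actor);
//     // each simulation step:
//     solver->addGravity(*actor, NvcVec3{ 0.0f, -9.8f, 0.0f });
//     solver->update();
//     if (solver->getOverstressedBondCount() > 0)
//     {
//         NvBlastFractureBuffers commands;
//         solver->generateFractureCommands(*actor, commands);
//         // apply `commands` with the low-level Blast fracture functions, then call
//         // notifyActorCreated()/notifyActorDestroyed() for any actors produced by the split
//     }
//     solver->release();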
68,856
C++
37.596973
181
0.586979
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/shaders/NvBlastExtDamageAcceleratorAABBTree.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastExtDamageAcceleratorAABBTree.h" #include "NvBlastIndexFns.h" #include "NvBlastAssert.h" #include "NvVec4.h" #include <algorithm> using namespace nvidia; namespace Nv { namespace Blast { /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Creation /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// ExtDamageAcceleratorAABBTree* ExtDamageAcceleratorAABBTree::create(const NvBlastAsset* asset) { ExtDamageAcceleratorAABBTree* tree = NVBLAST_NEW(Nv::Blast::ExtDamageAcceleratorAABBTree) (); tree->build(asset); return tree; } void ExtDamageAcceleratorAABBTree::release() { NVBLAST_DELETE(this, ExtDamageAcceleratorAABBTree); } void ExtDamageAcceleratorAABBTree::build(const NvBlastAsset* asset) { NVBLAST_ASSERT(m_root == nullptr); const NvBlastSupportGraph graph = NvBlastAssetGetSupportGraph(asset, logLL); const NvBlastBond* bonds = NvBlastAssetGetBonds(asset, logLL); const NvBlastChunk* chunks = NvBlastAssetGetChunks(asset, logLL); const uint32_t N = NvBlastAssetGetBondCount(asset, logLL); m_indices.resizeUninitialized(N); m_points.resizeUninitialized(N); m_segments.resizeUninitialized(N); m_bonds.resizeUninitialized(N); m_nodes.reserve(2 * N); for (uint32_t node0 = 0; node0 < graph.nodeCount; ++node0) { for (uint32_t j = graph.adjacencyPartition[node0]; j < graph.adjacencyPartition[node0 + 1]; ++j) { uint32_t bondIndex = graph.adjacentBondIndices[j]; uint32_t node1 = graph.adjacentNodeIndices[j]; if (node0 < node1) { const NvBlastBond& bond = bonds[bondIndex]; const NvVec3& p = (reinterpret_cast<const NvVec3&>(bond.centroid)); m_points[bondIndex] = p; m_indices[bondIndex] = bondIndex; m_bonds[bondIndex].node0 = node0; m_bonds[bondIndex].node1 = node1; // filling bond segments as a connection of 2 chunk centroids const uint32_t chunk0 = graph.chunkIndices[node0]; const uint32_t chunk1 = graph.chunkIndices[node1]; if 
(isInvalidIndex(chunk1)) { // for world node we don't have it's centroid, so approximate with projection on bond normal m_segments[bondIndex].p0 = (reinterpret_cast<const NvVec3&>(chunks[chunk0].centroid)); const NvVec3 normal = (reinterpret_cast<const NvVec3&>(bond.normal)); m_segments[bondIndex].p1 = m_segments[bondIndex].p0 + normal * (p - m_segments[bondIndex].p0).dot(normal) * 2; } else { m_segments[bondIndex].p0 = (reinterpret_cast<const NvVec3&>(chunks[chunk0].centroid)); m_segments[bondIndex].p1 = (reinterpret_cast<const NvVec3&>(chunks[chunk1].centroid)); } } } } int rootIndex = N > 0 ? createNode(0, N - 1, 0) : -1; m_root = rootIndex >= 0 ? &m_nodes[rootIndex] : nullptr; } int ExtDamageAcceleratorAABBTree::createNode(uint32_t startIdx, uint32_t endIdx, uint32_t depth) { if (startIdx > endIdx) return -1; Node node; node.first = startIdx; node.last = endIdx; // calc node bounds node.pointsBound = NvBounds3::empty(); node.segmentsBound = NvBounds3::empty(); for (uint32_t i = node.first; i <= node.last; i++) { const uint32_t idx = m_indices[i]; node.pointsBound.include(m_points[idx]); node.segmentsBound.include(m_segments[idx].p0); node.segmentsBound.include(m_segments[idx].p1); } // select axis of biggest extent const NvVec3 ext = node.pointsBound.getExtents(); uint32_t axis = 0; for (uint32_t k = 1; k < 3; k++) { if (ext[k] > ext[axis]) { axis = k; } } // split on selected axis and partially sort around the middle const uint32_t mid = startIdx + (endIdx - startIdx) / 2; std::nth_element(m_indices.begin() + startIdx, m_indices.begin() + mid, m_indices.begin() + endIdx + 1, [&](uint32_t lhs, uint32_t rhs) { return m_points[lhs][axis] < m_points[rhs][axis]; }); const uint32_t BUCKET = 32; if (endIdx - startIdx > BUCKET && mid > startIdx && mid < endIdx) { node.child[0] = createNode(startIdx, mid, depth + 1); node.child[1] = createNode(mid + 1, endIdx, depth + 1); } else { node.child[0] = -1; node.child[1] = -1; } m_nodes.pushBack(node); return m_nodes.size() - 1; } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Queries /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void ExtDamageAcceleratorAABBTree::findInBounds(const nvidia::NvBounds3& bounds, ResultCallback& callback, bool segments) const { if (m_root) { if (segments) findSegmentsInBounds(*m_root, callback, bounds); else findPointsInBounds(*m_root, callback, bounds); callback.dispatch(); } } void ExtDamageAcceleratorAABBTree::findPointsInBounds(const Node& node, ResultCallback& callback, const nvidia::NvBounds3& bounds) const { if (!bounds.intersects(node.pointsBound)) { return; } // if search bound contains node bound, simply add all point indexes. if (node.pointsBound.isInside(bounds)) { for (uint32_t i = node.first; i <= node.last; i++) pushResult(callback, m_indices[i]); return; // early pruning. } if (node.child[0] < 0) { for (uint32_t i = node.first; i <= node.last; i++) { const uint32_t idx = m_indices[i]; if (bounds.contains(m_points[idx])) pushResult(callback, idx); } return; } // check whether child nodes are in range. 
for (uint32_t c = 0; c < 2; ++c) { findPointsInBounds(m_nodes[node.child[c]], callback, bounds); } } void ExtDamageAcceleratorAABBTree::findSegmentsInBounds(const Node& node, ResultCallback& callback, const nvidia::NvBounds3& bounds) const { if (!bounds.intersects(node.segmentsBound)) { return; } // if search bound contains node bound, simply add all point indexes. if (node.segmentsBound.isInside(bounds)) { for (uint32_t i = node.first; i <= node.last; i++) pushResult(callback, m_indices[i]); return; // early pruning. } if (node.child[0] < 0) { for (uint32_t i = node.first; i <= node.last; i++) { const uint32_t idx = m_indices[i]; if (bounds.contains(m_segments[idx].p0) || bounds.contains(m_segments[idx].p1)) pushResult(callback, idx); } return; } // check whether child nodes are in range. for (uint32_t c = 0; c < 2; ++c) { findSegmentsInBounds(m_nodes[node.child[c]], callback, bounds); } } bool intersectSegmentPlane(const NvVec3& v1, const NvVec3& v2, const NvPlane& p) { const bool s1 = p.distance(v1) > 0.f; const bool s2 = p.distance(v2) > 0.f; return (s1 && !s2) || (s2 && !s1); } bool intersectBoundsPlane(const NvBounds3& b, const NvPlane& p) { const NvVec3 extents = b.getExtents(); const NvVec3 center = b.getCenter(); float r = extents.x * NvAbs(p.n.x) + extents.y * NvAbs(p.n.y) + extents.z * NvAbs(p.n.z); float s = p.n.dot(center) + p.d; return NvAbs(s) <= r; } void ExtDamageAcceleratorAABBTree::findBondSegmentsPlaneIntersected(const nvidia::NvPlane& plane, ResultCallback& resultCallback) const { if (m_root) { findSegmentsPlaneIntersected(*m_root, resultCallback, plane); resultCallback.dispatch(); } } void ExtDamageAcceleratorAABBTree::findSegmentsPlaneIntersected(const Node& node, ResultCallback& callback, const nvidia::NvPlane& plane) const { if (!intersectBoundsPlane(node.segmentsBound, plane)) { return; } if (node.child[0] < 0) { for (uint32_t i = node.first; i <= node.last; i++) { const uint32_t idx = m_indices[i]; if (intersectSegmentPlane(m_segments[idx].p0, m_segments[idx].p1, plane)) pushResult(callback, idx); } return; } // check whether child nodes are in range. for (uint32_t c = 0; c < 2; ++c) { findSegmentsPlaneIntersected(m_nodes[node.child[c]], callback, plane); } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Debug Render /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// inline uint32_t NvVec4ToU32Color(const NvVec4& color) { uint32_t c = 0; c |= (int)(color.w * 255); c <<= 8; c |= (int)(color.z * 255); c <<= 8; c |= (int)(color.y * 255); c <<= 8; c |= (int)(color.x * 255); return c; } Nv::Blast::DebugBuffer ExtDamageAcceleratorAABBTree::fillDebugRender(int depth, bool segments) { Nv::Blast::DebugBuffer debugBuffer = { nullptr, 0 }; m_debugLineBuffer.clear(); if (m_root) { fillDebugBuffer(*m_root, 0, depth, segments); } debugBuffer.lines = m_debugLineBuffer.begin(); debugBuffer.lineCount = m_debugLineBuffer.size(); return debugBuffer; } void ExtDamageAcceleratorAABBTree::fillDebugBuffer(const Node& node, int currentDepth, int depth, bool segments) { if (depth < 0 || currentDepth == depth) { const NvVec4 LEAF_COLOR(1.0f, 1.0f, 1.0f, 1.0f); const NvVec4 NON_LEAF_COLOR(0.3f, 0.3f, 0.3f, 1.0f); // draw box const NvBounds3 bounds = segments ? 
node.segmentsBound : node.pointsBound; const NvVec3 center = bounds.getCenter(); const NvVec3 extents = bounds.getExtents(); const int vs[] = { 0,3,5,6 }; for (int i = 0; i < 4; i++) { int v = vs[i]; for (int d = 1; d < 8; d <<= 1) { auto flip = [](int x, int k) { return ((x >> k) & 1) * 2.f - 1.f; }; const float s = std::pow(0.99f, currentDepth); NvVec3 p0 = center + s * extents.multiply(NvVec3(flip(v, 0), flip(v, 1), flip(v, 2))); NvVec3 p1 = center + s * extents.multiply(NvVec3(flip(v^d, 0), flip(v^d, 1), flip(v^d, 2))); m_debugLineBuffer.pushBack(Nv::Blast::DebugLine( reinterpret_cast<NvcVec3&>(p0), reinterpret_cast<NvcVec3&>(p1), NvVec4ToU32Color(LEAF_COLOR * (1.f - (currentDepth + 1) * 0.1f))) ); } } } for (uint32_t i = 0; i < 2; ++i) { if (node.child[i] >= 0) { fillDebugBuffer(m_nodes[node.child[i]], currentDepth + 1, depth, segments); } } } } // namespace Blast } // namespace Nv
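// Illustrative note on the build above: createNode() partitions the bond centroids around the median along the axis of largest extent via std::nth_element and stops splitting once a node spans no more than BUCKET (32) entries. For an asset with roughly 1000 bonds that gives a tree about 5 levels deep with on the order of 60 nodes, and each query descends only the branches whose bounds overlap the query volume.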
12,983
C++
33.168421
143
0.571594
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/shaders/NvBlastExtDamageAcceleratorAABBTree.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "NvBlastExtDamageAcceleratorInternal.h" #include "NvBlast.h" #include "NvBlastArray.h" namespace Nv { namespace Blast { class ExtDamageAcceleratorAABBTree final : public ExtDamageAcceleratorInternal { public: //////// ctor //////// ExtDamageAcceleratorAABBTree() : m_root(nullptr) { } virtual ~ExtDamageAcceleratorAABBTree() { } static ExtDamageAcceleratorAABBTree* create(const NvBlastAsset* asset); //////// interface //////// virtual void release() override; virtual void findBondCentroidsInBounds(const nvidia::NvBounds3& bounds, ResultCallback& resultCallback) const override { const_cast<ExtDamageAcceleratorAABBTree*>(this)->findInBounds(bounds, resultCallback, false); } virtual void findBondSegmentsInBounds(const nvidia::NvBounds3& bounds, ResultCallback& resultCallback) const override { const_cast<ExtDamageAcceleratorAABBTree*>(this)->findInBounds(bounds, resultCallback, true); } virtual void findBondSegmentsPlaneIntersected(const nvidia::NvPlane& plane, ResultCallback& resultCallback) const override; virtual Nv::Blast::DebugBuffer fillDebugRender(int depth, bool segments) override; virtual void* getImmediateScratch(size_t size) override { m_scratch.resizeUninitialized(size); return m_scratch.begin(); } private: // no copy/assignment ExtDamageAcceleratorAABBTree(ExtDamageAcceleratorAABBTree&); ExtDamageAcceleratorAABBTree& operator=(const ExtDamageAcceleratorAABBTree& tree); // Tree node struct Node { int child[2]; uint32_t first; uint32_t last; nvidia::NvBounds3 pointsBound; nvidia::NvBounds3 segmentsBound; }; void build(const NvBlastAsset* asset); int createNode(uint32_t startIdx, uint32_t endIdx, uint32_t depth); void pushResult(ResultCallback& callback, uint32_t pointIndex) const { callback.push(pointIndex, m_bonds[pointIndex].node0, m_bonds[pointIndex].node1); } void findInBounds(const nvidia::NvBounds3& bounds, ResultCallback& callback, bool segments) const; void findPointsInBounds(const Node& node, ResultCallback& callback, const 
nvidia::NvBounds3& bounds) const; void findSegmentsInBounds(const Node& node, ResultCallback& callback, const nvidia::NvBounds3& bounds) const; void findSegmentsPlaneIntersected(const Node& node, ResultCallback& callback, const nvidia::NvPlane& plane) const; void fillDebugBuffer(const Node& node, int currentDepth, int depth, bool segments); //////// data //////// Node* m_root; Array<Node>::type m_nodes; Array<uint32_t>::type m_indices; Array<nvidia::NvVec3>::type m_points; struct Segment { nvidia::NvVec3 p0; nvidia::NvVec3 p1; }; Array<Segment>::type m_segments; struct BondData { uint32_t node0; uint32_t node1; }; Array<BondData>::type m_bonds; Array<Nv::Blast::DebugLine>::type m_debugLineBuffer; Array<char>::type m_scratch; }; } // namespace Blast } // namespace Nv
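// Hedged usage sketch. The ResultCallback / QueryBondData shapes are assumed from how the damage shaders in this extension drive the accelerator (see AcceleratorCallback in NvBlastExtDamageShaders.cpp); `asset`, `center` and `extents` are placeholders:
//
//     class CollectBonds : public ExtDamageAcceleratorInternal::ResultCallback
//     {
//     public:
//         CollectBonds() : ExtDamageAcceleratorInternal::ResultCallback(m_buffer, 128) {}
//         virtual void processResults(const ExtDamageAcceleratorInternal::QueryBondData* bonds, uint32_t count) override
//         {
//             for (uint32_t i = 0; i < count; ++i)
//             {
//                 // bonds[i].bond is the blast bond index, bonds[i].node0 / node1 its graph nodes
//             }
//         }
//     private:
//         ExtDamageAcceleratorInternal::QueryBondData m_buffer[128];
//     };
//
//     ExtDamageAcceleratorAABBTree* tree = ExtDamageAcceleratorAABBTree::create(asset);
//     CollectBonds cb;
//     tree->findBondCentroidsInBounds(nvidia::NvBounds3::centerExtents(center, extents), cb);
//     tree->release();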
4,860
C
31.844594
127
0.695885
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/shaders/NvBlastExtDamageShaders.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastExtDamageShaders.h" #include "NvBlastExtDamageAcceleratorInternal.h" #include "NvBlastIndexFns.h" #include "NvBlastMath.h" #include "NvBlastGeometry.h" #include "NvBlastAssert.h" #include "NvBlastFixedQueue.h" #include "NvBlastFixedBitmap.h" #include "NvBlast.h" #include <cmath> // for abs() on linux #include <new> using namespace Nv::Blast; using namespace Nv::Blast::VecMath; using namespace nvidia; /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Profiles /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// typedef float(*ProfileFunction)(float, float, float, float); float falloffProfile(float min, float max, float x, float f = 1.0f) { if (x > max) return 0.0f; if (x < min) return f; float y = 1.0f - (x - min) / (max - min); return y * f; } float cutterProfile(float min, float max, float x, float f = 1.0f) { if (x > max || x < min) return 0.0f; return f; } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Damage Functions /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// typedef float(*DamageFunction)(const float pos[3], const void* damageDescBuffer); template <ProfileFunction profileFn, typename DescT = NvBlastExtRadialDamageDesc> float pointDistanceDamage(const float pos[3], const void* damageDescBuffer) { const DescT& desc = *static_cast<const DescT*>(damageDescBuffer); float relativePosition[3]; sub(desc.position, pos, relativePosition); const float distance = sqrtf(dot(relativePosition, relativePosition)); const float damage = profileFn(desc.minRadius, desc.maxRadius, distance, desc.damage); return damage; } // Distance from point 'p' to line segment '(a, b)' float distanceToSegment(const float p[3], const float a[3], const float b[3]) { float v[3]; sub(b, a, v); float w[3]; 
sub(p, a, w); const float c1 = dot(v, w); if (c1 <= 0) return length(w); const float c2 = dot(v, v); if (c2 < c1) return dist(p, b); const float t = c1 / c2; mul(v, t); return dist(v, w); } template <ProfileFunction profileFn> float capsuleDistanceDamage(const float pos[3], const void* damageDesc) { const NvBlastExtCapsuleRadialDamageDesc& desc = *static_cast<const NvBlastExtCapsuleRadialDamageDesc*>(damageDesc); const float distance = distanceToSegment(pos, desc.position0, desc.position1); const float damage = profileFn(desc.minRadius, desc.maxRadius, distance, desc.damage); return damage; } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // AABB Functions /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// typedef NvBounds3(*BoundFunction)(const void* damageDesc); NvBounds3 sphereBounds(const void* damageDesc) { const NvBlastExtRadialDamageDesc& desc = *static_cast<const NvBlastExtRadialDamageDesc*>(damageDesc); const nvidia::NvVec3& p = (reinterpret_cast<const nvidia::NvVec3&>(desc.position)); return nvidia::NvBounds3::centerExtents(p, nvidia::NvVec3(desc.maxRadius, desc.maxRadius, desc.maxRadius)); } NvBounds3 capsuleBounds(const void* damageDesc) { const NvBlastExtCapsuleRadialDamageDesc& desc = *static_cast<const NvBlastExtCapsuleRadialDamageDesc*>(damageDesc); const nvidia::NvVec3& p0 = (reinterpret_cast<const nvidia::NvVec3&>(desc.position0)); const nvidia::NvVec3& p1 = (reinterpret_cast<const nvidia::NvVec3&>(desc.position1)); NvBounds3 b = NvBounds3::empty(); b.include(p0); b.include(p1); b.fattenFast(desc.maxRadius); return b; } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Radial Graph Shader Template /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template <DamageFunction damageFn, BoundFunction boundsFn> void RadialProfileGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const void* params) { const uint32_t* graphNodeIndexLinks = actor->graphNodeIndexLinks; const uint32_t firstGraphNodeIndex = actor->firstGraphNodeIndex; const uint32_t* adjacencyPartition = actor->adjacencyPartition; const uint32_t* adjacentNodeIndices = actor->adjacentNodeIndices; const uint32_t* adjacentBondIndices = actor->adjacentBondIndices; const NvBlastBond* assetBonds = actor->assetBonds; const float* familyBondHealths = actor->familyBondHealths; const NvBlastExtProgramParams* programParams = static_cast<const NvBlastExtProgramParams*>(params); uint32_t outCount = 0; auto processBondFn = [&](uint32_t bondIndex, uint32_t node0, uint32_t node1) { // skip bonds that are already broken or were visited already // TODO: investigate why testing against health > -1.0f seems slower // could reuse the island edge bitmap instead if (canTakeDamage(familyBondHealths[bondIndex])) { const NvBlastBond& bond = assetBonds[bondIndex]; const float totalBondDamage = damageFn(bond.centroid, programParams->damageDesc); if (totalBondDamage > 0.0f) { NvBlastBondFractureData& outCommand = commandBuffers->bondFractures[outCount++]; outCommand.nodeIndex0 = node0; outCommand.nodeIndex1 = node1; outCommand.health = totalBondDamage; } } }; const ExtDamageAcceleratorInternal* damageAccelerator = programParams->accelerator ? 
static_cast<const ExtDamageAcceleratorInternal*>(programParams->accelerator) : nullptr; const uint32_t ACTOR_MINIMUM_NODE_COUNT_TO_ACCELERATE = actor->assetNodeCount / 3; if (damageAccelerator && actor->graphNodeCount > ACTOR_MINIMUM_NODE_COUNT_TO_ACCELERATE) { nvidia::NvBounds3 bounds = boundsFn(programParams->damageDesc); const uint32_t CALLBACK_BUFFER_SIZE = 1000; class AcceleratorCallback : public ExtDamageAcceleratorInternal::ResultCallback { public: AcceleratorCallback(NvBlastFractureBuffers* commandBuffers, uint32_t& outCount, const NvBlastGraphShaderActor* actor, const NvBlastExtProgramParams* programParams) : ExtDamageAcceleratorInternal::ResultCallback(m_buffer, CALLBACK_BUFFER_SIZE), m_actor(actor), m_commandBuffers(commandBuffers), m_outCount(outCount), m_programParams(programParams) { } virtual void processResults(const ExtDamageAcceleratorInternal::QueryBondData* bondBuffer, uint32_t count) override { for (uint32_t i = 0; i < count; i++) { const ExtDamageAcceleratorInternal::QueryBondData& bondData = bondBuffer[i]; if (m_actor->nodeActorIndices[bondData.node0] == m_actor->actorIndex) { if (canTakeDamage(m_actor->familyBondHealths[bondData.bond])) { const NvBlastBond& bond = m_actor->assetBonds[bondData.bond]; const float totalBondDamage = damageFn(bond.centroid, m_programParams->damageDesc); if (totalBondDamage > 0.0f) { NvBlastBondFractureData& outCommand = m_commandBuffers->bondFractures[m_outCount++]; outCommand.nodeIndex0 = bondData.node0; outCommand.nodeIndex1 = bondData.node1; outCommand.health = totalBondDamage; } } } } } private: const NvBlastGraphShaderActor* m_actor; NvBlastFractureBuffers* m_commandBuffers; uint32_t& m_outCount; const NvBlastExtProgramParams* m_programParams; ExtDamageAcceleratorInternal::QueryBondData m_buffer[CALLBACK_BUFFER_SIZE]; }; AcceleratorCallback cb(commandBuffers, outCount, actor, programParams); damageAccelerator->findBondCentroidsInBounds(bounds, cb); } else { uint32_t currentNodeIndex = firstGraphNodeIndex; while (!Nv::Blast::isInvalidIndex(currentNodeIndex)) { for (uint32_t adj = adjacencyPartition[currentNodeIndex]; adj < adjacencyPartition[currentNodeIndex + 1]; adj++) { uint32_t adjacentNodeIndex = adjacentNodeIndices[adj]; if (currentNodeIndex < adjacentNodeIndex) { uint32_t bondIndex = adjacentBondIndices[adj]; processBondFn(bondIndex, currentNodeIndex, adjacentNodeIndex); } } currentNodeIndex = graphNodeIndexLinks[currentNodeIndex]; } } commandBuffers->bondFractureCount = outCount; commandBuffers->chunkFractureCount = 0; } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Radial Single Shader Template /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template <DamageFunction damageFn> void RadialProfileSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const void* params) { uint32_t chunkFractureCount = 0; uint32_t chunkFractureCountMax = commandBuffers->chunkFractureCount; const uint32_t chunkIndex = actor->chunkIndex; const NvBlastChunk* assetChunks = actor->assetChunks; const NvBlastChunk& chunk = assetChunks[chunkIndex]; const NvBlastExtProgramParams* programParams = static_cast<const NvBlastExtProgramParams*>(params); const float totalDamage = damageFn(chunk.centroid, programParams->damageDesc); if (totalDamage > 0.0f && chunkFractureCount < chunkFractureCountMax) { NvBlastChunkFractureData& frac = 
commandBuffers->chunkFractures[chunkFractureCount++]; frac.chunkIndex = chunkIndex; frac.health = totalDamage; } commandBuffers->bondFractureCount = 0; commandBuffers->chunkFractureCount = chunkFractureCount; } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Radial Shaders Instantiation /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void NvBlastExtFalloffGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const void* params) { RadialProfileGraphShader<pointDistanceDamage<falloffProfile>, sphereBounds>(commandBuffers, actor, params); } void NvBlastExtFalloffSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const void* params) { RadialProfileSubgraphShader<pointDistanceDamage<falloffProfile>>(commandBuffers, actor, params); } void NvBlastExtCutterGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const void* params) { RadialProfileGraphShader<pointDistanceDamage<cutterProfile>, sphereBounds>(commandBuffers, actor, params); } void NvBlastExtCutterSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const void* params) { RadialProfileSubgraphShader<pointDistanceDamage<cutterProfile>>(commandBuffers, actor, params); } void NvBlastExtCapsuleFalloffGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const void* params) { RadialProfileGraphShader<capsuleDistanceDamage<falloffProfile>, capsuleBounds>(commandBuffers, actor, params); } void NvBlastExtCapsuleFalloffSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const void* params) { RadialProfileSubgraphShader<capsuleDistanceDamage<falloffProfile>>(commandBuffers, actor, params); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Shear Shader /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void NvBlastExtShearGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const void* params) { uint32_t chunkFractureCount = 0; uint32_t chunkFractureCountMax = commandBuffers->chunkFractureCount; uint32_t bondFractureCount = 0; uint32_t bondFractureCountMax = commandBuffers->bondFractureCount; const NvBlastExtProgramParams* programParams = static_cast<const NvBlastExtProgramParams*>(params); const NvBlastExtShearDamageDesc& desc = *static_cast<const NvBlastExtShearDamageDesc*>(programParams->damageDesc); const uint32_t* graphNodeIndexLinks = actor->graphNodeIndexLinks; const uint32_t firstGraphNodeIndex = actor->firstGraphNodeIndex; const uint32_t* chunkIndices = actor->chunkIndices; const uint32_t* adjacencyPartition = actor->adjacencyPartition; const uint32_t* adjacentNodeIndices = actor->adjacentNodeIndices; const uint32_t* adjacentBondIndices = actor->adjacentBondIndices; const NvBlastBond* assetBonds = actor->assetBonds; const NvBlastChunk* assetChunks = actor->assetChunks; const float* familyBondHealths = actor->familyBondHealths; const float* supportChunkHealths = actor->supportChunkHealths; uint32_t closestNode = findClosestNode(desc.position , firstGraphNodeIndex, graphNodeIndexLinks , adjacencyPartition, adjacentNodeIndices, adjacentBondIndices , assetBonds, familyBondHealths , 
assetChunks, supportChunkHealths, chunkIndices); if (!isInvalidIndex(chunkIndices[closestNode])) { uint32_t nodeIndex = closestNode; float maxDist = 0.0f; uint32_t nextNode = invalidIndex<uint32_t>(); if (chunkFractureCount < chunkFractureCountMax) { const uint32_t chunkIndex = chunkIndices[nodeIndex]; const NvBlastChunk& chunk = assetChunks[chunkIndex]; NvBlastChunkFractureData& frac = commandBuffers->chunkFractures[chunkFractureCount++]; frac.chunkIndex = chunkIndex; frac.health = pointDistanceDamage<falloffProfile, NvBlastExtShearDamageDesc>(chunk.centroid, programParams->damageDesc); } do { const uint32_t startIndex = adjacencyPartition[nodeIndex]; const uint32_t stopIndex = adjacencyPartition[nodeIndex + 1]; for (uint32_t adjacentNodeIndex = startIndex; adjacentNodeIndex < stopIndex; adjacentNodeIndex++) { const uint32_t neighbourIndex = adjacentNodeIndices[adjacentNodeIndex]; const uint32_t bondIndex = adjacentBondIndices[adjacentNodeIndex]; const NvBlastBond& bond = assetBonds[bondIndex]; if (!canTakeDamage(familyBondHealths[bondIndex])) continue; float shear = 1 * std::abs(1 - std::abs(VecMath::dot(desc.normal, bond.normal))); float d[3]; VecMath::sub(bond.centroid, desc.position, d); float ahead = VecMath::dot(d, desc.normal); if (ahead > maxDist) { maxDist = ahead; nextNode = neighbourIndex; } const float damage = pointDistanceDamage<falloffProfile, NvBlastExtShearDamageDesc>(bond.centroid, programParams->damageDesc); if (damage > 0.0f && bondFractureCount < bondFractureCountMax) { NvBlastBondFractureData& frac = commandBuffers->bondFractures[bondFractureCount++]; frac.userdata = bond.userData; frac.nodeIndex0 = nodeIndex; frac.nodeIndex1 = neighbourIndex; frac.health = shear * damage; } } if (nodeIndex == nextNode) break; nodeIndex = nextNode; } while (!isInvalidIndex(nextNode)); } commandBuffers->bondFractureCount = bondFractureCount; commandBuffers->chunkFractureCount = chunkFractureCount; } void NvBlastExtShearSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const void* params) { RadialProfileSubgraphShader<pointDistanceDamage<falloffProfile, NvBlastExtShearDamageDesc>>(commandBuffers, actor, params); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Triangle Intersection Damage /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// #define SMALL_NUMBER (1.e-4f) bool intersectSegmentTriangle(const NvVec3& p, const NvVec3& q, const NvVec3& a, const NvVec3& b, const NvVec3& c, const NvPlane& trianglePlane) { const NvVec3 N = trianglePlane.n; const float D = trianglePlane.d; NvVec3 intersectPoint; float t = (-D - (p.dot(N))) / ((q - p).dot(N)); // If the parameter value is not between 0 and 1, there is no intersection if (t > -SMALL_NUMBER && t < 1.f + SMALL_NUMBER) { intersectPoint = p + t * (q - p); } else { return false; } // Compute the normal of the triangle const NvVec3 TriNorm = (b - a).cross(c - a); // Compute twice area of triangle ABC const float AreaABCInv = 1.0f / (N.dot(TriNorm)); // Compute v contribution const float AreaPBC = N.dot((b - intersectPoint).cross(c - intersectPoint)); const float v = AreaPBC * AreaABCInv; if (v <= 0.f) return false; // Compute w contribution const float AreaPCA = N.dot((c - intersectPoint).cross(a - intersectPoint)); const float w = AreaPCA * AreaABCInv; if (w <= 0.f) return false; const float u = 1.0f - v - w; return u > 0.f; } void 
NvBlastExtTriangleIntersectionGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const void* params) { const uint32_t* graphNodeIndexLinks = actor->graphNodeIndexLinks; const uint32_t firstGraphNodeIndex = actor->firstGraphNodeIndex; const uint32_t* adjacencyPartition = actor->adjacencyPartition; const uint32_t* adjacentNodeIndices = actor->adjacentNodeIndices; const uint32_t* adjacentBondIndices = actor->adjacentBondIndices; const NvBlastBond* assetBonds = actor->assetBonds; const NvBlastChunk* assetChunks = actor->assetChunks; const uint32_t* chunkIndices = actor->chunkIndices; const float* familyBondHealths = actor->familyBondHealths; const NvBlastExtProgramParams* programParams = static_cast<const NvBlastExtProgramParams*>(params); const NvBlastExtTriangleIntersectionDamageDesc& desc = *static_cast<const NvBlastExtTriangleIntersectionDamageDesc*>(programParams->damageDesc); const nvidia::NvVec3& t0 = (reinterpret_cast<const nvidia::NvVec3&>(desc.position0)); const nvidia::NvVec3& t1 = (reinterpret_cast<const nvidia::NvVec3&>(desc.position1)); const nvidia::NvVec3& t2 = (reinterpret_cast<const nvidia::NvVec3&>(desc.position2)); const NvPlane trianglePlane(t0, t1, t2); uint32_t outCount = 0; const ExtDamageAcceleratorInternal* damageAccelerator = programParams->accelerator ? static_cast<const ExtDamageAcceleratorInternal*>(programParams->accelerator) : nullptr; const uint32_t ACTOR_MINIMUM_NODE_COUNT_TO_ACCELERATE = actor->assetNodeCount / 3; if (damageAccelerator && actor->graphNodeCount > ACTOR_MINIMUM_NODE_COUNT_TO_ACCELERATE) { const uint32_t CALLBACK_BUFFER_SIZE = 1000; class AcceleratorCallback : public ExtDamageAcceleratorInternal::ResultCallback { public: AcceleratorCallback(NvBlastFractureBuffers* commandBuffers, uint32_t& outCount, const NvBlastGraphShaderActor* actor, const NvBlastExtTriangleIntersectionDamageDesc& desc) : ExtDamageAcceleratorInternal::ResultCallback(m_buffer, CALLBACK_BUFFER_SIZE), m_actor(actor), m_commandBuffers(commandBuffers), m_outCount(outCount), m_desc(desc) { } virtual void processResults(const ExtDamageAcceleratorInternal::QueryBondData* bondBuffer, uint32_t count) override { const nvidia::NvVec3& t0 = (reinterpret_cast<const nvidia::NvVec3&>(m_desc.position0)); const nvidia::NvVec3& t1 = (reinterpret_cast<const nvidia::NvVec3&>(m_desc.position1)); const nvidia::NvVec3& t2 = (reinterpret_cast<const nvidia::NvVec3&>(m_desc.position2)); const NvPlane trianglePlane(t0, t1, t2); for (uint32_t i = 0; i < count; i++) { const ExtDamageAcceleratorInternal::QueryBondData& bondData = bondBuffer[i]; if (m_actor->nodeActorIndices[bondData.node0] == m_actor->actorIndex) { if (canTakeDamage(m_actor->familyBondHealths[bondData.bond])) { const NvBlastBond& bond = m_actor->assetBonds[bondData.bond]; const uint32_t chunkIndex0 = m_actor->chunkIndices[bondData.node0]; const uint32_t chunkIndex1 = m_actor->chunkIndices[bondData.node1]; const nvidia::NvVec3& c0 = (reinterpret_cast<const nvidia::NvVec3&>(m_actor->assetChunks[chunkIndex0].centroid)); const NvVec3& normal = (reinterpret_cast<const NvVec3&>(bond.normal)); const NvVec3& bondCentroid = (reinterpret_cast<const NvVec3&>(bond.centroid)); const nvidia::NvVec3& c1 = isInvalidIndex(chunkIndex1) ? 
(c0 + normal * (bondCentroid - c0).dot(normal)) : (reinterpret_cast<const nvidia::NvVec3&>(m_actor->assetChunks[chunkIndex1].centroid)); if(intersectSegmentTriangle(c0, c1, t0, t1, t2, trianglePlane)) { NvBlastBondFractureData& outCommand = m_commandBuffers->bondFractures[m_outCount++]; outCommand.nodeIndex0 = bondData.node0; outCommand.nodeIndex1 = bondData.node1; outCommand.health = m_desc.damage; } } } } } private: const NvBlastGraphShaderActor* m_actor; NvBlastFractureBuffers* m_commandBuffers; uint32_t& m_outCount; const NvBlastExtTriangleIntersectionDamageDesc& m_desc; ExtDamageAcceleratorInternal::QueryBondData m_buffer[CALLBACK_BUFFER_SIZE]; }; AcceleratorCallback cb(commandBuffers, outCount, actor, desc); damageAccelerator->findBondSegmentsPlaneIntersected(trianglePlane, cb); } else { uint32_t currentNodeIndex = firstGraphNodeIndex; while (!Nv::Blast::isInvalidIndex(currentNodeIndex)) { for (uint32_t adj = adjacencyPartition[currentNodeIndex]; adj < adjacencyPartition[currentNodeIndex + 1]; adj++) { uint32_t adjacentNodeIndex = adjacentNodeIndices[adj]; if (currentNodeIndex < adjacentNodeIndex) { uint32_t bondIndex = adjacentBondIndices[adj]; // skip bonds that are already broken or were visited already // TODO: investigate why testing against health > -1.0f seems slower // could reuse the island edge bitmap instead if (canTakeDamage(familyBondHealths[bondIndex])) { const NvBlastBond& bond = assetBonds[bondIndex]; const uint32_t chunkIndex0 = chunkIndices[currentNodeIndex]; const uint32_t chunkIndex1 = chunkIndices[adjacentNodeIndex]; const nvidia::NvVec3& c0 = (reinterpret_cast<const nvidia::NvVec3&>(assetChunks[chunkIndex0].centroid)); const NvVec3& normal = (reinterpret_cast<const NvVec3&>(bond.normal)); const NvVec3& bondCentroid = (reinterpret_cast<const NvVec3&>(bond.centroid)); const nvidia::NvVec3& c1 = isInvalidIndex(chunkIndex1) ? 
(c0 + normal * (bondCentroid - c0).dot(normal)) : (reinterpret_cast<const nvidia::NvVec3&>(assetChunks[chunkIndex1].centroid)); if (intersectSegmentTriangle(c0, c1, t0, t1, t2, trianglePlane)) { NvBlastBondFractureData& outCommand = commandBuffers->bondFractures[outCount++]; outCommand.nodeIndex0 = currentNodeIndex; outCommand.nodeIndex1 = adjacentNodeIndex; outCommand.health = desc.damage; } } } } currentNodeIndex = graphNodeIndexLinks[currentNodeIndex]; } } commandBuffers->bondFractureCount = outCount; commandBuffers->chunkFractureCount = 0; } void NvBlastExtTriangleIntersectionSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const void* params) { uint32_t chunkFractureCount = 0; uint32_t chunkFractureCountMax = commandBuffers->chunkFractureCount; const uint32_t chunkIndex = actor->chunkIndex; const NvBlastChunk* assetChunks = actor->assetChunks; const NvBlastChunk& chunk = assetChunks[chunkIndex]; const NvBlastExtProgramParams* programParams = static_cast<const NvBlastExtProgramParams*>(params); const NvBlastExtTriangleIntersectionDamageDesc& desc = *static_cast<const NvBlastExtTriangleIntersectionDamageDesc*>(programParams->damageDesc); const nvidia::NvVec3& t0 = (reinterpret_cast<const nvidia::NvVec3&>(desc.position0)); const nvidia::NvVec3& t1 = (reinterpret_cast<const nvidia::NvVec3&>(desc.position1)); const nvidia::NvVec3& t2 = (reinterpret_cast<const nvidia::NvVec3&>(desc.position2)); const NvPlane trianglePlane(t0, t1, t2); for (uint32_t subChunkIndex = chunk.firstChildIndex; subChunkIndex < chunk.childIndexStop; subChunkIndex++) { const nvidia::NvVec3& c0 = (reinterpret_cast<const nvidia::NvVec3&>(assetChunks[subChunkIndex].centroid)); const nvidia::NvVec3& c1 = (reinterpret_cast<const nvidia::NvVec3&>(assetChunks[subChunkIndex + 1].centroid)); if (chunkFractureCount < chunkFractureCountMax && intersectSegmentTriangle(c0, c1, t0, t1, t2, trianglePlane)) { NvBlastChunkFractureData& frac = commandBuffers->chunkFractures[chunkFractureCount++]; frac.chunkIndex = chunkIndex; frac.health = desc.damage; break; } } commandBuffers->bondFractureCount = 0; commandBuffers->chunkFractureCount = chunkFractureCount; } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Impact Spread Shader /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void NvBlastExtImpactSpreadGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const void* params) { uint32_t bondFractureCount = 0; uint32_t bondFractureCountMax = commandBuffers->bondFractureCount; const NvBlastExtProgramParams* programParams = static_cast<const NvBlastExtProgramParams*>(params); const NvBlastExtImpactSpreadDamageDesc& desc = *static_cast<const NvBlastExtImpactSpreadDamageDesc*>(programParams->damageDesc); const uint32_t* graphNodeIndexLinks = actor->graphNodeIndexLinks; const uint32_t firstGraphNodeIndex = actor->firstGraphNodeIndex; const uint32_t* chunkIndices = actor->chunkIndices; const uint32_t* adjacencyPartition = actor->adjacencyPartition; const uint32_t* adjacentNodeIndices = actor->adjacentNodeIndices; const uint32_t* adjacentBondIndices = actor->adjacentBondIndices; const NvBlastBond* assetBonds = actor->assetBonds; const NvBlastChunk* assetChunks = actor->assetChunks; const float* familyBondHealths = actor->familyBondHealths; const float* supportChunkHealths = actor->supportChunkHealths; // Find nearest 
chunk. uint32_t closestNode = findClosestNode(desc.position , firstGraphNodeIndex, graphNodeIndexLinks , adjacencyPartition, adjacentNodeIndices, adjacentBondIndices , assetBonds, familyBondHealths , assetChunks, supportChunkHealths, chunkIndices); // Breadth-first support graph traversal. For radial falloff metric distance is measured along the edges of the graph ExtDamageAcceleratorInternal* damageAccelerator = programParams->accelerator ? static_cast<ExtDamageAcceleratorInternal*>(programParams->accelerator) : nullptr; NVBLAST_ASSERT_WITH_MESSAGE(damageAccelerator, "This shader requires damage accelerator passed"); if (!isInvalidIndex(chunkIndices[closestNode]) && damageAccelerator) { struct NodeData { uint32_t index; float distance; }; // Calculating scratch size and requesting it from the accelerator const uint32_t bondCount = actor->adjacencyPartition[actor->assetNodeCount]; const size_t nodeQueueSize = align16(FixedQueue<NodeData>::requiredMemorySize(actor->graphNodeCount)); const size_t visitedBitmapSize = align16(FixedBitmap::requiredMemorySize(bondCount)); const size_t scratchSize = 16 + nodeQueueSize + visitedBitmapSize; void* scratch = damageAccelerator->getImmediateScratch(scratchSize); // prepare intermediate data on scratch scratch = (void*)align16((size_t)scratch); // Bump to 16-byte alignment FixedQueue<NodeData>* nodeQueue = new (scratch)FixedQueue<NodeData>(actor->graphNodeCount); scratch = pointerOffset(scratch, align16(nodeQueueSize)); FixedBitmap* visitedBitmap = new (scratch)FixedBitmap(bondCount); scratch = pointerOffset(scratch, align16(FixedBitmap::requiredMemorySize(bondCount))); // initalize traversal nodeQueue->pushBack({ closestNode, 0.f }); visitedBitmap->clear(); while (!nodeQueue->empty()) { NodeData currentNode = nodeQueue->popFront(); const uint32_t startIndex = adjacencyPartition[currentNode.index]; const uint32_t stopIndex = adjacencyPartition[currentNode.index + 1]; for (uint32_t adjacentNodeIndex = startIndex; adjacentNodeIndex < stopIndex; adjacentNodeIndex++) { const uint32_t neighbourIndex = adjacentNodeIndices[adjacentNodeIndex]; const uint32_t bondIndex = adjacentBondIndices[adjacentNodeIndex]; const NvBlastBond& bond = assetBonds[bondIndex]; const NvVec3& bondCentroid = (reinterpret_cast<const NvVec3&>(bond.centroid)); if (!canTakeDamage(familyBondHealths[bondIndex])) continue; if (visitedBitmap->test(bondIndex)) continue; visitedBitmap->set(bondIndex); const uint32_t chunkIndex0 = chunkIndices[currentNode.index]; const uint32_t chunkIndex1 = chunkIndices[neighbourIndex]; const nvidia::NvVec3& c0 = reinterpret_cast<const nvidia::NvVec3&>(assetChunks[chunkIndex0].centroid); bool isNeighbourWorldChunk = isInvalidIndex(chunkIndex1); const nvidia::NvVec3& c1 = isNeighbourWorldChunk ? bondCentroid : (reinterpret_cast<const nvidia::NvVec3&>(assetChunks[chunkIndex1].centroid)); const float distance = (c1 - c0).magnitude() * (isNeighbourWorldChunk ? 
2.f : 1.f); float totalDistance = currentNode.distance + distance; float totalDamage = desc.damage * falloffProfile(desc.minRadius, desc.maxRadius, totalDistance); if (totalDamage > 0.0f && bondFractureCount < bondFractureCountMax) { NvBlastBondFractureData& frac = commandBuffers->bondFractures[bondFractureCount++]; frac.userdata = bond.userData; frac.nodeIndex0 = currentNode.index; frac.nodeIndex1 = neighbourIndex; frac.health = totalDamage; if (!isNeighbourWorldChunk) { nodeQueue->pushBack({ neighbourIndex, totalDistance }); } } } } } commandBuffers->bondFractureCount = bondFractureCount; commandBuffers->chunkFractureCount = 0; } void NvBlastExtImpactSpreadSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const void* params) { uint32_t chunkFractureCount = 0; uint32_t chunkFractureCountMax = commandBuffers->chunkFractureCount; const uint32_t chunkIndex = actor->chunkIndex; const NvBlastExtProgramParams* programParams = static_cast<const NvBlastExtProgramParams*>(params); const NvBlastExtImpactSpreadDamageDesc& desc = *static_cast<const NvBlastExtImpactSpreadDamageDesc*>(programParams->damageDesc); if (chunkFractureCount < chunkFractureCountMax) { NvBlastChunkFractureData& frac = commandBuffers->chunkFractures[chunkFractureCount++]; frac.chunkIndex = chunkIndex; frac.health = desc.damage; } commandBuffers->bondFractureCount = 0; commandBuffers->chunkFractureCount = chunkFractureCount; }
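// Illustrative sketch of the damage profile contract used above. The real falloffProfile is
// defined earlier in this file (and, as the impact-spread shader's three-argument call shows, it
// lets the damage argument default); the linear version below is only an assumption, included to
// clarify how pointDistanceDamage/capsuleDistanceDamage turn a distance into attenuated damage
// via profileFn(minRadius, maxRadius, distance, damage).
#if 0
static float linearFalloffSketch(float minRadius, float maxRadius, float distance, float damage = 1.0f)
{
    if (distance <= minRadius) return damage;   // full damage inside the inner radius
    if (distance >= maxRadius) return 0.0f;     // no damage beyond the outer radius
    return damage * (maxRadius - distance) / (maxRadius - minRadius);   // linear attenuation in between
}
#endif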
36,490
C++
47.460823
185
0.626528
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/NvBlastExtSerializationInternal.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "NvBlastExtSerialization.h" #include <cstring> #define ExtSerializerBoilerplate(_name, _description, _objectTypeID, _encodingID) \ virtual const char* getName() const override { return _name; } \ virtual const char* getDescription() const override { return _description; } \ virtual uint32_t getObjectTypeID() const override { return _objectTypeID; } \ virtual uint32_t getEncodingID() const override { return _encodingID; } #define ExtSerializerReadOnly(_name) \ virtual bool isReadOnly() const override { return true; } \ virtual uint64_t serializeIntoBuffer \ ( \ void*& buffer, \ ExtSerialization::BufferProvider& bufferProvider, \ const void* object, \ uint64_t offset = 0 \ ) override \ { \ NVBLAST_LOG_WARNING(#_name "::serializeIntoBuffer: serializer is read-only."); \ NV_UNUSED(buffer); \ NV_UNUSED(bufferProvider); \ NV_UNUSED(object); \ NV_UNUSED(offset); \ return 0; \ } #define ExtSerializerDefaultFactoryAndRelease(_classname) \ static ExtSerializer* create() \ { \ return NVBLAST_NEW(_classname) (); \ } \ virtual void release() override \ { \ NVBLAST_DELETE(this, _classname); \ } namespace Nv { namespace Blast { /** Serializer internal interface */ class ExtSerializer { public: virtual ~ExtSerializer() {} /** return the name of this serializer. */ virtual const char* getName() const = 0; /** return a description of this serializer. */ virtual const char* getDescription() const = 0; /** return an identifier for the type of object handled. */ virtual uint32_t getObjectTypeID() const = 0; /** return an identifier for serialization format. */ virtual uint32_t getEncodingID() const = 0; /** Whether or not this serializer supports writing. Legacy formats, for example, may not. \return true iff this serialization does not support writing. */ virtual bool isReadOnly() const { return false; } /** Deserialize from a buffer into a newly allocated object. \param[in] buffer Pointer to the buffer to read. \param[in] size Size of the buffer to read. 
\return object pointer; returns null if failed to deserialize. */ virtual void* deserializeFromBuffer(const void* buffer, uint64_t size) = 0; /** Serialize into a buffer. Allocates the buffer internally using the ExtSerialization::BufferProvider callack interface. \param[out] buffer Pointer to the buffer created. \param[in] bufferProvider The buffer provider callback interface to use. \param[in] object Object pointer. \return the number of bytes serialized into the buffer (zero if unsuccessful). */ virtual uint64_t serializeIntoBuffer(void*& buffer, ExtSerialization::BufferProvider& bufferProvider, const void* object, uint64_t offset = 0) = 0; /** Release the serializer and free associated memory. */ virtual void release() = 0; }; /** Internal serialization manager interface */ class ExtSerializationInternal : public ExtSerialization { public: /** Internal interfaces to register and unregister a serializer, used by modules to automatically register all of their serializers with a serialization manager. */ virtual bool registerSerializer(ExtSerializer& serializer) = 0; virtual bool unregisterSerializer(ExtSerializer& serializer) = 0; /** Find a registered serializer for the given object type and encoding. \param[in] objectTypeID ID for the requested object type. \param[in] encodingID ID for the requested encoding (see EncodingID). \return a registered serializer if found, NULL otherwise. */ virtual ExtSerializer* findSerializer(uint32_t objectTypeID, uint32_t encodingID) = 0; //// Enums //// enum { HeaderSize = 128 }; }; template<typename Factory, size_t N> size_t ExtSerializationLoadSet(Nv::Blast::ExtSerializationInternal& serialization, Factory(&factories)[N]) { size_t count = 0; for (auto f : factories) { Nv::Blast::ExtSerializer* serializer = f(); if (serializer != nullptr) { if (serialization.registerSerializer(*serializer)) { ++count; } else { NVBLAST_LOG_ERROR("Nv::Blast::ExtSerializationLoadSet: failed to register serailizer:"); NVBLAST_LOG_ERROR(serializer->getName()); serializer->release(); } } else { NVBLAST_LOG_ERROR("Nv::Blast::ExtSerializationLoadSet: failed to create serailizer."); } } return count; } class ExtIStream { public: enum Flags { LittleEndian = (1 << 0), Fail = (1 << 1) }; ExtIStream(const void* buffer, size_t size) : m_buf(reinterpret_cast<const char*>(buffer)), m_flags(0) { m_cur = m_buf; m_end = m_buf + size; const uint16_t x = LittleEndian; m_flags = *reinterpret_cast<const char*>(&x); } bool advance(ptrdiff_t diff) { m_cur += diff; if (m_cur < m_buf) { m_cur = m_buf; m_flags |= Fail; return false; } else if (m_cur > m_end) { m_cur = m_end; m_flags |= Fail; return false; } return true; } const void* view() { return m_cur; } bool read(void* buffer, size_t size) { if (!canRead(size)) return false; std::memcpy(buffer, m_cur, size); m_cur += size; return true; } size_t tellg() const { return m_cur - m_buf; } size_t left() const { return m_end - m_cur; } bool eof() const { return m_cur >= m_end; } bool fail() const { return (m_flags & Fail) != 0; } private: const char* m_buf; const char* m_cur; const char* m_end; uint32_t m_flags; bool isLittleEndian() const { return (m_flags & LittleEndian) != 0; } bool canRead(size_t size) const { return m_cur + size <= m_end; } template<typename T> friend ExtIStream& operator >> (ExtIStream& s, T& x); }; template<typename T> NV_INLINE ExtIStream& operator >> (ExtIStream& s, T& x) { if (s.canRead(sizeof(T))) { if (s.isLittleEndian()) { x = *reinterpret_cast<const T*>(s.m_cur); s.m_cur += sizeof(T); } else { char* b = 
reinterpret_cast<char*>(&x) + sizeof(T); for (size_t n = sizeof(T); n--;) *--b = *s.m_cur++; } } else { s.m_flags |= ExtIStream::Fail; } return s; } } // namespace Blast } // namespace Nv
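// Usage sketch for ExtIStream (buffer and size are caller-supplied placeholders): serialized data
// is read as little-endian, operator>> byte-swaps on big-endian hosts, and a single fail() check
// after a batch of reads is enough because the Fail flag is sticky.
#if 0
static bool readTwoFieldsSketch(const void* buffer, size_t size)
{
    Nv::Blast::ExtIStream stream(buffer, size);
    uint32_t typeID = 0;
    uint32_t version = 0;
    stream >> typeID >> version;    // each read advances the cursor or sets the Fail flag on overrun
    return !stream.fail();
}
#endif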
9,883
C
32.965636
154
0.540828
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/NvBlastExtTkSerializerRAW.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastExtSerializationInternal.h" #include "NvBlastTkFramework.h" #include "NvBlastTkAsset.h" #include "NvBlast.h" namespace Nv { namespace Blast { // Legacy IDs struct ExtTkSerializationLegacyID { enum Enum { Framework = NVBLAST_FOURCC('T', 'K', 'F', 'W'), //!< TkFramework identifier token, used in serialization Asset = NVBLAST_FOURCC('A', 'S', 'S', 'T'), //!< TkAsset identifier token, used in serialization Family = NVBLAST_FOURCC('A', 'C', 'T', 'F'), //!< TkFamily identifier token, used in serialization }; }; // Legacy object format versions struct ExtTkSerializationLegacyAssetVersion { enum Enum { /** Initial version */ Initial, // New formats must come before Count. They should be given descriptive names with more information in comments. /** The number of serialized formats. */ Count, /** The current version. This should always be Count-1 */ Current = Count - 1 }; }; struct ExtTkSerializationLegacyFamilyVersion { enum Enum { /** Initial version */ Initial, // New formats must come before Count. They should be given descriptive names with more information in comments. /** The number of serialized formats. */ Count, /** The current version. 
This should always be Count-1 */ Current = Count - 1 }; }; static bool deserializeTkObjectHeader(uint32_t& legacyTypeID, uint32_t& legacyVersion, NvBlastID& objID, uint64_t& userIntData, ExtIStream& stream) { // Read framework ID uint32_t fwkID = 0; // Initialize to silence some compilers stream >> fwkID; if (fwkID != ExtTkSerializationLegacyID::Framework) { NVBLAST_LOG_ERROR("deserializeTkObjectHeader: stream does not contain a BlastTk legacy object."); return false; } // Read object class ID stream >> legacyTypeID; // Read object class version and ensure it's current stream >> legacyVersion; // Object ID stream.read(objID.data, sizeof(NvBlastID)); // Serializable user data uint32_t lsd, msd; stream >> lsd >> msd; userIntData = static_cast<uint64_t>(msd) << 32 | static_cast<uint64_t>(lsd); return !stream.fail(); } TkAsset* deserializeTkAsset(ExtIStream& stream, TkFramework& framework) { // Deserializer header uint32_t legacyTypeID; uint32_t legacyVersion; NvBlastID objID; uint64_t userIntData; if (!deserializeTkObjectHeader(legacyTypeID, legacyVersion, objID, userIntData, stream)) { return nullptr; } if (legacyTypeID != ExtTkSerializationLegacyID::Asset) { NVBLAST_LOG_ERROR("deserializeTkAsset: stream does not contain a BlastTk legacy asset."); return nullptr; } if (legacyVersion > ExtTkSerializationLegacyAssetVersion::Current) { NVBLAST_LOG_ERROR("deserializeTkAsset: stream contains a BlastTk legacy asset which is in an unknown version."); return nullptr; } // LL asset uint32_t assetSize; stream >> assetSize; NvBlastAsset* llAsset = static_cast<NvBlastAsset*>(NVBLAST_ALLOC_NAMED(assetSize, "deserializeTkAsset")); stream.read(reinterpret_cast<char*>(llAsset), assetSize); // Joint descs uint32_t jointDescCount; stream >> jointDescCount; std::vector<TkAssetJointDesc> jointDescs(jointDescCount); for (uint32_t i = 0; i < jointDescs.size(); ++i) { TkAssetJointDesc& jointDesc = jointDescs[i]; stream >> jointDesc.nodeIndices[0]; stream >> jointDesc.nodeIndices[1]; stream >> jointDesc.attachPositions[0].x; stream >> jointDesc.attachPositions[0].y; stream >> jointDesc.attachPositions[0].z; stream >> jointDesc.attachPositions[1].x; stream >> jointDesc.attachPositions[1].y; stream >> jointDesc.attachPositions[1].z; } if (stream.fail()) { NVBLAST_FREE(llAsset); return nullptr; } TkAsset* asset = framework.createAsset(llAsset, jointDescs.data(), (uint32_t)jointDescs.size(), true); NvBlastID zeroID; memset(zeroID.data, 0, sizeof(zeroID)); if (!memcmp(zeroID.data, objID.data, sizeof(NvBlastID))) { asset->setID(objID); } asset->userIntData = userIntData; return asset; } } // namespace Blast } // namespace Nv
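// Usage sketch (legacyBuffer/legacySize are placeholders for a buffer holding a legacy TkAsset
// stream): the reader wraps the buffer in an ExtIStream and returns nullptr on any identifier,
// version, or stream failure.
#if 0
static Nv::Blast::TkAsset* loadLegacyTkAssetSketch(const void* legacyBuffer, size_t legacySize,
                                                   Nv::Blast::TkFramework& framework)
{
    Nv::Blast::ExtIStream stream(legacyBuffer, legacySize);
    return Nv::Blast::deserializeTkAsset(stream, framework);
}
#endif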
5,965
C++
31.423913
147
0.682481
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/NvBlastExtSerializationCAPN.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "capnp/serialize.h" #include "NvBlastExtInputStream.h" #include "NvBlastExtOutputStream.h" #include "NvBlastArray.h" #include "NvBlastExtSerialization.h" namespace Nv { namespace Blast { template<typename TObject, typename TSerializationReader, typename TSerializationBuilder> class ExtSerializationCAPN { public: static TObject* deserializeFromBuffer(const unsigned char* input, uint64_t size); static TObject* deserializeFromStream(std::istream& inputStream); static uint64_t serializationBufferSize(const TObject* object); static bool serializeIntoBuffer(const TObject* object, unsigned char* buffer, uint64_t maxSize, uint64_t& usedSize); static bool serializeIntoBuffer(const TObject *object, unsigned char*& buffer, uint64_t& size, ExtSerialization::BufferProvider* bufferProvider = nullptr, uint64_t offset = 0); static bool serializeIntoStream(const TObject* object, std::ostream& outputStream); private: // Specialized static bool serializeIntoBuilder(TSerializationBuilder& objectBuilder, const TObject* object); static bool serializeIntoMessage(capnp::MallocMessageBuilder& message, const TObject* object); static TObject* deserializeFromStreamReader(capnp::InputStreamMessageReader& message); }; template<typename TObject, typename TSerializationReader, typename TSerializationBuilder> TObject* ExtSerializationCAPN<TObject, TSerializationReader, TSerializationBuilder>::deserializeFromBuffer(const unsigned char* input, uint64_t size) { kj::ArrayPtr<const unsigned char> source(input, size); kj::ArrayInputStream inputStream(source); Nv::Blast::Array<uint64_t>::type scratch(static_cast<uint32_t>(size)); kj::ArrayPtr<capnp::word> scratchArray((capnp::word*) scratch.begin(), size); capnp::InputStreamMessageReader message(inputStream, capnp::ReaderOptions(), scratchArray); return deserializeFromStreamReader(message); } template<typename TObject, typename TSerializationReader, typename TSerializationBuilder> TObject* ExtSerializationCAPN<TObject, TSerializationReader, 
TSerializationBuilder>::deserializeFromStream(std::istream& inputStream) { ExtInputStream readStream(inputStream); capnp::InputStreamMessageReader message(readStream); return deserializeFromStreamReader(message); } template<typename TObject, typename TSerializationReader, typename TSerializationBuilder> uint64_t ExtSerializationCAPN<TObject, TSerializationReader, TSerializationBuilder>::serializationBufferSize(const TObject* object) { capnp::MallocMessageBuilder message; bool result = serializeIntoMessage(message, object); if (result == false) { return 0; } return computeSerializedSizeInWords(message) * sizeof(uint64_t); } template<typename TObject, typename TSerializationReader, typename TSerializationBuilder> bool ExtSerializationCAPN<TObject, TSerializationReader, TSerializationBuilder>::serializeIntoBuffer(const TObject* object, unsigned char* buffer, uint64_t maxSize, uint64_t& usedSize) { capnp::MallocMessageBuilder message; bool result = serializeIntoMessage(message, object); if (result == false) { usedSize = 0; return false; } uint64_t messageSize = computeSerializedSizeInWords(message) * sizeof(uint64_t); if (maxSize < messageSize) { NVBLAST_LOG_ERROR("When attempting to serialize into an existing buffer, the provided buffer was too small."); usedSize = 0; return false; } kj::ArrayPtr<unsigned char> outputBuffer(buffer, maxSize); kj::ArrayOutputStream outputStream(outputBuffer); capnp::writeMessage(outputStream, message); usedSize = messageSize; return true; } template<typename TObject, typename TSerializationReader, typename TSerializationBuilder> bool ExtSerializationCAPN<TObject, TSerializationReader, TSerializationBuilder>::serializeIntoBuffer(const TObject *object, unsigned char*& buffer, uint64_t& size, ExtSerialization::BufferProvider* bufferProvider, uint64_t offset) { capnp::MallocMessageBuilder message; bool result = serializeIntoMessage(message, object); if (result == false) { buffer = nullptr; size = 0; return false; } const uint64_t blockSize = computeSerializedSizeInWords(message) * sizeof(uint64_t); size = blockSize + offset; buffer = static_cast<unsigned char *>(bufferProvider != nullptr ? bufferProvider->requestBuffer(size) : NVBLAST_ALLOC(size)); kj::ArrayPtr<unsigned char> outputBuffer(buffer + offset, blockSize); kj::ArrayOutputStream outputStream(outputBuffer); capnp::writeMessage(outputStream, message); return true; } template<typename TObject, typename TSerializationReader, typename TSerializationBuilder> bool ExtSerializationCAPN<TObject, TSerializationReader, TSerializationBuilder>::serializeIntoStream(const TObject* object, std::ostream& outputStream) { capnp::MallocMessageBuilder message; bool result = serializeIntoMessage(message, object); if (result == false) { return false; } ExtOutputStream blastOutputStream(outputStream); writeMessage(blastOutputStream, message); return true; } } // namespace Blast } // namespace Nv
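// Usage sketch of this wrapper with the instantiation used elsewhere in this module for the
// low-level asset (ExtSerializationCAPN<Asset, Serialization::Asset::Reader,
// Serialization::Asset::Builder>); the asset pointer is a placeholder. Without a buffer provider,
// serializeIntoBuffer allocates the output with NVBLAST_ALLOC.
#if 0
using LlAssetCapnSketch = Nv::Blast::ExtSerializationCAPN<Nv::Blast::Asset,
                                                          Nv::Blast::Serialization::Asset::Reader,
                                                          Nv::Blast::Serialization::Asset::Builder>;

static void capnRoundTripSketch(const Nv::Blast::Asset* asset)
{
    unsigned char* buffer = nullptr;
    uint64_t size = 0;
    if (LlAssetCapnSketch::serializeIntoBuffer(asset, buffer, size))
    {
        Nv::Blast::Asset* copy = LlAssetCapnSketch::deserializeFromBuffer(buffer, size);
        (void)copy;    // ownership handling omitted in this sketch
    }
}
#endif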
6,870
C
35.547872
230
0.760844
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/NvBlastExtSerialization.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastExtSerialization.h" #include "NvBlastExtLlSerialization.h" #include "NvBlastArray.h" #include "NvBlastHashMap.h" #include "NvBlastExtSerializationInternal.h" namespace Nv { namespace Blast { class ExtSerializationImpl : public ExtSerializationInternal { public: // Default buffer provider class AllocBufferProvider : public ExtSerialization::BufferProvider { public: virtual void* requestBuffer(size_t size) override; }; ExtSerializationImpl(); ~ExtSerializationImpl(); // ExtSerialization interface begin virtual bool setSerializationEncoding(uint32_t encodingID) override; virtual uint32_t getSerializationEncoding() const override; virtual void setBufferProvider(BufferProvider* bufferProvider) override; virtual bool peekHeader(uint32_t* objectTypeID, uint32_t* encodingID, uint64_t* dataSize, const void* buffer, uint64_t bufferSize) override; virtual const void* skipObject(uint64_t& bufferSize, const void* buffer) override; virtual void* deserializeFromBuffer(const void* buffer, uint64_t size, uint32_t* objectTypeIDPtr = nullptr) override; virtual uint64_t serializeIntoBuffer(void*& buffer, const void* object, uint32_t objectTypeID) override; virtual void release() override; // ExtSerialization interface end // ExtSerializationInternal interface begin virtual bool registerSerializer(ExtSerializer& serializer) override; virtual bool unregisterSerializer(ExtSerializer& serializer) override; virtual ExtSerializer* findSerializer(uint32_t objectTypeID, uint32_t encodingID) override; // ExtSerializationInternal interface end private: char* writeHeaderIntoBuffer(char* buffer, uint64_t bufferSize, uint32_t objectTypeID, uint32_t encodingID, uint64_t dataSize) const; const char* readHeaderFromBuffer(uint32_t* objectTypeID, uint32_t* encodingID, uint64_t* dataSize, const char* buffer, uint64_t bufferSize) const; //// Static data //// static const char* s_identifier; static const char* s_version; static AllocBufferProvider s_defaultBufferProvider; //// Member data //// 
HashMap<uint64_t, ExtSerializer*>::type m_serializers; uint32_t m_serializationEncoding; BufferProvider* m_bufferProvider; }; //////// ExtSerializationImpl static member variables //////// /** Module identifying header. This should never change. */ const char* ExtSerializationImpl::s_identifier = "NVidia(r) GameWorks Blast(tm) v."; const char* ExtSerializationImpl::s_version = "1"; ExtSerializationImpl::AllocBufferProvider ExtSerializationImpl::s_defaultBufferProvider; //////// Local utility functions //////// static NV_INLINE uint64_t generateKey(uint32_t objectTypeID, uint32_t encodingID) { return static_cast<uint64_t>(encodingID) << 32 | static_cast<uint64_t>(objectTypeID); } static NV_INLINE uint64_t generateKey(const ExtSerializer& serializer) { return generateKey(serializer.getObjectTypeID(), serializer.getEncodingID()); } static NV_INLINE void writeIDToBuffer(char* buffer, uint32_t id) { for (int i = 0; i < 4; ++i, id >>= 8) { *buffer++ = static_cast<char>(id & 0xFF); } } static NV_INLINE uint32_t readIDFromBuffer(const char* buffer) { return NVBLAST_FOURCC(buffer[0], buffer[1], buffer[2], buffer[3]); } static NV_INLINE void writeU64InHexToBuffer(char* buffer, uint64_t val) { for (char* curr = buffer + 16; curr-- > buffer; val >>= 4) { *curr = "0123456789ABCDEF"[val & 0xF]; } } static NV_INLINE uint64_t readU64InHexFromBuffer(const char* buffer) { uint64_t val = 0; for (const char* curr = buffer; curr < buffer + 16; ++curr) { const char c = *curr; const char msn = c >> 4; const char mask = ((88 >> msn) & 1) - 1; const unsigned char digit = "\x0\x1\x2\x3\x4\x5\x6\x7\x8\x9\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xA\xB\xC\xD\xE\xF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"[((msn - 3) & 1) << 4 | (c & 0xF)] | mask; if (digit == 0xFF) { return 0; // Not a hexidecimal digit } val = val << 4 | digit; } return val; } //////// ExtSerialization member functions //////// ExtSerializationImpl::ExtSerializationImpl() : m_serializationEncoding(EncodingID::CapnProtoBinary), m_bufferProvider(&s_defaultBufferProvider) { } ExtSerializationImpl::~ExtSerializationImpl() { // Release and remove all registered serializers Array<ExtSerializer*>::type registeredSerializers; registeredSerializers.reserve(m_serializers.size()); for (auto it = m_serializers.getIterator(); !it.done(); ++it) { registeredSerializers.pushBack(it->second); } m_serializers.clear(); for (uint32_t i = 0; i < registeredSerializers.size(); ++i) { registeredSerializers[i]->release(); } } char* ExtSerializationImpl::writeHeaderIntoBuffer(char* buffer, uint64_t bufferSize, uint32_t objectTypeID, uint32_t encodingID, uint64_t dataSize) const { if (bufferSize < HeaderSize) { return nullptr; } char* stop = buffer + HeaderSize; size_t versionLen = strlen(s_version); if (versionLen > 63) { versionLen = 63; } memset(buffer, ' ', HeaderSize); memcpy(buffer, s_identifier, 32); buffer += 32; memcpy(buffer, s_version, versionLen); buffer += 64; writeIDToBuffer(buffer, objectTypeID); buffer += 5; writeIDToBuffer(buffer, encodingID); buffer += 5; writeU64InHexToBuffer(buffer, dataSize); buffer += 16; *(stop - 1) = '\n'; return stop; } const char* ExtSerializationImpl::readHeaderFromBuffer(uint32_t* objectTypeID, uint32_t* encodingID, uint64_t* dataSize, const char* buffer, uint64_t bufferSize) const { if (bufferSize < HeaderSize) { NVBLAST_LOG_ERROR("ExtSerializationImpl::readHeaderFromBuffer: header terminator not found."); return nullptr; } const char* stop = buffer + HeaderSize; if (memcmp(buffer, s_identifier, 32)) { 
NVBLAST_LOG_ERROR("ExtSerializationImpl::readHeaderFromBuffer: file identifier does not match expected value."); return nullptr; } buffer += 32; const char* s = strchr(buffer, ' '); if (s == nullptr) { NVBLAST_LOG_ERROR("ExtSerializationImpl::readHeaderFromBuffer: file format error reading serializer library version."); } if (memcmp(buffer, s_version, s - buffer)) { NVBLAST_LOG_ERROR("ExtSerializationImpl::readHeaderFromBuffer: file version does not match serializer library version."); return nullptr; } buffer += 64; if (objectTypeID != nullptr) { *objectTypeID = readIDFromBuffer(buffer); } buffer += 5; if (encodingID != nullptr) { *encodingID = readIDFromBuffer(buffer); } buffer += 5; if (dataSize != nullptr) { *dataSize = readU64InHexFromBuffer(buffer); } buffer += 16; return stop; } bool ExtSerializationImpl::registerSerializer(ExtSerializer& serializer) { return m_serializers.insert(generateKey(serializer), &serializer); } bool ExtSerializationImpl::unregisterSerializer(ExtSerializer& serializer) { const uint64_t key = generateKey(serializer); const auto entry = m_serializers.find(key); if (entry == nullptr) { return false; } entry->second->release(); return m_serializers.erase(key); } ExtSerializer* ExtSerializationImpl::findSerializer(uint32_t objectTypeID, uint32_t encodingID) { auto entry = m_serializers.find(generateKey(objectTypeID, encodingID)); return entry != nullptr ? entry->second : nullptr; } bool ExtSerializationImpl::setSerializationEncoding(uint32_t encodingID) { m_serializationEncoding = encodingID; return true; } uint32_t ExtSerializationImpl::getSerializationEncoding() const { return m_serializationEncoding; } void ExtSerializationImpl::setBufferProvider(BufferProvider* bufferProvider) { m_bufferProvider = bufferProvider != nullptr ? bufferProvider : &s_defaultBufferProvider; } bool ExtSerializationImpl::peekHeader(uint32_t* objectTypeID, uint32_t* encodingID, uint64_t* dataSize, const void* buffer, uint64_t bufferSize) { return nullptr != readHeaderFromBuffer(objectTypeID, encodingID, dataSize, reinterpret_cast<const char*>(buffer), bufferSize); } const void* ExtSerializationImpl::skipObject(uint64_t& bufferSize, const void* buffer) { uint64_t dataSize; const char* next = readHeaderFromBuffer(nullptr, nullptr, &dataSize, static_cast<const char*>(buffer), bufferSize); if (next == nullptr) { return nullptr; } next += dataSize; const uint64_t skipSize = next - static_cast<const char*>(buffer); NVBLAST_CHECK_ERROR(skipSize <= bufferSize, "Object size in buffer is too large for given buffer size.", return nullptr); bufferSize -= skipSize; return next; } void* ExtSerializationImpl::deserializeFromBuffer(const void* buffer, uint64_t bufferSize, uint32_t* objectTypeIDPtr) { uint32_t objectTypeID; uint32_t encodingID; uint64_t dataSize; void* result = nullptr; buffer = readHeaderFromBuffer(&objectTypeID, &encodingID, &dataSize, reinterpret_cast<const char*>(buffer), bufferSize); if (buffer != nullptr) { auto entry = m_serializers.find(generateKey(objectTypeID, encodingID)); if (entry != nullptr && entry->second != nullptr) { result = entry->second->deserializeFromBuffer(buffer, dataSize); } } if (objectTypeIDPtr != nullptr) { *objectTypeIDPtr = result != nullptr ? 
objectTypeID : 0; } return result; } uint64_t ExtSerializationImpl::serializeIntoBuffer(void*& buffer, const void* object, uint32_t objectTypeID) { if (!m_serializationEncoding) { NVBLAST_LOG_ERROR("ExtSerializationImpl::serializeIntoBuffer: no serialization encoding has been set."); return false; // No encoding available } auto entry = m_serializers.find(generateKey(objectTypeID, m_serializationEncoding)); if (entry == nullptr || entry->second == nullptr) { return false; } const uint64_t size = entry->second->serializeIntoBuffer(buffer, *m_bufferProvider, object, HeaderSize); if (size < HeaderSize) { NVBLAST_LOG_ERROR("ExtSerializationImpl::serializeIntoBuffer: failed to write data to buffer."); return 0; } writeHeaderIntoBuffer(reinterpret_cast<char*>(buffer), HeaderSize, objectTypeID, m_serializationEncoding, size - HeaderSize); return size; } void ExtSerializationImpl::release() { NVBLAST_DELETE(this, ExtSerializationImpl); } //////// ExtSerializationImpl::AllocBufferProvider member functions //////// void* ExtSerializationImpl::AllocBufferProvider::requestBuffer(size_t size) { return NVBLAST_ALLOC(size); } } // namespace Blast } // namespace Nv Nv::Blast::ExtSerialization* NvBlastExtSerializationCreate() { Nv::Blast::ExtSerializationImpl* serialization = NVBLAST_NEW(Nv::Blast::ExtSerializationImpl) (); // Automatically load LL serializers NvBlastExtLlSerializerLoadSet(*serialization); return serialization; }
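// Usage sketch of the manager as a whole (someObject/objectTypeID are placeholders): every
// serialized object is preceded by the fixed 128-byte header written above (32-byte identifier,
// version field, object-type and encoding IDs, data size in hex), so peekHeader and
// deserializeFromBuffer both start from the same buffer pointer.
#if 0
static void serializationRoundTripSketch(const void* someObject, uint32_t objectTypeID)
{
    Nv::Blast::ExtSerialization* ser = NvBlastExtSerializationCreate();    // LL serializers are registered automatically
    void* buffer = nullptr;
    const uint64_t size = ser->serializeIntoBuffer(buffer, someObject, objectTypeID);
    if (size != 0)
    {
        uint32_t typeID = 0, encodingID = 0;
        uint64_t dataSize = 0;
        if (ser->peekHeader(&typeID, &encodingID, &dataSize, buffer, size))
        {
            void* copy = ser->deserializeFromBuffer(buffer, size, nullptr);
            (void)copy;
        }
    }
    ser->release();
}
#endif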
13,068
C++
31.031863
192
0.6897
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/NvBlastExtTkSerialization.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastExtSerializationInternal.h" #include "NvBlastExtTkSerialization.h" #include "NvBlastExtTkSerializerCAPN.h" #include "NvBlastExtTkSerializerRAW.h" namespace Nv { namespace Blast { TkFramework* sExtTkSerializerFramework = nullptr; class ExtTkSerializerAsset_CPNB : public ExtSerializer { public: ExtSerializerBoilerplate("TkAsset_CPNB", "Blast high-level asset (Nv::Blast::TkAsset) serialization using Cap'n Proto binary format.", TkObjectTypeID::Asset, ExtSerialization::EncodingID::CapnProtoBinary); ExtSerializerDefaultFactoryAndRelease(ExtTkSerializerAsset_CPNB); virtual void* deserializeFromBuffer(const void* buffer, uint64_t size) override { return ExtSerializationCAPN<TkAsset, Serialization::TkAsset::Reader, Serialization::TkAsset::Builder>::deserializeFromBuffer(reinterpret_cast<const unsigned char*>(buffer), size); } virtual uint64_t serializeIntoBuffer(void*& buffer, ExtSerialization::BufferProvider& bufferProvider, const void* object, uint64_t offset = 0) override { uint64_t usedSize; if (!ExtSerializationCAPN<TkAsset, Serialization::TkAsset::Reader, Serialization::TkAsset::Builder>::serializeIntoBuffer(reinterpret_cast<const TkAsset*>(object), reinterpret_cast<unsigned char*&>(buffer), usedSize, &bufferProvider, offset)) { return 0; } return usedSize; } }; class ExTkSerializerAsset_RAW : public ExtSerializer { public: ExtSerializerBoilerplate("TkAsset_RAW", "Blast high-level asset (Nv::Blast::TkAsset) serialization using raw memory format.", TkObjectTypeID::Asset, ExtSerialization::EncodingID::RawBinary); ExtSerializerDefaultFactoryAndRelease(ExTkSerializerAsset_RAW); ExtSerializerReadOnly(ExTkSerializerAsset_RAW); virtual void* deserializeFromBuffer(const void* buffer, uint64_t size) override { ExtIStream stream(buffer, size); return deserializeTkAsset(stream, *sExtTkSerializerFramework); } }; } // namespace Blast } // namespace Nv /////////////////////////////////////// size_t NvBlastExtTkSerializerLoadSet(Nv::Blast::TkFramework& framework, 
Nv::Blast::ExtSerialization& serialization) { Nv::Blast::sExtTkSerializerFramework = &framework; Nv::Blast::ExtSerializer* (*factories[])() = { Nv::Blast::ExtTkSerializerAsset_CPNB::create, Nv::Blast::ExTkSerializerAsset_RAW::create }; return Nv::Blast::ExtSerializationLoadSet(static_cast<Nv::Blast::ExtSerializationInternal&>(serialization), factories); } uint64_t NvBlastExtSerializationSerializeTkAssetIntoBuffer(void*& buffer, Nv::Blast::ExtSerialization& serialization, const Nv::Blast::TkAsset* asset) { return serialization.serializeIntoBuffer(buffer, asset, Nv::Blast::TkObjectTypeID::Asset); }
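// Usage sketch (framework, serialization manager, and asset are placeholders supplied by the
// application): loading the Tk serializer set registers the Cap'n Proto and read-only RAW
// serializers, after which the helper below writes a TkAsset through the shared manager.
#if 0
static uint64_t saveTkAssetSketch(Nv::Blast::TkFramework& framework,
                                  Nv::Blast::ExtSerialization& serialization,
                                  const Nv::Blast::TkAsset* asset)
{
    NvBlastExtTkSerializerLoadSet(framework, serialization);
    void* buffer = nullptr;
    return NvBlastExtSerializationSerializeTkAssetIntoBuffer(buffer, serialization, asset);    // 0 on failure
}
#endif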
4,373
C++
40.657142
209
0.750057
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/NvBlastExtLlSerialization.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastExtSerializationInternal.h" #include "NvBlastExtLlSerialization.h" #include "NvBlastExtLlSerializerCAPN.h" namespace Nv { namespace Blast { class ExtLlSerializerAsset_CPNB : public ExtSerializer { public: ExtSerializerBoilerplate("LLAsset_CPNB", "Blast low-level asset (NvBlastAsset) serialization using Cap'n Proto binary format.", LlObjectTypeID::Asset, ExtSerialization::EncodingID::CapnProtoBinary); ExtSerializerDefaultFactoryAndRelease(ExtLlSerializerAsset_CPNB); virtual void* deserializeFromBuffer(const void* buffer, uint64_t size) override { return ExtSerializationCAPN<Asset, Serialization::Asset::Reader, Serialization::Asset::Builder>::deserializeFromBuffer(reinterpret_cast<const unsigned char*>(buffer), size); } virtual uint64_t serializeIntoBuffer(void*& buffer, ExtSerialization::BufferProvider& bufferProvider, const void* object, uint64_t offset = 0) override { uint64_t usedSize; if (!ExtSerializationCAPN<Asset, Serialization::Asset::Reader, Serialization::Asset::Builder>::serializeIntoBuffer(reinterpret_cast<const Asset*>(object), reinterpret_cast<unsigned char*&>(buffer), usedSize, &bufferProvider, offset)) { return 0; } return usedSize; } }; class ExtLlSerializerFamily_CPNB : public ExtSerializer { public: ExtSerializerBoilerplate("LLFamily_CPNB", "Blast low-level family (NvBlastFamily) serialization using Cap'n Proto binary format.", LlObjectTypeID::Family, ExtSerialization::EncodingID::CapnProtoBinary); ExtSerializerDefaultFactoryAndRelease(ExtLlSerializerFamily_CPNB); virtual void* deserializeFromBuffer(const void* buffer, uint64_t size) override { return ExtSerializationCAPN<FamilyHeader, Serialization::Family::Reader, Serialization::Family::Builder>::deserializeFromBuffer(reinterpret_cast<const unsigned char*>(buffer), size); } virtual uint64_t serializeIntoBuffer(void*& buffer, ExtSerialization::BufferProvider& bufferProvider, const void* object, uint64_t offset = 0) override { uint64_t usedSize; if (!ExtSerializationCAPN<FamilyHeader, 
Serialization::Family::Reader, Serialization::Family::Builder>::serializeIntoBuffer(reinterpret_cast<const FamilyHeader*>(object), reinterpret_cast<unsigned char*&>(buffer), usedSize, &bufferProvider, offset)) { return 0; } return usedSize; } }; class ExtLlSerializerObject_RAW : public ExtSerializer { public: virtual void* deserializeFromBuffer(const void* buffer, uint64_t size) override { const NvBlastDataBlock* block = reinterpret_cast<const NvBlastDataBlock*>(buffer); if (static_cast<uint64_t>(block->size) > size) { return nullptr; } void* llobject = NVBLAST_ALLOC(block->size); return memcpy(llobject, block, block->size); } virtual uint64_t serializeIntoBuffer(void*& buffer, ExtSerialization::BufferProvider& bufferProvider, const void* object, uint64_t offset = 0) override { const NvBlastDataBlock* block = reinterpret_cast<const NvBlastDataBlock*>(object); const uint64_t size = block->size + offset; buffer = bufferProvider.requestBuffer(size); if (buffer == nullptr) { return 0; } memcpy(static_cast<char*>(buffer) + offset, object, block->size); return size; } }; class ExtLlSerializerAsset_RAW : public ExtLlSerializerObject_RAW { public: ExtSerializerBoilerplate("LLAsset_RAW", "Blast low-level asset (NvBlastAsset) serialization using raw memory format.", LlObjectTypeID::Asset, ExtSerialization::EncodingID::RawBinary); ExtSerializerDefaultFactoryAndRelease(ExtLlSerializerAsset_RAW); }; class ExtLlSerializerFamily_RAW : public ExtLlSerializerObject_RAW { public: ExtSerializerBoilerplate("LLFamily_RAW", "Blast low-level family (NvBlastFamily) serialization using raw memory format.", LlObjectTypeID::Family, ExtSerialization::EncodingID::RawBinary); ExtSerializerDefaultFactoryAndRelease(ExtLlSerializerFamily_RAW); }; } // namespace Blast } // namespace Nv /////////////////////////////////////// size_t NvBlastExtLlSerializerLoadSet(Nv::Blast::ExtSerialization& serialization) { Nv::Blast::ExtSerializer* (*factories[])() = { Nv::Blast::ExtLlSerializerAsset_CPNB::create, Nv::Blast::ExtLlSerializerAsset_RAW::create, Nv::Blast::ExtLlSerializerFamily_CPNB::create, Nv::Blast::ExtLlSerializerFamily_RAW::create }; return Nv::Blast::ExtSerializationLoadSet(static_cast<Nv::Blast::ExtSerializationInternal&>(serialization), factories); } uint64_t NvBlastExtSerializationSerializeAssetIntoBuffer(void*& buffer, Nv::Blast::ExtSerialization& serialization, const NvBlastAsset* asset) { return serialization.serializeIntoBuffer(buffer, asset, Nv::Blast::LlObjectTypeID::Asset); } uint64_t NvBlastExtSerializationSerializeFamilyIntoBuffer(void*& buffer, Nv::Blast::ExtSerialization& serialization, const NvBlastFamily* family) { return serialization.serializeIntoBuffer(buffer, family, Nv::Blast::LlObjectTypeID::Family); }
6,780
C++
41.118012
206
0.737316
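A minimal usage sketch for the serializer set defined in the file above: it loads the low-level serializers into a serialization module and writes an NvBlastAsset into a buffer. The helper name writeAssetBlob is invented; NvBlastExtSerializationCreate() and ExtSerialization::release() are assumed to come from NvBlastExtSerialization.h (they are not shown in this file), and the NvBlastAsset pointer is assumed to exist already.

#include "NvBlastExtSerialization.h"
#include "NvBlastExtLlSerialization.h"

// Sketch only: serialize an existing low-level asset into a buffer and return the byte count (0 on failure).
uint64_t writeAssetBlob(const NvBlastAsset* asset, void*& outBuffer)
{
    Nv::Blast::ExtSerialization* ser = NvBlastExtSerializationCreate();  // assumed factory from NvBlastExtSerialization.h
    if (ser == nullptr)
        return 0;

    // Register the four low-level serializers defined above (Cap'n Proto and raw, for asset and family).
    NvBlastExtLlSerializerLoadSet(*ser);

    // Serialize the asset; the module's buffer provider allocates outBuffer.
    const uint64_t size = NvBlastExtSerializationSerializeAssetIntoBuffer(outBuffer, *ser, asset);

    ser->release();  // assumed release method on the serialization module
    return size;
}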
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/ActorDTO.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "ActorDTO.h" #include "NvBlastGlobals.h" #include "NvBlastIDDTO.h" #include "NvBlastChunkDTO.h" #include "NvBlastBondDTO.h" namespace Nv { namespace Blast { bool ActorDTO::serialize(Nv::Blast::Serialization::Actor::Builder builder, const Nv::Blast::Actor* poco) { builder.setFamilyOffset(poco->getFamilyOffset()); builder.setFirstVisibleChunkIndex(poco->getFirstVisibleChunkIndex()); builder.setVisibleChunkCount(poco->getVisibleChunkCount()); builder.setFirstGraphNodeIndex(poco->getFirstGraphNodeIndex()); builder.setGraphNodeCount(poco->getGraphNodeCount()); builder.setLeafChunkCount(poco->getLeafChunkCount()); return true; } Nv::Blast::Actor* ActorDTO::deserialize(Nv::Blast::Serialization::Actor::Reader reader) { NV_UNUSED(reader); return nullptr; } bool ActorDTO::deserializeInto(Nv::Blast::Serialization::Actor::Reader reader, Nv::Blast::Actor* poco) { poco->setFamilyOffset(reader.getFamilyOffset()); poco->setFirstVisibleChunkIndex(reader.getFirstVisibleChunkIndex()); poco->setVisibleChunkCount(reader.getVisibleChunkCount()); poco->setFirstGraphNodeIndex(reader.getFirstGraphNodeIndex()); poco->setGraphNodeCount(reader.getGraphNodeCount()); poco->setLeafChunkCount(reader.getLeafChunkCount()); return true; } } // namespace Blast } // namespace Nv
2,914
C++
38.391891
104
0.762183
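ActorDTO above follows the mirrored DTO pattern used throughout these files: serialize() copies each field from the runtime object into a Cap'n Proto builder, and deserializeInto() copies the same fields back from a reader into a preallocated object. The sketch below illustrates that pattern with plain stand-in structs (ActorRecord, RuntimeActor and the function names are hypothetical, not the real Cap'n Proto or Blast types); only the field-by-field copy discipline is taken from the file.

#include <cassert>
#include <cstdint>

struct ActorRecord  { uint32_t familyOffset; uint32_t visibleChunkCount; };  // stand-in for the wire record
struct RuntimeActor { uint32_t familyOffset; uint32_t visibleChunkCount; };  // stand-in for the runtime actor

// serialize(): runtime object -> record, one field at a time.
bool serializeActor(ActorRecord& out, const RuntimeActor& in)
{
    out.familyOffset      = in.familyOffset;
    out.visibleChunkCount = in.visibleChunkCount;
    return true;
}

// deserializeInto(): record -> preallocated runtime object, the exact mirror of serialize().
bool deserializeActorInto(const ActorRecord& in, RuntimeActor& out)
{
    out.familyOffset      = in.familyOffset;
    out.visibleChunkCount = in.visibleChunkCount;
    return true;
}

int main()
{
    RuntimeActor a{7, 3}, b{};
    ActorRecord rec{};
    serializeActor(rec, a);
    deserializeActorInto(rec, b);
    assert(a.familyOffset == b.familyOffset && a.visibleChunkCount == b.visibleChunkCount);
    return 0;
}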
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/PxConvexMeshGeometryDTO.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "PxConvexMeshGeometryDTO.h" #include "PxMeshScaleDTO.h" #include "NvBlastAssert.h" #include "NvBlastExtKJPxInputStream.h" #include "NvBlastExtKJPxOutputStream.h" #include "PxConvexMeshDesc.h" #include "NvBlastExtSerialization.h" #include "PxVec3.h" #include <algorithm> #include <vector> #include "PxPhysics.h" #include "NvBlastPxCallbacks.h" #include "PxDefaultStreams.h" namespace Nv { namespace Blast { extern physx::PxPhysics* sExtPxSerializerPhysics; extern physx::PxCooking* sExtPxSerializerCooking; bool PxConvexMeshGeometryDTO::serialize(Nv::Blast::Serialization::PxConvexMeshGeometry::Builder builder, const physx::PxConvexMeshGeometry * poco) { NVBLAST_ASSERT(sExtPxSerializerCooking != nullptr); PxMeshScaleDTO::serialize(builder.getScale(), &poco->scale); //TODO: Use cooking.cookConvexMesh to cook the mesh to a stream - then get that backing buffer and put it into the Data field physx::PxConvexMeshDesc desc; desc.points.data = poco->convexMesh->getVertices(); desc.points.count = poco->convexMesh->getNbVertices(); desc.points.stride = sizeof(physx::PxVec3); std::vector<uint32_t> indicesScratch; std::vector<physx::PxHullPolygon> hullPolygonsScratch; hullPolygonsScratch.resize(poco->convexMesh->getNbPolygons()); uint32_t indexCount = 0; for (uint32_t i = 0; i < hullPolygonsScratch.size(); i++) { physx::PxHullPolygon polygon; poco->convexMesh->getPolygonData(i, polygon); if (polygon.mNbVerts) { indexCount = std::max<uint32_t>(indexCount, polygon.mIndexBase + polygon.mNbVerts); } } indicesScratch.resize(indexCount); for (uint32_t i = 0; i < hullPolygonsScratch.size(); i++) { physx::PxHullPolygon polygon; poco->convexMesh->getPolygonData(i, polygon); for (uint32_t j = 0; j < polygon.mNbVerts; j++) { indicesScratch[polygon.mIndexBase + j] = poco->convexMesh->getIndexBuffer()[polygon.mIndexBase + j]; } hullPolygonsScratch[i] = polygon; } desc.indices.count = indexCount; desc.indices.data = indicesScratch.data(); desc.indices.stride = sizeof(uint32_t); desc.polygons.count = 
poco->convexMesh->getNbPolygons(); desc.polygons.data = hullPolygonsScratch.data(); desc.polygons.stride = sizeof(physx::PxHullPolygon); physx::PxDefaultMemoryOutputStream outStream(NvBlastGetPxAllocatorCallback()); if (!sExtPxSerializerCooking->cookConvexMesh(desc, outStream)) { return false; } kj::ArrayPtr<unsigned char> cookedBuffer(outStream.getData(), outStream.getSize()); builder.setConvexMesh(cookedBuffer); return true; } physx::PxConvexMeshGeometry* PxConvexMeshGeometryDTO::deserialize(Nv::Blast::Serialization::PxConvexMeshGeometry::Reader reader) { NVBLAST_ASSERT(sExtPxSerializerCooking != nullptr); NV_UNUSED(reader); return nullptr; } bool PxConvexMeshGeometryDTO::deserializeInto(Nv::Blast::Serialization::PxConvexMeshGeometry::Reader reader, physx::PxConvexMeshGeometry * poco) { NVBLAST_ASSERT(sExtPxSerializerPhysics != nullptr); PxMeshScaleDTO::deserializeInto(reader.getScale(), &poco->scale); Nv::Blast::ExtKJPxInputStream inputStream(reader.getConvexMesh()); //NOTE: Naive approach, no shared convex hulls poco->convexMesh = sExtPxSerializerPhysics->createConvexMesh(inputStream); return poco->convexMesh != nullptr; } } // namespace Blast } // namespace Nv
5,081
C++
34.788732
146
0.731943
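PxConvexMeshGeometryDTO::serialize above rebuilds a PxConvexMeshDesc from an existing convex mesh, and because the total index-buffer length is not stored directly it is recovered as the maximum of mIndexBase + mNbVerts over all hull polygons. The standalone sketch below isolates just that recovery step; HullPolygon and requiredIndexCount are stand-ins (the mIndexBase/mNbVerts field names are taken from the file, the rest is invented for illustration).

#include <algorithm>
#include <cstdint>
#include <vector>

// Stand-in for physx::PxHullPolygon, keeping only the two fields the size computation needs.
struct HullPolygon { uint32_t mIndexBase; uint32_t mNbVerts; };

// The shared index buffer must cover every polygon's [mIndexBase, mIndexBase + mNbVerts) range.
uint32_t requiredIndexCount(const std::vector<HullPolygon>& polygons)
{
    uint32_t indexCount = 0;
    for (const HullPolygon& p : polygons)
    {
        if (p.mNbVerts)
            indexCount = std::max<uint32_t>(indexCount, p.mIndexBase + p.mNbVerts);
    }
    return indexCount;
}

int main()
{
    // Two quads sharing one index buffer: ranges [0..4) and [4..8) -> 8 indices needed.
    std::vector<HullPolygon> polys = { {0, 4}, {4, 4} };
    return requiredIndexCount(polys) == 8 ? 0 : 1;
}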
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/TkAssetJointDescDTO.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "TkAssetJointDescDTO.h" #include "NvVec3DTO.h" namespace Nv { namespace Blast { bool TkAssetJointDescDTO::serialize(Nv::Blast::Serialization::TkAssetJointDesc::Builder builder, const Nv::Blast::TkAssetJointDesc * poco) { kj::ArrayPtr<const uint32_t> nodeIndices(poco->nodeIndices, 2); builder.setNodeIndices(nodeIndices); builder.initAttachPositions(2); for (int i = 0; i < 2; i++) { NvVec3DTO::serialize(builder.getAttachPositions()[i], &poco->attachPositions[i]); } return true; } Nv::Blast::TkAssetJointDesc* TkAssetJointDescDTO::deserialize(Nv::Blast::Serialization::TkAssetJointDesc::Reader reader) { //TODO: Allocate with ExtContent and return NV_UNUSED(reader); return nullptr; } bool TkAssetJointDescDTO::deserializeInto(Nv::Blast::Serialization::TkAssetJointDesc::Reader reader, Nv::Blast::TkAssetJointDesc * poco) { auto readerAttachPositions = reader.getAttachPositions(); NvVec3DTO::deserializeInto(readerAttachPositions[0], &poco->attachPositions[0]); NvVec3DTO::deserializeInto(readerAttachPositions[1], &poco->attachPositions[1]); auto readerNodeIndices = reader.getNodeIndices(); poco->nodeIndices[0] = readerNodeIndices[0]; poco->nodeIndices[1] = readerNodeIndices[1]; return true; } } // namespace Blast } // namespace Nv
2,911
C++
36.818181
138
0.74854
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/AssetDTO.cpp
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//  * Redistributions of source code must retain the above copyright
//    notice, this list of conditions and the following disclaimer.
//  * Redistributions in binary form must reproduce the above copyright
//    notice, this list of conditions and the following disclaimer in the
//    documentation and/or other materials provided with the distribution.
//  * Neither the name of NVIDIA CORPORATION nor the names of its
//    contributors may be used to endorse or promote products derived
//    from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved.

#include "AssetDTO.h"
#include "NvBlastGlobals.h"
#include "NvBlastIDDTO.h"
#include "NvBlastChunkDTO.h"
#include "NvBlastBondDTO.h"
#include "NvBlastAsset.h"

namespace Nv
{
namespace Blast
{

bool AssetDTO::serialize(Nv::Blast::Serialization::Asset::Builder builder, const Nv::Blast::Asset * poco)
{
    NvBlastIDDTO::serialize(builder.initID(), &poco->m_ID);

    builder.setLeafChunkCount(poco->m_leafChunkCount);
    builder.setFirstSubsupportChunkIndex(poco->m_firstSubsupportChunkIndex);

    capnp::List<Nv::Blast::Serialization::NvBlastChunk>::Builder chunks = builder.initChunks(poco->m_chunkCount);
    builder.setChunkCount(poco->m_chunkCount);
    NVBLAST_ASSERT_WITH_MESSAGE(builder.getChunkCount() == poco->m_chunkCount, "Serialized chunk count must match the asset's chunk count");

    for (uint32_t i = 0; i < poco->m_chunkCount; i++)
    {
        NvBlastChunk& chunk = poco->getChunks()[i];
        NvBlastChunkDTO::serialize(chunks[i], &chunk);
    }

    NVBLAST_ASSERT_WITH_MESSAGE(builder.getChunkCount() == poco->m_chunkCount, "Serialized chunk count must match the asset's chunk count");

    capnp::List<Nv::Blast::Serialization::NvBlastBond>::Builder bonds = builder.initBonds(poco->m_bondCount);
    builder.setBondCount(poco->m_bondCount);

    for (uint32_t i = 0; i < poco->m_bondCount; i++)
    {
        NvBlastBond& bond = poco->getBonds()[i];
        NvBlastBondDTO::serialize(bonds[i], &bond);
    }

    kj::ArrayPtr<uint32_t> stlcArray(poco->getSubtreeLeafChunkCounts(), poco->m_chunkCount);
    builder.initSubtreeLeafChunkCounts(poco->m_chunkCount);
    builder.setSubtreeLeafChunkCounts(stlcArray);

    kj::ArrayPtr<uint32_t> ctgnArray(poco->getChunkToGraphNodeMap(), poco->m_chunkCount);
    builder.setChunkToGraphNodeMap(ctgnArray);

    Nv::Blast::Serialization::NvBlastSupportGraph::Builder graphBuilder = builder.initGraph();

    graphBuilder.setNodeCount(poco->m_graph.m_nodeCount);

    uint32_t* ciPtr = poco->m_graph.getChunkIndices();
    kj::ArrayPtr<const uint32_t> ciArray(ciPtr, poco->m_graph.m_nodeCount);
    graphBuilder.setChunkIndices(ciArray);

    kj::ArrayPtr<const uint32_t> adjPart(poco->m_graph.getAdjacencyPartition(), poco->m_graph.m_nodeCount + 1);
    graphBuilder.setAdjacencyPartition(adjPart);
    NVBLAST_ASSERT(graphBuilder.getAdjacencyPartition().size() == poco->m_graph.m_nodeCount + 1);

    kj::ArrayPtr<const uint32_t> nodeIndices(poco->m_graph.getAdjacentNodeIndices(), poco->m_bondCount * 2);
    graphBuilder.setAdjacentNodeIndices(nodeIndices);
    NVBLAST_ASSERT(graphBuilder.getAdjacentNodeIndices().size() == poco->m_bondCount * 2);

    kj::ArrayPtr<const uint32_t> bondIndices(poco->m_graph.getAdjacentBondIndices(), poco->m_bondCount * 2);
    graphBuilder.setAdjacentBondIndices(bondIndices);

    return true;
}


Nv::Blast::Asset* AssetDTO::deserialize(Nv::Blast::Serialization::Asset::Reader reader)
{
    NvBlastAssetMemSizeData sizeData;
    sizeData.chunkCount = reader.getChunkCount();
    sizeData.nodeCount = reader.getGraph().getNodeCount();
    sizeData.bondCount = reader.getBondCount();
    const uint32_t leafChunkCount = reader.getLeafChunkCount();
    const uint32_t firstSubsupportChunkIndex = reader.getFirstSubsupportChunkIndex();

    const size_t assetSize = NvBlastGetAssetMemorySizeFromSizeData(sizeData, nullptr);
    void* mem = NVBLAST_ALLOC(assetSize);
    auto asset = Nv::Blast::initializeAsset(mem, sizeData.chunkCount, sizeData.nodeCount, leafChunkCount, firstSubsupportChunkIndex, sizeData.bondCount, logLL);

    if (deserializeInto(reader, asset))
        return asset;

    // free the memory so it doesn't leak
    NVBLAST_FREE(asset);
    return nullptr;
}


bool AssetDTO::deserializeInto(Nv::Blast::Serialization::Asset::Reader reader, Nv::Blast::Asset * poco)
{
    NvBlastIDDTO::deserializeInto(reader.getID(), &poco->m_ID);

    NvBlastBond* bonds = poco->getBonds();
    uint32_t bondCount = reader.getBondCount();
    auto readerBonds = reader.getBonds();
    for (uint32_t i = 0; i < bondCount; i++)
    {
        auto bondReader = readerBonds[i];
        NvBlastBondDTO::deserializeInto(bondReader, &bonds[i]);
    }

    NvBlastChunk* chunks = poco->getChunks();
    uint32_t chunkCount = reader.getChunkCount();
    auto readerChunks = reader.getChunks();
    for (uint32_t i = 0; i < chunkCount; i++)
    {
        auto chunkReader = readerChunks[i];
        NvBlastChunkDTO::deserializeInto(chunkReader, &chunks[i]);
    }

    poco->m_graph.m_nodeCount = reader.getGraph().getNodeCount();

    NVBLAST_ASSERT(reader.getSubtreeLeafChunkCounts().size() == poco->m_chunkCount);
    auto readerSubtreeLeafChunkCounts = reader.getSubtreeLeafChunkCounts();
    for (uint32_t i = 0; i < poco->m_chunkCount; i++)
    {
        poco->getSubtreeLeafChunkCounts()[i] = readerSubtreeLeafChunkCounts[i];
    }

    auto readerChunkToGraphNodeMap = reader.getChunkToGraphNodeMap();
    for (uint32_t i = 0; i < chunkCount; i++)
    {
        poco->getChunkToGraphNodeMap()[i] = readerChunkToGraphNodeMap[i];
    }

    uint32_t* ciPtr = poco->m_graph.getChunkIndices();
    NVBLAST_ASSERT(reader.getGraph().getChunkIndices().size() == poco->m_graph.m_nodeCount);
    auto readerGraphChunkIndices = reader.getGraph().getChunkIndices();
    for (uint32_t i = 0; i < poco->m_graph.m_nodeCount; i++)
    {
        ciPtr[i] = readerGraphChunkIndices[i];
    }

    uint32_t* adjPartition = poco->m_graph.getAdjacencyPartition();
    const uint32_t graphAdjacencyPartitionSize = reader.getGraph().getAdjacencyPartition().size();
    auto readerGraphAdjacencyPartition = reader.getGraph().getAdjacencyPartition();
    for (uint32_t i = 0; i < graphAdjacencyPartitionSize; ++i)
    {
        adjPartition[i] = readerGraphAdjacencyPartition[i];
    }

    uint32_t* adjNodes = poco->m_graph.getAdjacentNodeIndices();
    const uint32_t graphAdjacentNodeIndicesSize = reader.getGraph().getAdjacentNodeIndices().size();
    auto readerGraphAdjacentNodeIndices = reader.getGraph().getAdjacentNodeIndices();
    for (uint32_t i = 0; i < graphAdjacentNodeIndicesSize; ++i)
    {
        adjNodes[i] = readerGraphAdjacentNodeIndices[i];
    }

    uint32_t* adjBonds = poco->m_graph.getAdjacentBondIndices();
    const uint32_t graphAdjacentBondIndicesSize = reader.getGraph().getAdjacentBondIndices().size();
    auto readerGraphAdjacentBondIndices = reader.getGraph().getAdjacentBondIndices();
    for (uint32_t i = 0; i < graphAdjacentBondIndicesSize; ++i)
    {
        adjBonds[i] = readerGraphAdjacentBondIndices[i];
    }

    return true;
}

}   // namespace Blast
}   // namespace Nv
8,072
C++
37.8125
160
0.718285
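AssetDTO writes the support graph in a CSR-like layout: adjacencyPartition has nodeCount + 1 entries, and adjacentNodeIndices/adjacentBondIndices each have 2 * bondCount entries because every bond is listed once from each of its two endpoint nodes. A small worked example of reading that layout follows; the array names mirror the schema fields above, while the node and bond values themselves are invented for illustration.

#include <cstdint>
#include <cstdio>

int main()
{
    // Three support nodes, two bonds: 0-1 and 1-2.
    const uint32_t nodeCount = 3;
    const uint32_t bondCount = 2;

    // nodeCount + 1 entries; node n's neighbors live in [partition[n], partition[n + 1]).
    const uint32_t adjacencyPartition[nodeCount + 1] = { 0, 1, 3, 4 };
    // 2 * bondCount entries: each bond appears from both endpoints.
    const uint32_t adjacentNodeIndices[2 * bondCount] = { 1, 0, 2, 1 };
    const uint32_t adjacentBondIndices[2 * bondCount] = { 0, 0, 1, 1 };

    for (uint32_t n = 0; n < nodeCount; ++n)
        for (uint32_t i = adjacencyPartition[n]; i < adjacencyPartition[n + 1]; ++i)
            std::printf("node %u -> node %u via bond %u\n", n, adjacentNodeIndices[i], adjacentBondIndices[i]);
    return 0;
}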
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/NvBlastBondDTO.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastBondDTO.h" #include "NvBlastAssert.h" namespace Nv { namespace Blast { bool NvBlastBondDTO::serialize(Nv::Blast::Serialization::NvBlastBond::Builder builder, const NvBlastBond * poco) { NVBLAST_ASSERT(poco != nullptr); kj::ArrayPtr<const float> normArray(poco->normal, 3); builder.setNormal(normArray); builder.setArea(poco->area); kj::ArrayPtr<const float> centArray(poco->centroid, 3); builder.setCentroid(centArray); builder.setUserData(poco->userData); return true; } NvBlastBond* NvBlastBondDTO::deserialize(Nv::Blast::Serialization::NvBlastBond::Reader reader) { //FIXME NV_UNUSED(reader); //TODO: Allocate with ExtContext and return return nullptr; } bool NvBlastBondDTO::deserializeInto(Nv::Blast::Serialization::NvBlastBond::Reader reader, NvBlastBond * poco) { poco->area = reader.getArea(); auto readerCentroid = reader.getCentroid(); poco->centroid[0] = readerCentroid[0]; poco->centroid[1] = readerCentroid[1]; poco->centroid[2] = readerCentroid[2]; auto readerNormal = reader.getNormal(); poco->normal[0] = readerNormal[0]; poco->normal[1] = readerNormal[1]; poco->normal[2] = readerNormal[2]; poco->userData = reader.getUserData(); return true; } } // namespace Blast } // namespace Nv
2,896
C++
32.686046
112
0.734116
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/TkAssetDTO.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "TkAssetDTO.h" #include "AssetDTO.h" #include "TkAssetJointDescDTO.h" #include <vector> #include "NvBlastTkFramework.h" #include "NvBlastGlobals.h" namespace Nv { namespace Blast { extern TkFramework* sExtTkSerializerFramework; bool TkAssetDTO::serialize(Nv::Blast::Serialization::TkAsset::Builder builder, const Nv::Blast::TkAsset * poco) { const Asset* assetLL = reinterpret_cast<const Nv::Blast::Asset*>(poco->getAssetLL()); Nv::Blast::AssetDTO::serialize(builder.getAssetLL(), assetLL); uint32_t jointDescCount = poco->getJointDescCount(); capnp::List<Nv::Blast::Serialization::TkAssetJointDesc>::Builder jointDescs = builder.initJointDescs(jointDescCount); for (uint32_t i = 0; i < jointDescCount; i++) { TkAssetJointDescDTO::serialize(jointDescs[i], &poco->getJointDescs()[i]); } return true; } Nv::Blast::TkAsset* TkAssetDTO::deserialize(Nv::Blast::Serialization::TkAsset::Reader reader) { const NvBlastAsset* assetLL = reinterpret_cast<const NvBlastAsset*>(AssetDTO::deserialize(reader.getAssetLL())); std::vector<Nv::Blast::TkAssetJointDesc> jointDescs; const uint32_t jointDescCount = reader.getJointDescs().size(); jointDescs.resize(jointDescCount); auto readerJointDescs = reader.getJointDescs(); for (uint32_t i = 0; i < jointDescCount; i++) { TkAssetJointDescDTO::deserializeInto(readerJointDescs[i], &jointDescs[i]); } // Make sure to set ownsAsset to true - this is serialization and no one else owns it. Nv::Blast::TkAsset* asset = NvBlastTkFrameworkGet()->createAsset(assetLL, jointDescs.data(), jointDescCount, true); return asset; } bool TkAssetDTO::deserializeInto(Nv::Blast::Serialization::TkAsset::Reader reader, Nv::Blast::TkAsset * poco) { NV_UNUSED(reader); poco = nullptr; // NOTE: Because of the way TkAsset is currently structured, this won't work. return false; } } // namespace Blast } // namespace Nv
3,537
C++
36.638297
121
0.737913
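TkAssetDTO::deserialize above first rebuilds the low-level asset and then passes ownsAsset = true to createAsset, so the resulting TkAsset becomes the sole owner of that allocation. The stand-in sketch below illustrates that ownership handoff only; LowLevelAsset, TkAssetLike, create and release are all hypothetical names, not the real Blast or Tk types.

#include <cstdint>
#include <cstdlib>

struct LowLevelAsset { uint32_t chunkCount; };  // stand-in for the deserialized low-level object

struct TkAssetLike
{
    const LowLevelAsset* assetLL;
    bool ownsAsset;

    static TkAssetLike* create(const LowLevelAsset* ll, bool owns) { return new TkAssetLike{ ll, owns }; }

    void release()
    {
        if (ownsAsset)
            std::free(const_cast<LowLevelAsset*>(assetLL));  // pairs with the malloc below
        delete this;
    }
};

int main()
{
    // "Deserialization" allocates the low-level object; nothing else references it...
    LowLevelAsset* ll = static_cast<LowLevelAsset*>(std::malloc(sizeof(LowLevelAsset)));
    ll->chunkCount = 4;
    // ...so the wrapper adopts it (the ownsAsset = true case in TkAssetDTO::deserialize).
    TkAssetLike* tk = TkAssetLike::create(ll, true);
    tk->release();  // one release frees both the wrapper and the adopted allocation
    return 0;
}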
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/NvBlastChunkDTO.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastChunkDTO.h" #include "NvBlastAssert.h" namespace Nv { namespace Blast { bool NvBlastChunkDTO::serialize(Nv::Blast::Serialization::NvBlastChunk::Builder builder, const NvBlastChunk* poco) { NVBLAST_ASSERT(poco != nullptr); kj::ArrayPtr<const float> centArray(poco->centroid, 3); builder.setCentroid(centArray); builder.setVolume(poco->volume); builder.setParentChunkIndex(poco->parentChunkIndex); builder.setFirstChildIndex(poco->firstChildIndex); builder.setChildIndexStop(poco->childIndexStop); builder.setUserData(poco->userData); return true; } NvBlastChunk* NvBlastChunkDTO::deserialize(Nv::Blast::Serialization::NvBlastChunk::Reader reader) { //FIXME NV_UNUSED(reader); return nullptr; } bool NvBlastChunkDTO::deserializeInto(Nv::Blast::Serialization::NvBlastChunk::Reader reader, NvBlastChunk* target) { NVBLAST_ASSERT(target != nullptr); auto readerCentroid = reader.getCentroid(); target->centroid[0] = readerCentroid[0]; target->centroid[1] = readerCentroid[1]; target->centroid[2] = readerCentroid[2]; target->childIndexStop = reader.getChildIndexStop(); target->firstChildIndex = reader.getFirstChildIndex(); target->parentChunkIndex = reader.getParentChunkIndex(); target->userData = reader.getUserData(); target->volume = reader.getVolume(); return true; } } // namespace Blast } // namespace Nv
3,001
C++
35.168674
114
0.748417
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/FamilyDTO.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "FamilyDTO.h" #include "ActorDTO.h" #include "AssetDTO.h" #include "FamilyGraphDTO.h" #include "NvBlastFamilyGraph.h" #include "NvBlastGlobals.h" #include "NvBlastIDDTO.h" #include "NvBlastChunkDTO.h" #include "NvBlastBondDTO.h" #include <vector> namespace Nv { namespace Blast { bool FamilyDTO::serialize(Nv::Blast::Serialization::Family::Builder builder, const Nv::Blast::FamilyHeader* poco) { NvBlastIDDTO::serialize(builder.initAssetID(), &poco->m_assetID); // cache off the count data from the asset needed to re-create the family post serialization const NvBlastAssetMemSizeData sizeData = NvBlastAssetMemSizeDataFromAsset(poco->m_asset); builder.setBondCount(sizeData.bondCount); builder.setChunkCount(sizeData.chunkCount); builder.setNodeCount(sizeData.nodeCount); builder.setLowerSupportChunkCount(sizeData.lowerSupportChunkCount); builder.setUpperSupportChunkCount(sizeData.upperSupportChunkCount); // actorCount - these are active builder.setActorCount(poco->m_actorCount); // all possible actors const uint32_t actorCount = poco->getActorsArraySize(); capnp::List<Nv::Blast::Serialization::Actor>::Builder actors = builder.initActors(actorCount); for (uint32_t i = 0; i < actorCount; i++) { Actor& actor = poco->getActors()[i]; ActorDTO::serialize(actors[i], &actor); } // visibleChunkIndexLinks uint32_t* visibleChunkIndexLinks = reinterpret_cast<uint32_t *>(poco->getVisibleChunkIndexLinks()); kj::ArrayPtr<uint32_t> visibleChunkIndexLinksArray(visibleChunkIndexLinks, sizeData.chunkCount * 2); builder.setVisibleChunkIndexLinks(visibleChunkIndexLinksArray); // chunkActorIndices kj::ArrayPtr<uint32_t> chunkActorIndicesArray(poco->getChunkActorIndices(), sizeData.chunkCount); builder.setChunkActorIndices(chunkActorIndicesArray); // graphNodeIndexLinks kj::ArrayPtr<uint32_t> graphNodeIndexLinksArray(poco->getGraphNodeIndexLinks(), sizeData.chunkCount); builder.setGraphNodeIndexLinks(graphNodeIndexLinksArray); // lowerSupportChunkHealths kj::ArrayPtr<float> 
lowerSupportChunkHealthsArray(poco->getLowerSupportChunkHealths(), sizeData.chunkCount); builder.setLowerSupportChunkHealths(lowerSupportChunkHealthsArray); // graphBondHealths kj::ArrayPtr<float> graphBondHealthsArray(poco->getBondHealths(), sizeData.bondCount); builder.setGraphBondHealths(graphBondHealthsArray); // familyGraph FamilyGraph *graph = poco->getFamilyGraph(); auto builderGraph = builder.initFamilyGraph(); builderGraph.setNodeCount(sizeData.nodeCount); FamilyGraphDTO::serialize(builderGraph, graph); return true; } Nv::Blast::FamilyHeader* FamilyDTO::deserialize(Nv::Blast::Serialization::Family::Reader reader) { // fill in the count info from the reader NvBlastAssetMemSizeData sizeData; sizeData.bondCount = reader.getBondCount(); sizeData.chunkCount = reader.getChunkCount(); sizeData.nodeCount = reader.getNodeCount(); sizeData.lowerSupportChunkCount = reader.getLowerSupportChunkCount(); sizeData.upperSupportChunkCount = reader.getUpperSupportChunkCount(); // allocate enough space to hold the family const size_t familySize = NvBlastAssetGetFamilyMemorySizeFromSizeData(sizeData, nullptr); void* mem = NVBLAST_ALLOC(familySize); // use the count info to initialize the family auto family = reinterpret_cast<Nv::Blast::FamilyHeader *>(NvBlastAssetCreateFamilyFromSizeData(mem, sizeData, Nv::Blast::logLL)); // then fill in the data from the reader if (deserializeInto(reader, family)) return family; // failed to deserialize, free the allocated memory so it doesn't leak NVBLAST_FREE(mem); return nullptr; } bool FamilyDTO::deserializeInto(Nv::Blast::Serialization::Family::Reader reader, Nv::Blast::FamilyHeader* poco) { NvBlastIDDTO::deserializeInto(reader.getAssetID(), &poco->m_assetID); // active actor count poco->m_actorCount = reader.getActorCount(); // all possible actors Actor* actors = poco->getActors(); auto readerActors = reader.getActors(); NVBLAST_ASSERT(poco->m_actorCount <= readerActors.size()); for (uint32_t i = 0; i < readerActors.size(); i++) { auto actorReader = readerActors[i]; ActorDTO::deserializeInto(actorReader, &actors[i]); } // visibleChunkIndexLinks // they are stored in the buffer as a flat list of uint32_t values, // but stored as pairs in the Family auto readerVisibleChunkIndexLinks = reader.getVisibleChunkIndexLinks(); const uint32_t numVisibleChunkIndexLinks = readerVisibleChunkIndexLinks.size(); for (uint32_t i = 0; i < numVisibleChunkIndexLinks; i += 2) { const uint32_t vcil = i / 2; poco->getVisibleChunkIndexLinks()[vcil].m_adj[0] = readerVisibleChunkIndexLinks[i]; poco->getVisibleChunkIndexLinks()[vcil].m_adj[1] = readerVisibleChunkIndexLinks[i+1]; } // chunkActorIndices auto readerChunkActorIndices = reader.getChunkActorIndices(); const uint32_t numChunkActorIndices = readerChunkActorIndices.size(); for (uint32_t i = 0; i < numChunkActorIndices; i++) { poco->getChunkActorIndices()[i] = readerChunkActorIndices[i]; } // graphNodeIndexLinks auto readerGraphNodeIndexLinks = reader.getGraphNodeIndexLinks(); const uint32_t numGraphNodeIndexLinks = readerGraphNodeIndexLinks.size(); for (uint32_t i = 0; i < numGraphNodeIndexLinks; i++) { poco->getGraphNodeIndexLinks()[i] = readerGraphNodeIndexLinks[i]; } // lowerSupportChunkHealths auto readerLowerSupportChunkHealths = reader.getLowerSupportChunkHealths(); const uint32_t numLowerSupportChunkHealths = readerLowerSupportChunkHealths.size(); for (uint32_t i = 0; i < numLowerSupportChunkHealths; i++) { poco->getLowerSupportChunkHealths()[i] = readerLowerSupportChunkHealths[i]; } // graphBondHealths auto 
readerGraphBondHealths = reader.getGraphBondHealths(); const uint32_t numGraphBondHealths = readerGraphBondHealths.size(); for (uint32_t i = 0; i < numGraphBondHealths; i++) { poco->getBondHealths()[i] = readerGraphBondHealths[i]; } // familyGraph FamilyGraphDTO::deserializeInto(reader.getFamilyGraph(), poco->getFamilyGraph()); return true; } } // namespace Blast } // namespace Nv
8,006
C++
39.64467
133
0.738696
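FamilyDTO stores visibleChunkIndexLinks as index pairs in the family but serializes them as one flat uint32_t list, so deserializeInto walks the flat list two entries at a time and writes entry i into pair i / 2. The sketch below isolates that unflattening step; IndexLink and unflattenLinks are stand-ins (only the m_adj pair layout and the i / 2 indexing are taken from the file).

#include <cassert>
#include <cstdint>
#include <vector>

// Stand-in for the family's index-link pair type.
struct IndexLink { uint32_t m_adj[2]; };

// Flat list of 2*N values -> N pairs, matching the indexing in FamilyDTO::deserializeInto.
void unflattenLinks(const std::vector<uint32_t>& flat, std::vector<IndexLink>& links)
{
    links.resize(flat.size() / 2);
    for (uint32_t i = 0; i + 1 < flat.size(); i += 2)
    {
        links[i / 2].m_adj[0] = flat[i];
        links[i / 2].m_adj[1] = flat[i + 1];
    }
}

int main()
{
    std::vector<uint32_t> flat = { 10, 11, 20, 21 };
    std::vector<IndexLink> links;
    unflattenLinks(flat, links);
    assert(links.size() == 2 && links[1].m_adj[0] == 20 && links[1].m_adj[1] == 21);
    return 0;
}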
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/FamilyGraphDTO.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "FamilyGraphDTO.h" #include "NvBlastGlobals.h" namespace Nv { namespace Blast { bool FamilyGraphDTO::serialize(Nv::Blast::Serialization::FamilyGraph::Builder builder, const Nv::Blast::FamilyGraph * poco) { // this needs to be set externally so we have access to it here const uint32_t nodeCount = builder.getNodeCount(); kj::ArrayPtr<IslandId> islandIdsArray(poco->getIslandIds(), nodeCount); builder.setIslandIds(islandIdsArray); kj::ArrayPtr<NodeIndex> dirtyNodeLinksArray(poco->getDirtyNodeLinks(), nodeCount); builder.setDirtyNodeLinks(dirtyNodeLinksArray); kj::ArrayPtr<uint32_t> firstDirtyNodeIndicesArray(poco->getFirstDirtyNodeIndices(), nodeCount); builder.setFirstDirtyNodeIndices(firstDirtyNodeIndicesArray); kj::ArrayPtr<NodeIndex> fastRouteArray(poco->getFastRoute(), nodeCount); builder.setFastRoute(fastRouteArray); kj::ArrayPtr<uint32_t> hopCountsArray(poco->getHopCounts(), nodeCount); builder.setHopCounts(hopCountsArray); auto isEdgeRemoved = poco->getIsEdgeRemoved(); uint8_t* isEdgeRemovedData = reinterpret_cast<uint8_t*>(const_cast<char*>(isEdgeRemoved->getData())); capnp::Data::Reader isEdgeRemovedReader(isEdgeRemovedData, isEdgeRemoved->getSize()); builder.setIsEdgeRemoved(isEdgeRemovedReader); auto isNodeInDirtyList = poco->getIsNodeInDirtyList(); uint8_t* isNodeInDirtyListData = reinterpret_cast<uint8_t*>(const_cast<char*>(isNodeInDirtyList->getData())); capnp::Data::Reader isNodeInDirtyListReader(isNodeInDirtyListData, isNodeInDirtyList->getSize()); builder.setIsNodeInDirtyList(isNodeInDirtyListReader); return true; } Nv::Blast::FamilyGraph* FamilyGraphDTO::deserialize(Nv::Blast::Serialization::FamilyGraph::Reader reader) { NV_UNUSED(reader); return nullptr; } bool FamilyGraphDTO::deserializeInto(Nv::Blast::Serialization::FamilyGraph::Reader reader, Nv::Blast::FamilyGraph * poco) { auto readerIslandIds = reader.getIslandIds(); const uint32_t numIslandIds = readerIslandIds.size(); for (uint32_t i = 0; i < numIslandIds; i++) { poco->getIslandIds()[i] = 
readerIslandIds[i]; } auto readerDirtyNodeLinks = reader.getDirtyNodeLinks(); const uint32_t numDirtyNodeLinks = readerDirtyNodeLinks.size(); for (uint32_t i = 0; i < numDirtyNodeLinks; i++) { poco->getDirtyNodeLinks()[i] = readerDirtyNodeLinks[i]; } auto readerFirstDirtyNodeIndices = reader.getFirstDirtyNodeIndices(); const uint32_t numFirstDirtyNodeIndices = readerFirstDirtyNodeIndices.size(); for (uint32_t i = 0; i < numFirstDirtyNodeIndices; i++) { poco->getFirstDirtyNodeIndices()[i] = readerFirstDirtyNodeIndices[i]; } auto readerFastRoute = reader.getFastRoute(); const uint32_t numFastRoute = readerFastRoute.size(); for (uint32_t i = 0; i < numFastRoute; i++) { poco->getFastRoute()[i] = readerFastRoute[i]; } auto readerHopCounts = reader.getHopCounts(); const uint32_t numHopCounts = readerHopCounts.size(); for (uint32_t i = 0; i < numHopCounts; i++) { poco->getHopCounts()[i] = readerHopCounts[i]; } auto readerIsEdgeRemoved = reader.getIsEdgeRemoved(); const uint32_t numIsEdgeRemoved = readerIsEdgeRemoved.size(); const char* isEdgeRemovedData = reinterpret_cast<const char*>(readerIsEdgeRemoved.begin()); auto isEdgeRemoved = poco->getIsEdgeRemoved(); isEdgeRemoved->setData(isEdgeRemovedData, numIsEdgeRemoved); auto readerIsNodeInDirtyList = reader.getIsNodeInDirtyList(); const uint32_t numIsNodeInDirtyList = readerIsNodeInDirtyList.size(); const char* readerIsNodeInDirtyListData = reinterpret_cast<const char*>(readerIsNodeInDirtyList.begin()); auto isNodeInDirtyList = poco->getIsNodeInDirtyList(); isNodeInDirtyList->setData(readerIsNodeInDirtyListData, numIsNodeInDirtyList); return true; } } // namespace Blast } // namespace Nv
5,532
C++
40.916666
123
0.743312
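FamilyGraphDTO does not serialize isEdgeRemoved and isNodeInDirtyList flag by flag; it wraps each container's backing bytes in a capnp::Data reader and copies the blob verbatim, which preserves whatever bit packing the container uses. The sketch below shows the underlying idea with a stand-in type: PackedBits is hypothetical, merely mimicking the getData()/getSize()/setData() surface used above.

#include <cassert>
#include <cstdint>
#include <vector>

// Stand-in for a packed flag array exposed as raw bytes.
struct PackedBits
{
    std::vector<uint8_t> bytes;

    const char* getData() const { return reinterpret_cast<const char*>(bytes.data()); }
    uint32_t getSize() const { return static_cast<uint32_t>(bytes.size()); }
    void setData(const char* data, uint32_t size)
    {
        bytes.assign(reinterpret_cast<const uint8_t*>(data), reinterpret_cast<const uint8_t*>(data) + size);
    }
};

int main()
{
    // "Serialize": copy the packed bytes into a blob without interpreting individual bits.
    PackedBits src;
    src.bytes = { 0xA5, 0x01 };  // e.g. 10 flags packed into 2 bytes
    std::vector<char> blob(src.getData(), src.getData() + src.getSize());

    // "Deserialize": hand the blob straight back to the destination array.
    PackedBits dst;
    dst.setData(blob.data(), static_cast<uint32_t>(blob.size()));
    assert(dst.bytes == src.bytes);
    return 0;
}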
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/NvBlastIDDTO.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastIDDTO.h" #include "NvBlastTypes.h" #include "NvBlastAssert.h" #include "NvBlastExtLlSerialization-capn.h" namespace Nv { namespace Blast { bool NvBlastIDDTO::serialize(Nv::Blast::Serialization::UUID::Builder builder, const NvBlastID * poco) { capnp::Data::Reader idArrayReader((unsigned char *)poco->data, 16); builder.setValue(idArrayReader); return true; } NvBlastID* NvBlastIDDTO::deserialize(Nv::Blast::Serialization::UUID::Reader reader) { //FIXME NV_UNUSED(reader); //TODO: Allocate with ExtContext and return return nullptr; } bool NvBlastIDDTO::deserializeInto(Nv::Blast::Serialization::UUID::Reader reader, NvBlastID * poco) { NVBLAST_ASSERT_WITH_MESSAGE(reader.getValue().size() == 16, "BlastID must be 16 bytes"); memcpy(poco, reader.getValue().begin(), 16); return true; } } // namespace Blast } // namespace Nv
2,466
C++
34.242857
101
0.745742
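NvBlastIDDTO treats the asset ID as an opaque 16-byte value: serialization wraps the bytes in a Data blob and deserialization checks the size before a straight memcpy. A tiny round-trip sketch of that idea follows; BlastId is a stand-in for the real NvBlastID from NvBlastTypes.h.

#include <cassert>
#include <cstring>
#include <vector>

// Stand-in for NvBlastID: exactly 16 opaque bytes.
struct BlastId { unsigned char data[16]; };

int main()
{
    BlastId original;
    for (int i = 0; i < 16; ++i)
        original.data[i] = static_cast<unsigned char>(i);

    // Serialize: write the ID as a raw 16-byte blob.
    std::vector<unsigned char> blob(original.data, original.data + 16);

    // Deserialize: verify the size, then copy straight into the destination ID.
    BlastId restored;
    assert(blob.size() == 16 && "BlastID must be 16 bytes");
    std::memcpy(&restored, blob.data(), 16);

    assert(std::memcmp(&original, &restored, 16) == 0);
    return 0;
}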
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/generated/NvBlastExtLlSerialization-capn.h
// Generated by Cap'n Proto compiler, DO NOT EDIT // source: NvBlastExtLlSerialization-capn #ifndef CAPNP_INCLUDED_9a4a58fac38375e0_ #define CAPNP_INCLUDED_9a4a58fac38375e0_ #include <capnp/generated-header-support.h> #if CAPNP_VERSION != 6001 #error "Version mismatch between generated code and library headers. You must use the same version of the Cap'n Proto compiler and library." #endif namespace capnp { namespace schemas { CAPNP_DECLARE_SCHEMA(ce4f8468c36f427d); CAPNP_DECLARE_SCHEMA(fe6948a9a6a3eff5); CAPNP_DECLARE_SCHEMA(d20ccbe36dd9711d); CAPNP_DECLARE_SCHEMA(8a38616881ef8310); CAPNP_DECLARE_SCHEMA(d5e1a9fb31b1350d); CAPNP_DECLARE_SCHEMA(b292bd608606f041); enum class Type_b292bd608606f041: uint16_t { ASSET_DATA_BLOCK, INSTANCE_DATA_BLOCK, }; CAPNP_DECLARE_ENUM(Type, b292bd608606f041); CAPNP_DECLARE_SCHEMA(92818c664a7b1aba); CAPNP_DECLARE_SCHEMA(c43da43c95eada67); CAPNP_DECLARE_SCHEMA(f018cbfcaacb3a55); CAPNP_DECLARE_SCHEMA(bfd00835cc19bf3a); } // namespace schemas } // namespace capnp namespace Nv { namespace Blast { namespace Serialization { struct Asset { Asset() = delete; class Reader; class Builder; class Pipeline; struct _capnpPrivate { CAPNP_DECLARE_STRUCT_HEADER(ce4f8468c36f427d, 2, 7) #if !CAPNP_LITE static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; } #endif // !CAPNP_LITE }; }; struct Family { Family() = delete; class Reader; class Builder; class Pipeline; struct _capnpPrivate { CAPNP_DECLARE_STRUCT_HEADER(fe6948a9a6a3eff5, 3, 8) #if !CAPNP_LITE static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; } #endif // !CAPNP_LITE }; }; struct Actor { Actor() = delete; class Reader; class Builder; class Pipeline; struct _capnpPrivate { CAPNP_DECLARE_STRUCT_HEADER(d20ccbe36dd9711d, 3, 0) #if !CAPNP_LITE static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; } #endif // !CAPNP_LITE }; }; struct FamilyGraph { FamilyGraph() = delete; class Reader; class Builder; class Pipeline; struct _capnpPrivate { CAPNP_DECLARE_STRUCT_HEADER(8a38616881ef8310, 1, 7) #if !CAPNP_LITE static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; } #endif // !CAPNP_LITE }; }; struct NvBlastDataBlock { NvBlastDataBlock() = delete; class Reader; class Builder; class Pipeline; typedef ::capnp::schemas::Type_b292bd608606f041 Type; struct _capnpPrivate { CAPNP_DECLARE_STRUCT_HEADER(d5e1a9fb31b1350d, 2, 0) #if !CAPNP_LITE static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; } #endif // !CAPNP_LITE }; }; struct NvBlastChunk { NvBlastChunk() = delete; class Reader; class Builder; class Pipeline; struct _capnpPrivate { CAPNP_DECLARE_STRUCT_HEADER(92818c664a7b1aba, 3, 1) #if !CAPNP_LITE static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; } #endif // !CAPNP_LITE }; }; struct NvBlastBond { NvBlastBond() = delete; class Reader; class Builder; class Pipeline; struct _capnpPrivate { CAPNP_DECLARE_STRUCT_HEADER(c43da43c95eada67, 1, 2) #if !CAPNP_LITE static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; } #endif // !CAPNP_LITE }; }; struct NvBlastSupportGraph { NvBlastSupportGraph() = delete; class Reader; class Builder; class Pipeline; struct _capnpPrivate { CAPNP_DECLARE_STRUCT_HEADER(f018cbfcaacb3a55, 1, 4) #if !CAPNP_LITE static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; } #endif // !CAPNP_LITE }; }; struct 
UUID { UUID() = delete; class Reader; class Builder; class Pipeline; struct _capnpPrivate { CAPNP_DECLARE_STRUCT_HEADER(bfd00835cc19bf3a, 0, 1) #if !CAPNP_LITE static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; } #endif // !CAPNP_LITE }; }; // ======================================================================================= class Asset::Reader { public: typedef Asset Reads; Reader() = default; inline explicit Reader(::capnp::_::StructReader base): _reader(base) {} inline ::capnp::MessageSize totalSize() const { return _reader.totalSize().asPublic(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return ::capnp::_::structString(_reader, *_capnpPrivate::brand()); } #endif // !CAPNP_LITE inline bool hasHeader() const; inline ::Nv::Blast::Serialization::NvBlastDataBlock::Reader getHeader() const; inline bool hasID() const; inline ::Nv::Blast::Serialization::UUID::Reader getID() const; inline ::uint32_t getChunkCount() const; inline bool hasGraph() const; inline ::Nv::Blast::Serialization::NvBlastSupportGraph::Reader getGraph() const; inline ::uint32_t getLeafChunkCount() const; inline ::uint32_t getFirstSubsupportChunkIndex() const; inline ::uint32_t getBondCount() const; inline bool hasChunks() const; inline ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>::Reader getChunks() const; inline bool hasBonds() const; inline ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>::Reader getBonds() const; inline bool hasSubtreeLeafChunkCounts() const; inline ::capnp::List< ::uint32_t>::Reader getSubtreeLeafChunkCounts() const; inline bool hasChunkToGraphNodeMap() const; inline ::capnp::List< ::uint32_t>::Reader getChunkToGraphNodeMap() const; private: ::capnp::_::StructReader _reader; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; template <typename, ::capnp::Kind> friend struct ::capnp::List; friend class ::capnp::MessageBuilder; friend class ::capnp::Orphanage; }; class Asset::Builder { public: typedef Asset Builds; Builder() = delete; // Deleted to discourage incorrect usage. // You can explicitly initialize to nullptr instead. 
inline Builder(decltype(nullptr)) {} inline explicit Builder(::capnp::_::StructBuilder base): _builder(base) {} inline operator Reader() const { return Reader(_builder.asReader()); } inline Reader asReader() const { return *this; } inline ::capnp::MessageSize totalSize() const { return asReader().totalSize(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return asReader().toString(); } #endif // !CAPNP_LITE inline bool hasHeader(); inline ::Nv::Blast::Serialization::NvBlastDataBlock::Builder getHeader(); inline void setHeader( ::Nv::Blast::Serialization::NvBlastDataBlock::Reader value); inline ::Nv::Blast::Serialization::NvBlastDataBlock::Builder initHeader(); inline void adoptHeader(::capnp::Orphan< ::Nv::Blast::Serialization::NvBlastDataBlock>&& value); inline ::capnp::Orphan< ::Nv::Blast::Serialization::NvBlastDataBlock> disownHeader(); inline bool hasID(); inline ::Nv::Blast::Serialization::UUID::Builder getID(); inline void setID( ::Nv::Blast::Serialization::UUID::Reader value); inline ::Nv::Blast::Serialization::UUID::Builder initID(); inline void adoptID(::capnp::Orphan< ::Nv::Blast::Serialization::UUID>&& value); inline ::capnp::Orphan< ::Nv::Blast::Serialization::UUID> disownID(); inline ::uint32_t getChunkCount(); inline void setChunkCount( ::uint32_t value); inline bool hasGraph(); inline ::Nv::Blast::Serialization::NvBlastSupportGraph::Builder getGraph(); inline void setGraph( ::Nv::Blast::Serialization::NvBlastSupportGraph::Reader value); inline ::Nv::Blast::Serialization::NvBlastSupportGraph::Builder initGraph(); inline void adoptGraph(::capnp::Orphan< ::Nv::Blast::Serialization::NvBlastSupportGraph>&& value); inline ::capnp::Orphan< ::Nv::Blast::Serialization::NvBlastSupportGraph> disownGraph(); inline ::uint32_t getLeafChunkCount(); inline void setLeafChunkCount( ::uint32_t value); inline ::uint32_t getFirstSubsupportChunkIndex(); inline void setFirstSubsupportChunkIndex( ::uint32_t value); inline ::uint32_t getBondCount(); inline void setBondCount( ::uint32_t value); inline bool hasChunks(); inline ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>::Builder getChunks(); inline void setChunks( ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>::Reader value); inline ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>::Builder initChunks(unsigned int size); inline void adoptChunks(::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>>&& value); inline ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>> disownChunks(); inline bool hasBonds(); inline ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>::Builder getBonds(); inline void setBonds( ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>::Reader value); inline ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>::Builder initBonds(unsigned int size); inline void adoptBonds(::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>>&& value); inline ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>> disownBonds(); inline bool hasSubtreeLeafChunkCounts(); inline ::capnp::List< ::uint32_t>::Builder getSubtreeLeafChunkCounts(); inline void setSubtreeLeafChunkCounts( ::capnp::List< ::uint32_t>::Reader value); inline void setSubtreeLeafChunkCounts(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initSubtreeLeafChunkCounts(unsigned int size); inline void adoptSubtreeLeafChunkCounts(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< 
::capnp::List< ::uint32_t>> disownSubtreeLeafChunkCounts(); inline bool hasChunkToGraphNodeMap(); inline ::capnp::List< ::uint32_t>::Builder getChunkToGraphNodeMap(); inline void setChunkToGraphNodeMap( ::capnp::List< ::uint32_t>::Reader value); inline void setChunkToGraphNodeMap(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initChunkToGraphNodeMap(unsigned int size); inline void adoptChunkToGraphNodeMap(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownChunkToGraphNodeMap(); private: ::capnp::_::StructBuilder _builder; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; friend class ::capnp::Orphanage; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; }; #if !CAPNP_LITE class Asset::Pipeline { public: typedef Asset Pipelines; inline Pipeline(decltype(nullptr)): _typeless(nullptr) {} inline explicit Pipeline(::capnp::AnyPointer::Pipeline&& typeless) : _typeless(kj::mv(typeless)) {} inline ::Nv::Blast::Serialization::NvBlastDataBlock::Pipeline getHeader(); inline ::Nv::Blast::Serialization::UUID::Pipeline getID(); inline ::Nv::Blast::Serialization::NvBlastSupportGraph::Pipeline getGraph(); private: ::capnp::AnyPointer::Pipeline _typeless; friend class ::capnp::PipelineHook; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; }; #endif // !CAPNP_LITE class Family::Reader { public: typedef Family Reads; Reader() = default; inline explicit Reader(::capnp::_::StructReader base): _reader(base) {} inline ::capnp::MessageSize totalSize() const { return _reader.totalSize().asPublic(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return ::capnp::_::structString(_reader, *_capnpPrivate::brand()); } #endif // !CAPNP_LITE inline bool hasAssetID() const; inline ::Nv::Blast::Serialization::UUID::Reader getAssetID() const; inline bool hasActors() const; inline ::capnp::List< ::Nv::Blast::Serialization::Actor>::Reader getActors() const; inline bool hasVisibleChunkIndexLinks() const; inline ::capnp::List< ::uint32_t>::Reader getVisibleChunkIndexLinks() const; inline bool hasChunkActorIndices() const; inline ::capnp::List< ::uint32_t>::Reader getChunkActorIndices() const; inline bool hasGraphNodeIndexLinks() const; inline ::capnp::List< ::uint32_t>::Reader getGraphNodeIndexLinks() const; inline bool hasLowerSupportChunkHealths() const; inline ::capnp::List<float>::Reader getLowerSupportChunkHealths() const; inline bool hasGraphBondHealths() const; inline ::capnp::List<float>::Reader getGraphBondHealths() const; inline bool hasFamilyGraph() const; inline ::Nv::Blast::Serialization::FamilyGraph::Reader getFamilyGraph() const; inline ::uint32_t getActorCount() const; inline ::uint32_t getBondCount() const; inline ::uint32_t getChunkCount() const; inline ::uint32_t getNodeCount() const; inline ::uint32_t getLowerSupportChunkCount() const; inline ::uint32_t getUpperSupportChunkCount() const; private: ::capnp::_::StructReader _reader; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; template <typename, ::capnp::Kind> friend struct ::capnp::List; friend class ::capnp::MessageBuilder; friend class ::capnp::Orphanage; }; class Family::Builder { public: typedef Family Builds; Builder() = delete; // Deleted to discourage incorrect usage. // You can explicitly initialize to nullptr instead. 
  inline Builder(decltype(nullptr)) {}
  inline explicit Builder(::capnp::_::StructBuilder base): _builder(base) {}
  inline operator Reader() const { return Reader(_builder.asReader()); }
  inline Reader asReader() const { return *this; }

  inline ::capnp::MessageSize totalSize() const { return asReader().totalSize(); }
#if !CAPNP_LITE
  inline ::kj::StringTree toString() const { return asReader().toString(); }
#endif // !CAPNP_LITE

  inline bool hasAssetID();
  inline ::Nv::Blast::Serialization::UUID::Builder getAssetID();
  inline void setAssetID( ::Nv::Blast::Serialization::UUID::Reader value);
  inline ::Nv::Blast::Serialization::UUID::Builder initAssetID();
  inline void adoptAssetID(::capnp::Orphan< ::Nv::Blast::Serialization::UUID>&& value);
  inline ::capnp::Orphan< ::Nv::Blast::Serialization::UUID> disownAssetID();

  inline bool hasActors();
  inline ::capnp::List< ::Nv::Blast::Serialization::Actor>::Builder getActors();
  inline void setActors( ::capnp::List< ::Nv::Blast::Serialization::Actor>::Reader value);
  inline ::capnp::List< ::Nv::Blast::Serialization::Actor>::Builder initActors(unsigned int size);
  inline void adoptActors(::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::Actor>>&& value);
  inline ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::Actor>> disownActors();

  inline bool hasVisibleChunkIndexLinks();
  inline ::capnp::List< ::uint32_t>::Builder getVisibleChunkIndexLinks();
  inline void setVisibleChunkIndexLinks( ::capnp::List< ::uint32_t>::Reader value);
  inline void setVisibleChunkIndexLinks(::kj::ArrayPtr<const ::uint32_t> value);
  inline ::capnp::List< ::uint32_t>::Builder initVisibleChunkIndexLinks(unsigned int size);
  inline void adoptVisibleChunkIndexLinks(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value);
  inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownVisibleChunkIndexLinks();

  inline bool hasChunkActorIndices();
  inline ::capnp::List< ::uint32_t>::Builder getChunkActorIndices();
  inline void setChunkActorIndices( ::capnp::List< ::uint32_t>::Reader value);
  inline void setChunkActorIndices(::kj::ArrayPtr<const ::uint32_t> value);
  inline ::capnp::List< ::uint32_t>::Builder initChunkActorIndices(unsigned int size);
  inline void adoptChunkActorIndices(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value);
  inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownChunkActorIndices();

  inline bool hasGraphNodeIndexLinks();
  inline ::capnp::List< ::uint32_t>::Builder getGraphNodeIndexLinks();
  inline void setGraphNodeIndexLinks( ::capnp::List< ::uint32_t>::Reader value);
  inline void setGraphNodeIndexLinks(::kj::ArrayPtr<const ::uint32_t> value);
  inline ::capnp::List< ::uint32_t>::Builder initGraphNodeIndexLinks(unsigned int size);
  inline void adoptGraphNodeIndexLinks(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value);
  inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownGraphNodeIndexLinks();

  inline bool hasLowerSupportChunkHealths();
  inline ::capnp::List<float>::Builder getLowerSupportChunkHealths();
  inline void setLowerSupportChunkHealths( ::capnp::List<float>::Reader value);
  inline void setLowerSupportChunkHealths(::kj::ArrayPtr<const float> value);
  inline ::capnp::List<float>::Builder initLowerSupportChunkHealths(unsigned int size);
  inline void adoptLowerSupportChunkHealths(::capnp::Orphan< ::capnp::List<float>>&& value);
  inline ::capnp::Orphan< ::capnp::List<float>> disownLowerSupportChunkHealths();

  inline bool hasGraphBondHealths();
  inline ::capnp::List<float>::Builder getGraphBondHealths();
  inline void setGraphBondHealths( ::capnp::List<float>::Reader value);
  inline void setGraphBondHealths(::kj::ArrayPtr<const float> value);
  inline ::capnp::List<float>::Builder initGraphBondHealths(unsigned int size);
  inline void adoptGraphBondHealths(::capnp::Orphan< ::capnp::List<float>>&& value);
  inline ::capnp::Orphan< ::capnp::List<float>> disownGraphBondHealths();

  inline bool hasFamilyGraph();
  inline ::Nv::Blast::Serialization::FamilyGraph::Builder getFamilyGraph();
  inline void setFamilyGraph( ::Nv::Blast::Serialization::FamilyGraph::Reader value);
  inline ::Nv::Blast::Serialization::FamilyGraph::Builder initFamilyGraph();
  inline void adoptFamilyGraph(::capnp::Orphan< ::Nv::Blast::Serialization::FamilyGraph>&& value);
  inline ::capnp::Orphan< ::Nv::Blast::Serialization::FamilyGraph> disownFamilyGraph();

  inline ::uint32_t getActorCount();
  inline void setActorCount( ::uint32_t value);

  inline ::uint32_t getBondCount();
  inline void setBondCount( ::uint32_t value);

  inline ::uint32_t getChunkCount();
  inline void setChunkCount( ::uint32_t value);

  inline ::uint32_t getNodeCount();
  inline void setNodeCount( ::uint32_t value);

  inline ::uint32_t getLowerSupportChunkCount();
  inline void setLowerSupportChunkCount( ::uint32_t value);

  inline ::uint32_t getUpperSupportChunkCount();
  inline void setUpperSupportChunkCount( ::uint32_t value);

private:
  ::capnp::_::StructBuilder _builder;
  template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_;
  friend class ::capnp::Orphanage;
  template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers;
};

#if !CAPNP_LITE
class Family::Pipeline {
public:
  typedef Family Pipelines;

  inline Pipeline(decltype(nullptr)): _typeless(nullptr) {}
  inline explicit Pipeline(::capnp::AnyPointer::Pipeline&& typeless) : _typeless(kj::mv(typeless)) {}

  inline ::Nv::Blast::Serialization::UUID::Pipeline getAssetID();
  inline ::Nv::Blast::Serialization::FamilyGraph::Pipeline getFamilyGraph();
private:
  ::capnp::AnyPointer::Pipeline _typeless;
  friend class ::capnp::PipelineHook;
  template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_;
};
#endif // !CAPNP_LITE

class Actor::Reader {
public:
  typedef Actor Reads;

  Reader() = default;
  inline explicit Reader(::capnp::_::StructReader base): _reader(base) {}

  inline ::capnp::MessageSize totalSize() const { return _reader.totalSize().asPublic(); }
#if !CAPNP_LITE
  inline ::kj::StringTree toString() const { return ::capnp::_::structString(_reader, *_capnpPrivate::brand()); }
#endif // !CAPNP_LITE

  inline ::uint32_t getFamilyOffset() const;
  inline ::uint32_t getFirstVisibleChunkIndex() const;
  inline ::uint32_t getVisibleChunkCount() const;
  inline ::uint32_t getFirstGraphNodeIndex() const;
  inline ::uint32_t getGraphNodeCount() const;
  inline ::uint32_t getLeafChunkCount() const;

private:
  ::capnp::_::StructReader _reader;
  template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_;
  template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers;
  template <typename, ::capnp::Kind> friend struct ::capnp::List;
  friend class ::capnp::MessageBuilder;
  friend class ::capnp::Orphanage;
};

class Actor::Builder {
public:
  typedef Actor Builds;

  Builder() = delete; // Deleted to discourage incorrect usage.
                      // You can explicitly initialize to nullptr instead.
  inline Builder(decltype(nullptr)) {}
  inline explicit Builder(::capnp::_::StructBuilder base): _builder(base) {}
  inline operator Reader() const { return Reader(_builder.asReader()); }
  inline Reader asReader() const { return *this; }

  inline ::capnp::MessageSize totalSize() const { return asReader().totalSize(); }
#if !CAPNP_LITE
  inline ::kj::StringTree toString() const { return asReader().toString(); }
#endif // !CAPNP_LITE

  inline ::uint32_t getFamilyOffset();
  inline void setFamilyOffset( ::uint32_t value);

  inline ::uint32_t getFirstVisibleChunkIndex();
  inline void setFirstVisibleChunkIndex( ::uint32_t value);

  inline ::uint32_t getVisibleChunkCount();
  inline void setVisibleChunkCount( ::uint32_t value);

  inline ::uint32_t getFirstGraphNodeIndex();
  inline void setFirstGraphNodeIndex( ::uint32_t value);

  inline ::uint32_t getGraphNodeCount();
  inline void setGraphNodeCount( ::uint32_t value);

  inline ::uint32_t getLeafChunkCount();
  inline void setLeafChunkCount( ::uint32_t value);

private:
  ::capnp::_::StructBuilder _builder;
  template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_;
  friend class ::capnp::Orphanage;
  template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers;
};

#if !CAPNP_LITE
class Actor::Pipeline {
public:
  typedef Actor Pipelines;

  inline Pipeline(decltype(nullptr)): _typeless(nullptr) {}
  inline explicit Pipeline(::capnp::AnyPointer::Pipeline&& typeless) : _typeless(kj::mv(typeless)) {}

private:
  ::capnp::AnyPointer::Pipeline _typeless;
  friend class ::capnp::PipelineHook;
  template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_;
};
#endif // !CAPNP_LITE

class FamilyGraph::Reader {
public:
  typedef FamilyGraph Reads;

  Reader() = default;
  inline explicit Reader(::capnp::_::StructReader base): _reader(base) {}

  inline ::capnp::MessageSize totalSize() const { return _reader.totalSize().asPublic(); }
#if !CAPNP_LITE
  inline ::kj::StringTree toString() const { return ::capnp::_::structString(_reader, *_capnpPrivate::brand()); }
#endif // !CAPNP_LITE

  inline bool hasIslandIds() const;
  inline ::capnp::List< ::uint32_t>::Reader getIslandIds() const;

  inline bool hasDirtyNodeLinks() const;
  inline ::capnp::List< ::uint32_t>::Reader getDirtyNodeLinks() const;

  inline bool hasFirstDirtyNodeIndices() const;
  inline ::capnp::List< ::uint32_t>::Reader getFirstDirtyNodeIndices() const;

  inline bool hasFastRoute() const;
  inline ::capnp::List< ::uint32_t>::Reader getFastRoute() const;

  inline bool hasHopCounts() const;
  inline ::capnp::List< ::uint32_t>::Reader getHopCounts() const;

  inline bool hasIsEdgeRemoved() const;
  inline ::capnp::Data::Reader getIsEdgeRemoved() const;

  inline bool hasIsNodeInDirtyList() const;
  inline ::capnp::Data::Reader getIsNodeInDirtyList() const;

  inline ::uint32_t getNodeCount() const;

private:
  ::capnp::_::StructReader _reader;
  template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_;
  template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers;
  template <typename, ::capnp::Kind> friend struct ::capnp::List;
  friend class ::capnp::MessageBuilder;
  friend class ::capnp::Orphanage;
};

class FamilyGraph::Builder {
public:
  typedef FamilyGraph Builds;

  Builder() = delete; // Deleted to discourage incorrect usage.
                      // You can explicitly initialize to nullptr instead.
  inline Builder(decltype(nullptr)) {}
  inline explicit Builder(::capnp::_::StructBuilder base): _builder(base) {}
  inline operator Reader() const { return Reader(_builder.asReader()); }
  inline Reader asReader() const { return *this; }

  inline ::capnp::MessageSize totalSize() const { return asReader().totalSize(); }
#if !CAPNP_LITE
  inline ::kj::StringTree toString() const { return asReader().toString(); }
#endif // !CAPNP_LITE

  inline bool hasIslandIds();
  inline ::capnp::List< ::uint32_t>::Builder getIslandIds();
  inline void setIslandIds( ::capnp::List< ::uint32_t>::Reader value);
  inline void setIslandIds(::kj::ArrayPtr<const ::uint32_t> value);
  inline ::capnp::List< ::uint32_t>::Builder initIslandIds(unsigned int size);
  inline void adoptIslandIds(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value);
  inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownIslandIds();

  inline bool hasDirtyNodeLinks();
  inline ::capnp::List< ::uint32_t>::Builder getDirtyNodeLinks();
  inline void setDirtyNodeLinks( ::capnp::List< ::uint32_t>::Reader value);
  inline void setDirtyNodeLinks(::kj::ArrayPtr<const ::uint32_t> value);
  inline ::capnp::List< ::uint32_t>::Builder initDirtyNodeLinks(unsigned int size);
  inline void adoptDirtyNodeLinks(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value);
  inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownDirtyNodeLinks();

  inline bool hasFirstDirtyNodeIndices();
  inline ::capnp::List< ::uint32_t>::Builder getFirstDirtyNodeIndices();
  inline void setFirstDirtyNodeIndices( ::capnp::List< ::uint32_t>::Reader value);
  inline void setFirstDirtyNodeIndices(::kj::ArrayPtr<const ::uint32_t> value);
  inline ::capnp::List< ::uint32_t>::Builder initFirstDirtyNodeIndices(unsigned int size);
  inline void adoptFirstDirtyNodeIndices(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value);
  inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownFirstDirtyNodeIndices();

  inline bool hasFastRoute();
  inline ::capnp::List< ::uint32_t>::Builder getFastRoute();
  inline void setFastRoute( ::capnp::List< ::uint32_t>::Reader value);
  inline void setFastRoute(::kj::ArrayPtr<const ::uint32_t> value);
  inline ::capnp::List< ::uint32_t>::Builder initFastRoute(unsigned int size);
  inline void adoptFastRoute(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value);
  inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownFastRoute();

  inline bool hasHopCounts();
  inline ::capnp::List< ::uint32_t>::Builder getHopCounts();
  inline void setHopCounts( ::capnp::List< ::uint32_t>::Reader value);
  inline void setHopCounts(::kj::ArrayPtr<const ::uint32_t> value);
  inline ::capnp::List< ::uint32_t>::Builder initHopCounts(unsigned int size);
  inline void adoptHopCounts(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value);
  inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownHopCounts();

  inline bool hasIsEdgeRemoved();
  inline ::capnp::Data::Builder getIsEdgeRemoved();
  inline void setIsEdgeRemoved( ::capnp::Data::Reader value);
  inline ::capnp::Data::Builder initIsEdgeRemoved(unsigned int size);
  inline void adoptIsEdgeRemoved(::capnp::Orphan< ::capnp::Data>&& value);
  inline ::capnp::Orphan< ::capnp::Data> disownIsEdgeRemoved();

  inline bool hasIsNodeInDirtyList();
  inline ::capnp::Data::Builder getIsNodeInDirtyList();
  inline void setIsNodeInDirtyList( ::capnp::Data::Reader value);
  inline ::capnp::Data::Builder initIsNodeInDirtyList(unsigned int size);
  inline void adoptIsNodeInDirtyList(::capnp::Orphan< ::capnp::Data>&& value);
  inline ::capnp::Orphan< ::capnp::Data> disownIsNodeInDirtyList();

  inline ::uint32_t getNodeCount();
inline void setNodeCount( ::uint32_t value); private: ::capnp::_::StructBuilder _builder; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; friend class ::capnp::Orphanage; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; }; #if !CAPNP_LITE class FamilyGraph::Pipeline { public: typedef FamilyGraph Pipelines; inline Pipeline(decltype(nullptr)): _typeless(nullptr) {} inline explicit Pipeline(::capnp::AnyPointer::Pipeline&& typeless) : _typeless(kj::mv(typeless)) {} private: ::capnp::AnyPointer::Pipeline _typeless; friend class ::capnp::PipelineHook; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; }; #endif // !CAPNP_LITE class NvBlastDataBlock::Reader { public: typedef NvBlastDataBlock Reads; Reader() = default; inline explicit Reader(::capnp::_::StructReader base): _reader(base) {} inline ::capnp::MessageSize totalSize() const { return _reader.totalSize().asPublic(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return ::capnp::_::structString(_reader, *_capnpPrivate::brand()); } #endif // !CAPNP_LITE inline ::Nv::Blast::Serialization::NvBlastDataBlock::Type getDataType() const; inline ::uint32_t getFormatVersion() const; inline ::uint32_t getSize() const; private: ::capnp::_::StructReader _reader; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; template <typename, ::capnp::Kind> friend struct ::capnp::List; friend class ::capnp::MessageBuilder; friend class ::capnp::Orphanage; }; class NvBlastDataBlock::Builder { public: typedef NvBlastDataBlock Builds; Builder() = delete; // Deleted to discourage incorrect usage. // You can explicitly initialize to nullptr instead. inline Builder(decltype(nullptr)) {} inline explicit Builder(::capnp::_::StructBuilder base): _builder(base) {} inline operator Reader() const { return Reader(_builder.asReader()); } inline Reader asReader() const { return *this; } inline ::capnp::MessageSize totalSize() const { return asReader().totalSize(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return asReader().toString(); } #endif // !CAPNP_LITE inline ::Nv::Blast::Serialization::NvBlastDataBlock::Type getDataType(); inline void setDataType( ::Nv::Blast::Serialization::NvBlastDataBlock::Type value); inline ::uint32_t getFormatVersion(); inline void setFormatVersion( ::uint32_t value); inline ::uint32_t getSize(); inline void setSize( ::uint32_t value); private: ::capnp::_::StructBuilder _builder; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; friend class ::capnp::Orphanage; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; }; #if !CAPNP_LITE class NvBlastDataBlock::Pipeline { public: typedef NvBlastDataBlock Pipelines; inline Pipeline(decltype(nullptr)): _typeless(nullptr) {} inline explicit Pipeline(::capnp::AnyPointer::Pipeline&& typeless) : _typeless(kj::mv(typeless)) {} private: ::capnp::AnyPointer::Pipeline _typeless; friend class ::capnp::PipelineHook; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; }; #endif // !CAPNP_LITE class NvBlastChunk::Reader { public: typedef NvBlastChunk Reads; Reader() = default; inline explicit Reader(::capnp::_::StructReader base): _reader(base) {} inline ::capnp::MessageSize totalSize() const { return _reader.totalSize().asPublic(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return ::capnp::_::structString(_reader, *_capnpPrivate::brand()); } 
#endif // !CAPNP_LITE inline bool hasCentroid() const; inline ::capnp::List<float>::Reader getCentroid() const; inline float getVolume() const; inline ::uint32_t getParentChunkIndex() const; inline ::uint32_t getFirstChildIndex() const; inline ::uint32_t getChildIndexStop() const; inline ::uint32_t getUserData() const; private: ::capnp::_::StructReader _reader; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; template <typename, ::capnp::Kind> friend struct ::capnp::List; friend class ::capnp::MessageBuilder; friend class ::capnp::Orphanage; }; class NvBlastChunk::Builder { public: typedef NvBlastChunk Builds; Builder() = delete; // Deleted to discourage incorrect usage. // You can explicitly initialize to nullptr instead. inline Builder(decltype(nullptr)) {} inline explicit Builder(::capnp::_::StructBuilder base): _builder(base) {} inline operator Reader() const { return Reader(_builder.asReader()); } inline Reader asReader() const { return *this; } inline ::capnp::MessageSize totalSize() const { return asReader().totalSize(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return asReader().toString(); } #endif // !CAPNP_LITE inline bool hasCentroid(); inline ::capnp::List<float>::Builder getCentroid(); inline void setCentroid( ::capnp::List<float>::Reader value); inline void setCentroid(::kj::ArrayPtr<const float> value); inline ::capnp::List<float>::Builder initCentroid(unsigned int size); inline void adoptCentroid(::capnp::Orphan< ::capnp::List<float>>&& value); inline ::capnp::Orphan< ::capnp::List<float>> disownCentroid(); inline float getVolume(); inline void setVolume(float value); inline ::uint32_t getParentChunkIndex(); inline void setParentChunkIndex( ::uint32_t value); inline ::uint32_t getFirstChildIndex(); inline void setFirstChildIndex( ::uint32_t value); inline ::uint32_t getChildIndexStop(); inline void setChildIndexStop( ::uint32_t value); inline ::uint32_t getUserData(); inline void setUserData( ::uint32_t value); private: ::capnp::_::StructBuilder _builder; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; friend class ::capnp::Orphanage; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; }; #if !CAPNP_LITE class NvBlastChunk::Pipeline { public: typedef NvBlastChunk Pipelines; inline Pipeline(decltype(nullptr)): _typeless(nullptr) {} inline explicit Pipeline(::capnp::AnyPointer::Pipeline&& typeless) : _typeless(kj::mv(typeless)) {} private: ::capnp::AnyPointer::Pipeline _typeless; friend class ::capnp::PipelineHook; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; }; #endif // !CAPNP_LITE class NvBlastBond::Reader { public: typedef NvBlastBond Reads; Reader() = default; inline explicit Reader(::capnp::_::StructReader base): _reader(base) {} inline ::capnp::MessageSize totalSize() const { return _reader.totalSize().asPublic(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return ::capnp::_::structString(_reader, *_capnpPrivate::brand()); } #endif // !CAPNP_LITE inline bool hasNormal() const; inline ::capnp::List<float>::Reader getNormal() const; inline float getArea() const; inline bool hasCentroid() const; inline ::capnp::List<float>::Reader getCentroid() const; inline ::uint32_t getUserData() const; private: ::capnp::_::StructReader _reader; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; template <typename, ::capnp::Kind> friend struct 
::capnp::_::PointerHelpers; template <typename, ::capnp::Kind> friend struct ::capnp::List; friend class ::capnp::MessageBuilder; friend class ::capnp::Orphanage; }; class NvBlastBond::Builder { public: typedef NvBlastBond Builds; Builder() = delete; // Deleted to discourage incorrect usage. // You can explicitly initialize to nullptr instead. inline Builder(decltype(nullptr)) {} inline explicit Builder(::capnp::_::StructBuilder base): _builder(base) {} inline operator Reader() const { return Reader(_builder.asReader()); } inline Reader asReader() const { return *this; } inline ::capnp::MessageSize totalSize() const { return asReader().totalSize(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return asReader().toString(); } #endif // !CAPNP_LITE inline bool hasNormal(); inline ::capnp::List<float>::Builder getNormal(); inline void setNormal( ::capnp::List<float>::Reader value); inline void setNormal(::kj::ArrayPtr<const float> value); inline ::capnp::List<float>::Builder initNormal(unsigned int size); inline void adoptNormal(::capnp::Orphan< ::capnp::List<float>>&& value); inline ::capnp::Orphan< ::capnp::List<float>> disownNormal(); inline float getArea(); inline void setArea(float value); inline bool hasCentroid(); inline ::capnp::List<float>::Builder getCentroid(); inline void setCentroid( ::capnp::List<float>::Reader value); inline void setCentroid(::kj::ArrayPtr<const float> value); inline ::capnp::List<float>::Builder initCentroid(unsigned int size); inline void adoptCentroid(::capnp::Orphan< ::capnp::List<float>>&& value); inline ::capnp::Orphan< ::capnp::List<float>> disownCentroid(); inline ::uint32_t getUserData(); inline void setUserData( ::uint32_t value); private: ::capnp::_::StructBuilder _builder; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; friend class ::capnp::Orphanage; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; }; #if !CAPNP_LITE class NvBlastBond::Pipeline { public: typedef NvBlastBond Pipelines; inline Pipeline(decltype(nullptr)): _typeless(nullptr) {} inline explicit Pipeline(::capnp::AnyPointer::Pipeline&& typeless) : _typeless(kj::mv(typeless)) {} private: ::capnp::AnyPointer::Pipeline _typeless; friend class ::capnp::PipelineHook; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; }; #endif // !CAPNP_LITE class NvBlastSupportGraph::Reader { public: typedef NvBlastSupportGraph Reads; Reader() = default; inline explicit Reader(::capnp::_::StructReader base): _reader(base) {} inline ::capnp::MessageSize totalSize() const { return _reader.totalSize().asPublic(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return ::capnp::_::structString(_reader, *_capnpPrivate::brand()); } #endif // !CAPNP_LITE inline ::uint32_t getNodeCount() const; inline bool hasChunkIndices() const; inline ::capnp::List< ::uint32_t>::Reader getChunkIndices() const; inline bool hasAdjacencyPartition() const; inline ::capnp::List< ::uint32_t>::Reader getAdjacencyPartition() const; inline bool hasAdjacentNodeIndices() const; inline ::capnp::List< ::uint32_t>::Reader getAdjacentNodeIndices() const; inline bool hasAdjacentBondIndices() const; inline ::capnp::List< ::uint32_t>::Reader getAdjacentBondIndices() const; private: ::capnp::_::StructReader _reader; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; template <typename, ::capnp::Kind> friend struct ::capnp::List; friend class 
::capnp::MessageBuilder; friend class ::capnp::Orphanage; }; class NvBlastSupportGraph::Builder { public: typedef NvBlastSupportGraph Builds; Builder() = delete; // Deleted to discourage incorrect usage. // You can explicitly initialize to nullptr instead. inline Builder(decltype(nullptr)) {} inline explicit Builder(::capnp::_::StructBuilder base): _builder(base) {} inline operator Reader() const { return Reader(_builder.asReader()); } inline Reader asReader() const { return *this; } inline ::capnp::MessageSize totalSize() const { return asReader().totalSize(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return asReader().toString(); } #endif // !CAPNP_LITE inline ::uint32_t getNodeCount(); inline void setNodeCount( ::uint32_t value); inline bool hasChunkIndices(); inline ::capnp::List< ::uint32_t>::Builder getChunkIndices(); inline void setChunkIndices( ::capnp::List< ::uint32_t>::Reader value); inline void setChunkIndices(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initChunkIndices(unsigned int size); inline void adoptChunkIndices(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownChunkIndices(); inline bool hasAdjacencyPartition(); inline ::capnp::List< ::uint32_t>::Builder getAdjacencyPartition(); inline void setAdjacencyPartition( ::capnp::List< ::uint32_t>::Reader value); inline void setAdjacencyPartition(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initAdjacencyPartition(unsigned int size); inline void adoptAdjacencyPartition(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownAdjacencyPartition(); inline bool hasAdjacentNodeIndices(); inline ::capnp::List< ::uint32_t>::Builder getAdjacentNodeIndices(); inline void setAdjacentNodeIndices( ::capnp::List< ::uint32_t>::Reader value); inline void setAdjacentNodeIndices(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initAdjacentNodeIndices(unsigned int size); inline void adoptAdjacentNodeIndices(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownAdjacentNodeIndices(); inline bool hasAdjacentBondIndices(); inline ::capnp::List< ::uint32_t>::Builder getAdjacentBondIndices(); inline void setAdjacentBondIndices( ::capnp::List< ::uint32_t>::Reader value); inline void setAdjacentBondIndices(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initAdjacentBondIndices(unsigned int size); inline void adoptAdjacentBondIndices(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownAdjacentBondIndices(); private: ::capnp::_::StructBuilder _builder; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; friend class ::capnp::Orphanage; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; }; #if !CAPNP_LITE class NvBlastSupportGraph::Pipeline { public: typedef NvBlastSupportGraph Pipelines; inline Pipeline(decltype(nullptr)): _typeless(nullptr) {} inline explicit Pipeline(::capnp::AnyPointer::Pipeline&& typeless) : _typeless(kj::mv(typeless)) {} private: ::capnp::AnyPointer::Pipeline _typeless; friend class ::capnp::PipelineHook; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; }; #endif // !CAPNP_LITE class UUID::Reader { public: typedef UUID Reads; Reader() = default; inline explicit 
Reader(::capnp::_::StructReader base): _reader(base) {} inline ::capnp::MessageSize totalSize() const { return _reader.totalSize().asPublic(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return ::capnp::_::structString(_reader, *_capnpPrivate::brand()); } #endif // !CAPNP_LITE inline bool hasValue() const; inline ::capnp::Data::Reader getValue() const; private: ::capnp::_::StructReader _reader; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; template <typename, ::capnp::Kind> friend struct ::capnp::List; friend class ::capnp::MessageBuilder; friend class ::capnp::Orphanage; }; class UUID::Builder { public: typedef UUID Builds; Builder() = delete; // Deleted to discourage incorrect usage. // You can explicitly initialize to nullptr instead. inline Builder(decltype(nullptr)) {} inline explicit Builder(::capnp::_::StructBuilder base): _builder(base) {} inline operator Reader() const { return Reader(_builder.asReader()); } inline Reader asReader() const { return *this; } inline ::capnp::MessageSize totalSize() const { return asReader().totalSize(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return asReader().toString(); } #endif // !CAPNP_LITE inline bool hasValue(); inline ::capnp::Data::Builder getValue(); inline void setValue( ::capnp::Data::Reader value); inline ::capnp::Data::Builder initValue(unsigned int size); inline void adoptValue(::capnp::Orphan< ::capnp::Data>&& value); inline ::capnp::Orphan< ::capnp::Data> disownValue(); private: ::capnp::_::StructBuilder _builder; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; friend class ::capnp::Orphanage; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; }; #if !CAPNP_LITE class UUID::Pipeline { public: typedef UUID Pipelines; inline Pipeline(decltype(nullptr)): _typeless(nullptr) {} inline explicit Pipeline(::capnp::AnyPointer::Pipeline&& typeless) : _typeless(kj::mv(typeless)) {} private: ::capnp::AnyPointer::Pipeline _typeless; friend class ::capnp::PipelineHook; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; }; #endif // !CAPNP_LITE // ======================================================================================= inline bool Asset::Reader::hasHeader() const { return !_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline bool Asset::Builder::hasHeader() { return !_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline ::Nv::Blast::Serialization::NvBlastDataBlock::Reader Asset::Reader::getHeader() const { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::NvBlastDataBlock>::get(_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline ::Nv::Blast::Serialization::NvBlastDataBlock::Builder Asset::Builder::getHeader() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::NvBlastDataBlock>::get(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } #if !CAPNP_LITE inline ::Nv::Blast::Serialization::NvBlastDataBlock::Pipeline Asset::Pipeline::getHeader() { return ::Nv::Blast::Serialization::NvBlastDataBlock::Pipeline(_typeless.getPointerField(0)); } #endif // !CAPNP_LITE inline void Asset::Builder::setHeader( ::Nv::Blast::Serialization::NvBlastDataBlock::Reader value) { ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::NvBlastDataBlock>::set(_builder.getPointerField( ::capnp::bounded<0>() * 
::capnp::POINTERS), value); } inline ::Nv::Blast::Serialization::NvBlastDataBlock::Builder Asset::Builder::initHeader() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::NvBlastDataBlock>::init(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline void Asset::Builder::adoptHeader( ::capnp::Orphan< ::Nv::Blast::Serialization::NvBlastDataBlock>&& value) { ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::NvBlastDataBlock>::adopt(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::Nv::Blast::Serialization::NvBlastDataBlock> Asset::Builder::disownHeader() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::NvBlastDataBlock>::disown(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline bool Asset::Reader::hasID() const { return !_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline bool Asset::Builder::hasID() { return !_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline ::Nv::Blast::Serialization::UUID::Reader Asset::Reader::getID() const { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::UUID>::get(_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline ::Nv::Blast::Serialization::UUID::Builder Asset::Builder::getID() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::UUID>::get(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } #if !CAPNP_LITE inline ::Nv::Blast::Serialization::UUID::Pipeline Asset::Pipeline::getID() { return ::Nv::Blast::Serialization::UUID::Pipeline(_typeless.getPointerField(1)); } #endif // !CAPNP_LITE inline void Asset::Builder::setID( ::Nv::Blast::Serialization::UUID::Reader value) { ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::UUID>::set(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), value); } inline ::Nv::Blast::Serialization::UUID::Builder Asset::Builder::initID() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::UUID>::init(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline void Asset::Builder::adoptID( ::capnp::Orphan< ::Nv::Blast::Serialization::UUID>&& value) { ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::UUID>::adopt(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::Nv::Blast::Serialization::UUID> Asset::Builder::disownID() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::UUID>::disown(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline ::uint32_t Asset::Reader::getChunkCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline ::uint32_t Asset::Builder::getChunkCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline void Asset::Builder::setChunkCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS, value); } inline bool Asset::Reader::hasGraph() const { return !_reader.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS).isNull(); } inline bool Asset::Builder::hasGraph() { return !_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS).isNull(); } inline ::Nv::Blast::Serialization::NvBlastSupportGraph::Reader Asset::Reader::getGraph() const { return ::capnp::_::PointerHelpers< 
::Nv::Blast::Serialization::NvBlastSupportGraph>::get(_reader.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); } inline ::Nv::Blast::Serialization::NvBlastSupportGraph::Builder Asset::Builder::getGraph() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::NvBlastSupportGraph>::get(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); } #if !CAPNP_LITE inline ::Nv::Blast::Serialization::NvBlastSupportGraph::Pipeline Asset::Pipeline::getGraph() { return ::Nv::Blast::Serialization::NvBlastSupportGraph::Pipeline(_typeless.getPointerField(2)); } #endif // !CAPNP_LITE inline void Asset::Builder::setGraph( ::Nv::Blast::Serialization::NvBlastSupportGraph::Reader value) { ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::NvBlastSupportGraph>::set(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), value); } inline ::Nv::Blast::Serialization::NvBlastSupportGraph::Builder Asset::Builder::initGraph() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::NvBlastSupportGraph>::init(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); } inline void Asset::Builder::adoptGraph( ::capnp::Orphan< ::Nv::Blast::Serialization::NvBlastSupportGraph>&& value) { ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::NvBlastSupportGraph>::adopt(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::Nv::Blast::Serialization::NvBlastSupportGraph> Asset::Builder::disownGraph() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::NvBlastSupportGraph>::disown(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); } inline ::uint32_t Asset::Reader::getLeafChunkCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); } inline ::uint32_t Asset::Builder::getLeafChunkCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); } inline void Asset::Builder::setLeafChunkCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS, value); } inline ::uint32_t Asset::Reader::getFirstSubsupportChunkIndex() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS); } inline ::uint32_t Asset::Builder::getFirstSubsupportChunkIndex() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS); } inline void Asset::Builder::setFirstSubsupportChunkIndex( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS, value); } inline ::uint32_t Asset::Reader::getBondCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<3>() * ::capnp::ELEMENTS); } inline ::uint32_t Asset::Builder::getBondCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<3>() * ::capnp::ELEMENTS); } inline void Asset::Builder::setBondCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<3>() * ::capnp::ELEMENTS, value); } inline bool Asset::Reader::hasChunks() const { return !_reader.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS).isNull(); } inline bool Asset::Builder::hasChunks() { return !_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>::Reader Asset::Reader::getChunks() const { return ::capnp::_::PointerHelpers< ::capnp::List< 
::Nv::Blast::Serialization::NvBlastChunk>>::get(_reader.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS)); } inline ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>::Builder Asset::Builder::getChunks() { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>>::get(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS)); } inline void Asset::Builder::setChunks( ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>>::set(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>::Builder Asset::Builder::initChunks(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>>::init(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), size); } inline void Asset::Builder::adoptChunks( ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>>::adopt(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>> Asset::Builder::disownChunks() { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>>::disown(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS)); } inline bool Asset::Reader::hasBonds() const { return !_reader.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS).isNull(); } inline bool Asset::Builder::hasBonds() { return !_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>::Reader Asset::Reader::getBonds() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>>::get(_reader.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS)); } inline ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>::Builder Asset::Builder::getBonds() { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>>::get(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS)); } inline void Asset::Builder::setBonds( ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>>::set(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>::Builder Asset::Builder::initBonds(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>>::init(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS), size); } inline void Asset::Builder::adoptBonds( ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>>::adopt(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>> Asset::Builder::disownBonds() { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>>::disown(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS)); } 
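// Usage sketch (editor's addition, not emitted by capnpc-c++): a minimal,
// hypothetical example of driving the Asset accessors declared above from
// application code. It assumes only the standard Cap'n Proto runtime
// (<capnp/message.h>) and the field names of this schema; the function and
// variable names below are illustrative, not part of the Blast SDK.
//
//   #include <capnp/message.h>
//
//   void buildAssetExample()
//   {
//       ::capnp::MallocMessageBuilder message;
//       auto asset = message.initRoot< ::Nv::Blast::Serialization::Asset>();
//       asset.setChunkCount(2);                        // plain data field
//       asset.setBondCount(1);
//       auto chunks = asset.initChunks(2);             // List<NvBlastChunk> of size 2
//       chunks[0].setVolume(1.0f);
//       chunks[0].initCentroid(3);                     // three floats per centroid
//       auto reader = asset.asReader();                // Builder -> Reader view
//       uint32_t chunkCount = reader.getChunkCount();  // reads back 2
//       (void)chunkCount;
//   }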
inline bool Asset::Reader::hasSubtreeLeafChunkCounts() const { return !_reader.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS).isNull(); } inline bool Asset::Builder::hasSubtreeLeafChunkCounts() { return !_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader Asset::Reader::getSubtreeLeafChunkCounts() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder Asset::Builder::getSubtreeLeafChunkCounts() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS)); } inline void Asset::Builder::setSubtreeLeafChunkCounts( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS), value); } inline void Asset::Builder::setSubtreeLeafChunkCounts(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder Asset::Builder::initSubtreeLeafChunkCounts(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS), size); } inline void Asset::Builder::adoptSubtreeLeafChunkCounts( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> Asset::Builder::disownSubtreeLeafChunkCounts() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS)); } inline bool Asset::Reader::hasChunkToGraphNodeMap() const { return !_reader.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS).isNull(); } inline bool Asset::Builder::hasChunkToGraphNodeMap() { return !_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader Asset::Reader::getChunkToGraphNodeMap() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder Asset::Builder::getChunkToGraphNodeMap() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS)); } inline void Asset::Builder::setChunkToGraphNodeMap( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS), value); } inline void Asset::Builder::setChunkToGraphNodeMap(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder Asset::Builder::initChunkToGraphNodeMap(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS), size); } inline void Asset::Builder::adoptChunkToGraphNodeMap( ::capnp::Orphan< 
::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> Asset::Builder::disownChunkToGraphNodeMap() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS)); } inline bool Family::Reader::hasAssetID() const { return !_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline bool Family::Builder::hasAssetID() { return !_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline ::Nv::Blast::Serialization::UUID::Reader Family::Reader::getAssetID() const { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::UUID>::get(_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline ::Nv::Blast::Serialization::UUID::Builder Family::Builder::getAssetID() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::UUID>::get(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } #if !CAPNP_LITE inline ::Nv::Blast::Serialization::UUID::Pipeline Family::Pipeline::getAssetID() { return ::Nv::Blast::Serialization::UUID::Pipeline(_typeless.getPointerField(0)); } #endif // !CAPNP_LITE inline void Family::Builder::setAssetID( ::Nv::Blast::Serialization::UUID::Reader value) { ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::UUID>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); } inline ::Nv::Blast::Serialization::UUID::Builder Family::Builder::initAssetID() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::UUID>::init(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline void Family::Builder::adoptAssetID( ::capnp::Orphan< ::Nv::Blast::Serialization::UUID>&& value) { ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::UUID>::adopt(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::Nv::Blast::Serialization::UUID> Family::Builder::disownAssetID() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::UUID>::disown(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline bool Family::Reader::hasActors() const { return !_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline bool Family::Builder::hasActors() { return !_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::Nv::Blast::Serialization::Actor>::Reader Family::Reader::getActors() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::Actor>>::get(_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline ::capnp::List< ::Nv::Blast::Serialization::Actor>::Builder Family::Builder::getActors() { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::Actor>>::get(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline void Family::Builder::setActors( ::capnp::List< ::Nv::Blast::Serialization::Actor>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::Actor>>::set(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::Nv::Blast::Serialization::Actor>::Builder Family::Builder::initActors(unsigned int size) { return ::capnp::_::PointerHelpers< 
::capnp::List< ::Nv::Blast::Serialization::Actor>>::init(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), size); } inline void Family::Builder::adoptActors( ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::Actor>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::Actor>>::adopt(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::Actor>> Family::Builder::disownActors() { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::Actor>>::disown(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline bool Family::Reader::hasVisibleChunkIndexLinks() const { return !_reader.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS).isNull(); } inline bool Family::Builder::hasVisibleChunkIndexLinks() { return !_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader Family::Reader::getVisibleChunkIndexLinks() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder Family::Builder::getVisibleChunkIndexLinks() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); } inline void Family::Builder::setVisibleChunkIndexLinks( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), value); } inline void Family::Builder::setVisibleChunkIndexLinks(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder Family::Builder::initVisibleChunkIndexLinks(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), size); } inline void Family::Builder::adoptVisibleChunkIndexLinks( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> Family::Builder::disownVisibleChunkIndexLinks() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); } inline bool Family::Reader::hasChunkActorIndices() const { return !_reader.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS).isNull(); } inline bool Family::Builder::hasChunkActorIndices() { return !_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader Family::Reader::getChunkActorIndices() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder Family::Builder::getChunkActorIndices() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS)); } inline void Family::Builder::setChunkActorIndices( ::capnp::List< ::uint32_t>::Reader 
value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), value); } inline void Family::Builder::setChunkActorIndices(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder Family::Builder::initChunkActorIndices(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), size); } inline void Family::Builder::adoptChunkActorIndices( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> Family::Builder::disownChunkActorIndices() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS)); } inline bool Family::Reader::hasGraphNodeIndexLinks() const { return !_reader.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS).isNull(); } inline bool Family::Builder::hasGraphNodeIndexLinks() { return !_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader Family::Reader::getGraphNodeIndexLinks() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder Family::Builder::getGraphNodeIndexLinks() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS)); } inline void Family::Builder::setGraphNodeIndexLinks( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS), value); } inline void Family::Builder::setGraphNodeIndexLinks(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder Family::Builder::initGraphNodeIndexLinks(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS), size); } inline void Family::Builder::adoptGraphNodeIndexLinks( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> Family::Builder::disownGraphNodeIndexLinks() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS)); } inline bool Family::Reader::hasLowerSupportChunkHealths() const { return !_reader.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS).isNull(); } inline bool Family::Builder::hasLowerSupportChunkHealths() { return !_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List<float>::Reader Family::Reader::getLowerSupportChunkHealths() const { return ::capnp::_::PointerHelpers< 
::capnp::List<float>>::get(_reader.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS)); } inline ::capnp::List<float>::Builder Family::Builder::getLowerSupportChunkHealths() { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::get(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS)); } inline void Family::Builder::setLowerSupportChunkHealths( ::capnp::List<float>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::set(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS), value); } inline void Family::Builder::setLowerSupportChunkHealths(::kj::ArrayPtr<const float> value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::set(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS), value); } inline ::capnp::List<float>::Builder Family::Builder::initLowerSupportChunkHealths(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::init(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS), size); } inline void Family::Builder::adoptLowerSupportChunkHealths( ::capnp::Orphan< ::capnp::List<float>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::adopt(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List<float>> Family::Builder::disownLowerSupportChunkHealths() { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::disown(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS)); } inline bool Family::Reader::hasGraphBondHealths() const { return !_reader.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS).isNull(); } inline bool Family::Builder::hasGraphBondHealths() { return !_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List<float>::Reader Family::Reader::getGraphBondHealths() const { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::get(_reader.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS)); } inline ::capnp::List<float>::Builder Family::Builder::getGraphBondHealths() { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::get(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS)); } inline void Family::Builder::setGraphBondHealths( ::capnp::List<float>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::set(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS), value); } inline void Family::Builder::setGraphBondHealths(::kj::ArrayPtr<const float> value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::set(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS), value); } inline ::capnp::List<float>::Builder Family::Builder::initGraphBondHealths(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::init(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS), size); } inline void Family::Builder::adoptGraphBondHealths( ::capnp::Orphan< ::capnp::List<float>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::adopt(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List<float>> Family::Builder::disownGraphBondHealths() { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::disown(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS)); } inline bool Family::Reader::hasFamilyGraph() const { return !_reader.getPointerField( ::capnp::bounded<7>() * ::capnp::POINTERS).isNull(); } inline bool 
Family::Builder::hasFamilyGraph() { return !_builder.getPointerField( ::capnp::bounded<7>() * ::capnp::POINTERS).isNull(); } inline ::Nv::Blast::Serialization::FamilyGraph::Reader Family::Reader::getFamilyGraph() const { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::FamilyGraph>::get(_reader.getPointerField( ::capnp::bounded<7>() * ::capnp::POINTERS)); } inline ::Nv::Blast::Serialization::FamilyGraph::Builder Family::Builder::getFamilyGraph() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::FamilyGraph>::get(_builder.getPointerField( ::capnp::bounded<7>() * ::capnp::POINTERS)); } #if !CAPNP_LITE inline ::Nv::Blast::Serialization::FamilyGraph::Pipeline Family::Pipeline::getFamilyGraph() { return ::Nv::Blast::Serialization::FamilyGraph::Pipeline(_typeless.getPointerField(7)); } #endif // !CAPNP_LITE inline void Family::Builder::setFamilyGraph( ::Nv::Blast::Serialization::FamilyGraph::Reader value) { ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::FamilyGraph>::set(_builder.getPointerField( ::capnp::bounded<7>() * ::capnp::POINTERS), value); } inline ::Nv::Blast::Serialization::FamilyGraph::Builder Family::Builder::initFamilyGraph() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::FamilyGraph>::init(_builder.getPointerField( ::capnp::bounded<7>() * ::capnp::POINTERS)); } inline void Family::Builder::adoptFamilyGraph( ::capnp::Orphan< ::Nv::Blast::Serialization::FamilyGraph>&& value) { ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::FamilyGraph>::adopt(_builder.getPointerField( ::capnp::bounded<7>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::Nv::Blast::Serialization::FamilyGraph> Family::Builder::disownFamilyGraph() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::FamilyGraph>::disown(_builder.getPointerField( ::capnp::bounded<7>() * ::capnp::POINTERS)); } inline ::uint32_t Family::Reader::getActorCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline ::uint32_t Family::Builder::getActorCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline void Family::Builder::setActorCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS, value); } inline ::uint32_t Family::Reader::getBondCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); } inline ::uint32_t Family::Builder::getBondCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); } inline void Family::Builder::setBondCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS, value); } inline ::uint32_t Family::Reader::getChunkCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS); } inline ::uint32_t Family::Builder::getChunkCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS); } inline void Family::Builder::setChunkCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS, value); } inline ::uint32_t Family::Reader::getNodeCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<3>() * ::capnp::ELEMENTS); } inline ::uint32_t Family::Builder::getNodeCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<3>() * ::capnp::ELEMENTS); } inline void Family::Builder::setNodeCount( ::uint32_t value) 
{ _builder.setDataField< ::uint32_t>( ::capnp::bounded<3>() * ::capnp::ELEMENTS, value); } inline ::uint32_t Family::Reader::getLowerSupportChunkCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<4>() * ::capnp::ELEMENTS); } inline ::uint32_t Family::Builder::getLowerSupportChunkCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<4>() * ::capnp::ELEMENTS); } inline void Family::Builder::setLowerSupportChunkCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<4>() * ::capnp::ELEMENTS, value); } inline ::uint32_t Family::Reader::getUpperSupportChunkCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<5>() * ::capnp::ELEMENTS); } inline ::uint32_t Family::Builder::getUpperSupportChunkCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<5>() * ::capnp::ELEMENTS); } inline void Family::Builder::setUpperSupportChunkCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<5>() * ::capnp::ELEMENTS, value); } inline ::uint32_t Actor::Reader::getFamilyOffset() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline ::uint32_t Actor::Builder::getFamilyOffset() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline void Actor::Builder::setFamilyOffset( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS, value); } inline ::uint32_t Actor::Reader::getFirstVisibleChunkIndex() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); } inline ::uint32_t Actor::Builder::getFirstVisibleChunkIndex() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); } inline void Actor::Builder::setFirstVisibleChunkIndex( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS, value); } inline ::uint32_t Actor::Reader::getVisibleChunkCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS); } inline ::uint32_t Actor::Builder::getVisibleChunkCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS); } inline void Actor::Builder::setVisibleChunkCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS, value); } inline ::uint32_t Actor::Reader::getFirstGraphNodeIndex() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<3>() * ::capnp::ELEMENTS); } inline ::uint32_t Actor::Builder::getFirstGraphNodeIndex() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<3>() * ::capnp::ELEMENTS); } inline void Actor::Builder::setFirstGraphNodeIndex( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<3>() * ::capnp::ELEMENTS, value); } inline ::uint32_t Actor::Reader::getGraphNodeCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<4>() * ::capnp::ELEMENTS); } inline ::uint32_t Actor::Builder::getGraphNodeCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<4>() * ::capnp::ELEMENTS); } inline void Actor::Builder::setGraphNodeCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<4>() * ::capnp::ELEMENTS, value); } inline ::uint32_t Actor::Reader::getLeafChunkCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<5>() * ::capnp::ELEMENTS); } inline ::uint32_t Actor::Builder::getLeafChunkCount() { return 
_builder.getDataField< ::uint32_t>( ::capnp::bounded<5>() * ::capnp::ELEMENTS); } inline void Actor::Builder::setLeafChunkCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<5>() * ::capnp::ELEMENTS, value); } inline bool FamilyGraph::Reader::hasIslandIds() const { return !_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline bool FamilyGraph::Builder::hasIslandIds() { return !_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader FamilyGraph::Reader::getIslandIds() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder FamilyGraph::Builder::getIslandIds() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline void FamilyGraph::Builder::setIslandIds( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); } inline void FamilyGraph::Builder::setIslandIds(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder FamilyGraph::Builder::initIslandIds(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), size); } inline void FamilyGraph::Builder::adoptIslandIds( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> FamilyGraph::Builder::disownIslandIds() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline bool FamilyGraph::Reader::hasDirtyNodeLinks() const { return !_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline bool FamilyGraph::Builder::hasDirtyNodeLinks() { return !_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader FamilyGraph::Reader::getDirtyNodeLinks() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder FamilyGraph::Builder::getDirtyNodeLinks() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline void FamilyGraph::Builder::setDirtyNodeLinks( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), value); } inline void FamilyGraph::Builder::setDirtyNodeLinks(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder FamilyGraph::Builder::initDirtyNodeLinks(unsigned int size) { return ::capnp::_::PointerHelpers< 
::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), size); } inline void FamilyGraph::Builder::adoptDirtyNodeLinks( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> FamilyGraph::Builder::disownDirtyNodeLinks() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline bool FamilyGraph::Reader::hasFirstDirtyNodeIndices() const { return !_reader.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS).isNull(); } inline bool FamilyGraph::Builder::hasFirstDirtyNodeIndices() { return !_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader FamilyGraph::Reader::getFirstDirtyNodeIndices() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder FamilyGraph::Builder::getFirstDirtyNodeIndices() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); } inline void FamilyGraph::Builder::setFirstDirtyNodeIndices( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), value); } inline void FamilyGraph::Builder::setFirstDirtyNodeIndices(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder FamilyGraph::Builder::initFirstDirtyNodeIndices(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), size); } inline void FamilyGraph::Builder::adoptFirstDirtyNodeIndices( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> FamilyGraph::Builder::disownFirstDirtyNodeIndices() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); } inline bool FamilyGraph::Reader::hasFastRoute() const { return !_reader.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS).isNull(); } inline bool FamilyGraph::Builder::hasFastRoute() { return !_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader FamilyGraph::Reader::getFastRoute() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder FamilyGraph::Builder::getFastRoute() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS)); } inline void FamilyGraph::Builder::setFastRoute( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< 
::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), value); } inline void FamilyGraph::Builder::setFastRoute(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder FamilyGraph::Builder::initFastRoute(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), size); } inline void FamilyGraph::Builder::adoptFastRoute( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> FamilyGraph::Builder::disownFastRoute() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS)); } inline bool FamilyGraph::Reader::hasHopCounts() const { return !_reader.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS).isNull(); } inline bool FamilyGraph::Builder::hasHopCounts() { return !_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader FamilyGraph::Reader::getHopCounts() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder FamilyGraph::Builder::getHopCounts() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS)); } inline void FamilyGraph::Builder::setHopCounts( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS), value); } inline void FamilyGraph::Builder::setHopCounts(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder FamilyGraph::Builder::initHopCounts(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS), size); } inline void FamilyGraph::Builder::adoptHopCounts( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> FamilyGraph::Builder::disownHopCounts() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS)); } inline bool FamilyGraph::Reader::hasIsEdgeRemoved() const { return !_reader.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS).isNull(); } inline bool FamilyGraph::Builder::hasIsEdgeRemoved() { return !_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS).isNull(); } inline ::capnp::Data::Reader FamilyGraph::Reader::getIsEdgeRemoved() const { return ::capnp::_::PointerHelpers< ::capnp::Data>::get(_reader.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS)); } inline ::capnp::Data::Builder 
FamilyGraph::Builder::getIsEdgeRemoved() { return ::capnp::_::PointerHelpers< ::capnp::Data>::get(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS)); } inline void FamilyGraph::Builder::setIsEdgeRemoved( ::capnp::Data::Reader value) { ::capnp::_::PointerHelpers< ::capnp::Data>::set(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS), value); } inline ::capnp::Data::Builder FamilyGraph::Builder::initIsEdgeRemoved(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::Data>::init(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS), size); } inline void FamilyGraph::Builder::adoptIsEdgeRemoved( ::capnp::Orphan< ::capnp::Data>&& value) { ::capnp::_::PointerHelpers< ::capnp::Data>::adopt(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::Data> FamilyGraph::Builder::disownIsEdgeRemoved() { return ::capnp::_::PointerHelpers< ::capnp::Data>::disown(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS)); } inline bool FamilyGraph::Reader::hasIsNodeInDirtyList() const { return !_reader.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS).isNull(); } inline bool FamilyGraph::Builder::hasIsNodeInDirtyList() { return !_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS).isNull(); } inline ::capnp::Data::Reader FamilyGraph::Reader::getIsNodeInDirtyList() const { return ::capnp::_::PointerHelpers< ::capnp::Data>::get(_reader.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS)); } inline ::capnp::Data::Builder FamilyGraph::Builder::getIsNodeInDirtyList() { return ::capnp::_::PointerHelpers< ::capnp::Data>::get(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS)); } inline void FamilyGraph::Builder::setIsNodeInDirtyList( ::capnp::Data::Reader value) { ::capnp::_::PointerHelpers< ::capnp::Data>::set(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS), value); } inline ::capnp::Data::Builder FamilyGraph::Builder::initIsNodeInDirtyList(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::Data>::init(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS), size); } inline void FamilyGraph::Builder::adoptIsNodeInDirtyList( ::capnp::Orphan< ::capnp::Data>&& value) { ::capnp::_::PointerHelpers< ::capnp::Data>::adopt(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::Data> FamilyGraph::Builder::disownIsNodeInDirtyList() { return ::capnp::_::PointerHelpers< ::capnp::Data>::disown(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS)); } inline ::uint32_t FamilyGraph::Reader::getNodeCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline ::uint32_t FamilyGraph::Builder::getNodeCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline void FamilyGraph::Builder::setNodeCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS, value); } inline ::Nv::Blast::Serialization::NvBlastDataBlock::Type NvBlastDataBlock::Reader::getDataType() const { return _reader.getDataField< ::Nv::Blast::Serialization::NvBlastDataBlock::Type>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline ::Nv::Blast::Serialization::NvBlastDataBlock::Type NvBlastDataBlock::Builder::getDataType() { return _builder.getDataField< ::Nv::Blast::Serialization::NvBlastDataBlock::Type>( 
::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline void NvBlastDataBlock::Builder::setDataType( ::Nv::Blast::Serialization::NvBlastDataBlock::Type value) { _builder.setDataField< ::Nv::Blast::Serialization::NvBlastDataBlock::Type>( ::capnp::bounded<0>() * ::capnp::ELEMENTS, value); } inline ::uint32_t NvBlastDataBlock::Reader::getFormatVersion() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); } inline ::uint32_t NvBlastDataBlock::Builder::getFormatVersion() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); } inline void NvBlastDataBlock::Builder::setFormatVersion( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS, value); } inline ::uint32_t NvBlastDataBlock::Reader::getSize() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS); } inline ::uint32_t NvBlastDataBlock::Builder::getSize() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS); } inline void NvBlastDataBlock::Builder::setSize( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS, value); } inline bool NvBlastChunk::Reader::hasCentroid() const { return !_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline bool NvBlastChunk::Builder::hasCentroid() { return !_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List<float>::Reader NvBlastChunk::Reader::getCentroid() const { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::get(_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline ::capnp::List<float>::Builder NvBlastChunk::Builder::getCentroid() { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::get(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline void NvBlastChunk::Builder::setCentroid( ::capnp::List<float>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); } inline void NvBlastChunk::Builder::setCentroid(::kj::ArrayPtr<const float> value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); } inline ::capnp::List<float>::Builder NvBlastChunk::Builder::initCentroid(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::init(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), size); } inline void NvBlastChunk::Builder::adoptCentroid( ::capnp::Orphan< ::capnp::List<float>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::adopt(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List<float>> NvBlastChunk::Builder::disownCentroid() { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::disown(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline float NvBlastChunk::Reader::getVolume() const { return _reader.getDataField<float>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline float NvBlastChunk::Builder::getVolume() { return _builder.getDataField<float>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline void NvBlastChunk::Builder::setVolume(float value) { _builder.setDataField<float>( ::capnp::bounded<0>() * ::capnp::ELEMENTS, value); } inline ::uint32_t NvBlastChunk::Reader::getParentChunkIndex() 
const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); } inline ::uint32_t NvBlastChunk::Builder::getParentChunkIndex() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); } inline void NvBlastChunk::Builder::setParentChunkIndex( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS, value); } inline ::uint32_t NvBlastChunk::Reader::getFirstChildIndex() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS); } inline ::uint32_t NvBlastChunk::Builder::getFirstChildIndex() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS); } inline void NvBlastChunk::Builder::setFirstChildIndex( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS, value); } inline ::uint32_t NvBlastChunk::Reader::getChildIndexStop() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<3>() * ::capnp::ELEMENTS); } inline ::uint32_t NvBlastChunk::Builder::getChildIndexStop() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<3>() * ::capnp::ELEMENTS); } inline void NvBlastChunk::Builder::setChildIndexStop( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<3>() * ::capnp::ELEMENTS, value); } inline ::uint32_t NvBlastChunk::Reader::getUserData() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<4>() * ::capnp::ELEMENTS); } inline ::uint32_t NvBlastChunk::Builder::getUserData() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<4>() * ::capnp::ELEMENTS); } inline void NvBlastChunk::Builder::setUserData( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<4>() * ::capnp::ELEMENTS, value); } inline bool NvBlastBond::Reader::hasNormal() const { return !_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline bool NvBlastBond::Builder::hasNormal() { return !_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List<float>::Reader NvBlastBond::Reader::getNormal() const { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::get(_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline ::capnp::List<float>::Builder NvBlastBond::Builder::getNormal() { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::get(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline void NvBlastBond::Builder::setNormal( ::capnp::List<float>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); } inline void NvBlastBond::Builder::setNormal(::kj::ArrayPtr<const float> value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); } inline ::capnp::List<float>::Builder NvBlastBond::Builder::initNormal(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::init(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), size); } inline void NvBlastBond::Builder::adoptNormal( ::capnp::Orphan< ::capnp::List<float>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::adopt(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List<float>> NvBlastBond::Builder::disownNormal() { return ::capnp::_::PointerHelpers< 
::capnp::List<float>>::disown(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline float NvBlastBond::Reader::getArea() const { return _reader.getDataField<float>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline float NvBlastBond::Builder::getArea() { return _builder.getDataField<float>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline void NvBlastBond::Builder::setArea(float value) { _builder.setDataField<float>( ::capnp::bounded<0>() * ::capnp::ELEMENTS, value); } inline bool NvBlastBond::Reader::hasCentroid() const { return !_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline bool NvBlastBond::Builder::hasCentroid() { return !_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List<float>::Reader NvBlastBond::Reader::getCentroid() const { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::get(_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline ::capnp::List<float>::Builder NvBlastBond::Builder::getCentroid() { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::get(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline void NvBlastBond::Builder::setCentroid( ::capnp::List<float>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::set(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), value); } inline void NvBlastBond::Builder::setCentroid(::kj::ArrayPtr<const float> value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::set(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), value); } inline ::capnp::List<float>::Builder NvBlastBond::Builder::initCentroid(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::init(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), size); } inline void NvBlastBond::Builder::adoptCentroid( ::capnp::Orphan< ::capnp::List<float>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::adopt(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List<float>> NvBlastBond::Builder::disownCentroid() { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::disown(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline ::uint32_t NvBlastBond::Reader::getUserData() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); } inline ::uint32_t NvBlastBond::Builder::getUserData() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); } inline void NvBlastBond::Builder::setUserData( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS, value); } inline ::uint32_t NvBlastSupportGraph::Reader::getNodeCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline ::uint32_t NvBlastSupportGraph::Builder::getNodeCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline void NvBlastSupportGraph::Builder::setNodeCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS, value); } inline bool NvBlastSupportGraph::Reader::hasChunkIndices() const { return !_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline bool NvBlastSupportGraph::Builder::hasChunkIndices() { return !_builder.getPointerField( ::capnp::bounded<0>() 
* ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader NvBlastSupportGraph::Reader::getChunkIndices() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder NvBlastSupportGraph::Builder::getChunkIndices() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline void NvBlastSupportGraph::Builder::setChunkIndices( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); } inline void NvBlastSupportGraph::Builder::setChunkIndices(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder NvBlastSupportGraph::Builder::initChunkIndices(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), size); } inline void NvBlastSupportGraph::Builder::adoptChunkIndices( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> NvBlastSupportGraph::Builder::disownChunkIndices() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline bool NvBlastSupportGraph::Reader::hasAdjacencyPartition() const { return !_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline bool NvBlastSupportGraph::Builder::hasAdjacencyPartition() { return !_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader NvBlastSupportGraph::Reader::getAdjacencyPartition() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder NvBlastSupportGraph::Builder::getAdjacencyPartition() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline void NvBlastSupportGraph::Builder::setAdjacencyPartition( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), value); } inline void NvBlastSupportGraph::Builder::setAdjacencyPartition(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder NvBlastSupportGraph::Builder::initAdjacencyPartition(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), size); } inline void NvBlastSupportGraph::Builder::adoptAdjacencyPartition( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( 
::capnp::bounded<1>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> NvBlastSupportGraph::Builder::disownAdjacencyPartition() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline bool NvBlastSupportGraph::Reader::hasAdjacentNodeIndices() const { return !_reader.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS).isNull(); } inline bool NvBlastSupportGraph::Builder::hasAdjacentNodeIndices() { return !_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader NvBlastSupportGraph::Reader::getAdjacentNodeIndices() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder NvBlastSupportGraph::Builder::getAdjacentNodeIndices() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); } inline void NvBlastSupportGraph::Builder::setAdjacentNodeIndices( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), value); } inline void NvBlastSupportGraph::Builder::setAdjacentNodeIndices(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder NvBlastSupportGraph::Builder::initAdjacentNodeIndices(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), size); } inline void NvBlastSupportGraph::Builder::adoptAdjacentNodeIndices( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> NvBlastSupportGraph::Builder::disownAdjacentNodeIndices() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); } inline bool NvBlastSupportGraph::Reader::hasAdjacentBondIndices() const { return !_reader.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS).isNull(); } inline bool NvBlastSupportGraph::Builder::hasAdjacentBondIndices() { return !_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader NvBlastSupportGraph::Reader::getAdjacentBondIndices() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder NvBlastSupportGraph::Builder::getAdjacentBondIndices() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS)); } inline void NvBlastSupportGraph::Builder::setAdjacentBondIndices( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), value); } inline void 
NvBlastSupportGraph::Builder::setAdjacentBondIndices(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder NvBlastSupportGraph::Builder::initAdjacentBondIndices(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), size); } inline void NvBlastSupportGraph::Builder::adoptAdjacentBondIndices( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> NvBlastSupportGraph::Builder::disownAdjacentBondIndices() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS)); } inline bool UUID::Reader::hasValue() const { return !_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline bool UUID::Builder::hasValue() { return !_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline ::capnp::Data::Reader UUID::Reader::getValue() const { return ::capnp::_::PointerHelpers< ::capnp::Data>::get(_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline ::capnp::Data::Builder UUID::Builder::getValue() { return ::capnp::_::PointerHelpers< ::capnp::Data>::get(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline void UUID::Builder::setValue( ::capnp::Data::Reader value) { ::capnp::_::PointerHelpers< ::capnp::Data>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); } inline ::capnp::Data::Builder UUID::Builder::initValue(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::Data>::init(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), size); } inline void UUID::Builder::adoptValue( ::capnp::Orphan< ::capnp::Data>&& value) { ::capnp::_::PointerHelpers< ::capnp::Data>::adopt(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::Data> UUID::Builder::disownValue() { return ::capnp::_::PointerHelpers< ::capnp::Data>::disown(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } } // namespace } // namespace } // namespace #endif // CAPNP_INCLUDED_9a4a58fac38375e0_
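// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the generated header above).
// A minimal example, assuming the standard Cap'n Proto runtime, of driving the
// generated NvBlastBond accessors through a MallocMessageBuilder. The type and
// accessor names come from this header; the function name and the particular
// field values are placeholders for illustration only.
#include <capnp/message.h>

static void buildBondSketch()
{
    ::capnp::MallocMessageBuilder message;
    auto bond = message.initRoot< ::Nv::Blast::Serialization::NvBlastBond>();

    // initNormal() allocates the List<float>; set() fills its components.
    auto normal = bond.initNormal(3);
    normal.set(0, 0.0f);
    normal.set(1, 0.0f);
    normal.set(2, 1.0f);

    bond.setArea(1.0f);
    bond.setUserData(42);

    // Reading back through the generated Reader interface.
    auto reader = bond.asReader();
    float area = reader.getArea();          // 1.0f
    bool hasNormal = reader.hasNormal();    // true
    (void)area; (void)hasNormal;
}
// ---------------------------------------------------------------------------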
119518
C
41.654889
141
0.682366
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/generated/NvBlastExtTkSerialization-capn.h
// Generated by Cap'n Proto compiler, DO NOT EDIT // source: NvBlastExtTkSerialization-capn #ifndef CAPNP_INCLUDED_affe4498f275ee58_ #define CAPNP_INCLUDED_affe4498f275ee58_ #include <capnp/generated-header-support.h> #if CAPNP_VERSION != 6001 #error "Version mismatch between generated code and library headers. You must use the same version of the Cap'n Proto compiler and library." #endif #include "NvBlastExtLlSerialization-capn.h" namespace capnp { namespace schemas { CAPNP_DECLARE_SCHEMA(ffd67c4b7067dde6); CAPNP_DECLARE_SCHEMA(b7dbad810488a897); CAPNP_DECLARE_SCHEMA(bf661e95794f2749); } // namespace schemas } // namespace capnp namespace Nv { namespace Blast { namespace Serialization { struct TkAsset { TkAsset() = delete; class Reader; class Builder; class Pipeline; struct _capnpPrivate { CAPNP_DECLARE_STRUCT_HEADER(ffd67c4b7067dde6, 0, 2) #if !CAPNP_LITE static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; } #endif // !CAPNP_LITE }; }; struct TkAssetJointDesc { TkAssetJointDesc() = delete; class Reader; class Builder; class Pipeline; struct _capnpPrivate { CAPNP_DECLARE_STRUCT_HEADER(b7dbad810488a897, 0, 2) #if !CAPNP_LITE static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; } #endif // !CAPNP_LITE }; }; struct NvVec3 { NvVec3() = delete; class Reader; class Builder; class Pipeline; struct _capnpPrivate { CAPNP_DECLARE_STRUCT_HEADER(bf661e95794f2749, 2, 0) #if !CAPNP_LITE static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; } #endif // !CAPNP_LITE }; }; // ======================================================================================= class TkAsset::Reader { public: typedef TkAsset Reads; Reader() = default; inline explicit Reader(::capnp::_::StructReader base): _reader(base) {} inline ::capnp::MessageSize totalSize() const { return _reader.totalSize().asPublic(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return ::capnp::_::structString(_reader, *_capnpPrivate::brand()); } #endif // !CAPNP_LITE inline bool hasAssetLL() const; inline ::Nv::Blast::Serialization::Asset::Reader getAssetLL() const; inline bool hasJointDescs() const; inline ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>::Reader getJointDescs() const; private: ::capnp::_::StructReader _reader; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; template <typename, ::capnp::Kind> friend struct ::capnp::List; friend class ::capnp::MessageBuilder; friend class ::capnp::Orphanage; }; class TkAsset::Builder { public: typedef TkAsset Builds; Builder() = delete; // Deleted to discourage incorrect usage. // You can explicitly initialize to nullptr instead. 
inline Builder(decltype(nullptr)) {} inline explicit Builder(::capnp::_::StructBuilder base): _builder(base) {} inline operator Reader() const { return Reader(_builder.asReader()); } inline Reader asReader() const { return *this; } inline ::capnp::MessageSize totalSize() const { return asReader().totalSize(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return asReader().toString(); } #endif // !CAPNP_LITE inline bool hasAssetLL(); inline ::Nv::Blast::Serialization::Asset::Builder getAssetLL(); inline void setAssetLL( ::Nv::Blast::Serialization::Asset::Reader value); inline ::Nv::Blast::Serialization::Asset::Builder initAssetLL(); inline void adoptAssetLL(::capnp::Orphan< ::Nv::Blast::Serialization::Asset>&& value); inline ::capnp::Orphan< ::Nv::Blast::Serialization::Asset> disownAssetLL(); inline bool hasJointDescs(); inline ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>::Builder getJointDescs(); inline void setJointDescs( ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>::Reader value); inline ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>::Builder initJointDescs(unsigned int size); inline void adoptJointDescs(::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>>&& value); inline ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>> disownJointDescs(); private: ::capnp::_::StructBuilder _builder; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; friend class ::capnp::Orphanage; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; }; #if !CAPNP_LITE class TkAsset::Pipeline { public: typedef TkAsset Pipelines; inline Pipeline(decltype(nullptr)): _typeless(nullptr) {} inline explicit Pipeline(::capnp::AnyPointer::Pipeline&& typeless) : _typeless(kj::mv(typeless)) {} inline ::Nv::Blast::Serialization::Asset::Pipeline getAssetLL(); private: ::capnp::AnyPointer::Pipeline _typeless; friend class ::capnp::PipelineHook; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; }; #endif // !CAPNP_LITE class TkAssetJointDesc::Reader { public: typedef TkAssetJointDesc Reads; Reader() = default; inline explicit Reader(::capnp::_::StructReader base): _reader(base) {} inline ::capnp::MessageSize totalSize() const { return _reader.totalSize().asPublic(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return ::capnp::_::structString(_reader, *_capnpPrivate::brand()); } #endif // !CAPNP_LITE inline bool hasNodeIndices() const; inline ::capnp::List< ::uint32_t>::Reader getNodeIndices() const; inline bool hasAttachPositions() const; inline ::capnp::List< ::Nv::Blast::Serialization::NvVec3>::Reader getAttachPositions() const; private: ::capnp::_::StructReader _reader; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; template <typename, ::capnp::Kind> friend struct ::capnp::List; friend class ::capnp::MessageBuilder; friend class ::capnp::Orphanage; }; class TkAssetJointDesc::Builder { public: typedef TkAssetJointDesc Builds; Builder() = delete; // Deleted to discourage incorrect usage. // You can explicitly initialize to nullptr instead. 
inline Builder(decltype(nullptr)) {} inline explicit Builder(::capnp::_::StructBuilder base): _builder(base) {} inline operator Reader() const { return Reader(_builder.asReader()); } inline Reader asReader() const { return *this; } inline ::capnp::MessageSize totalSize() const { return asReader().totalSize(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return asReader().toString(); } #endif // !CAPNP_LITE inline bool hasNodeIndices(); inline ::capnp::List< ::uint32_t>::Builder getNodeIndices(); inline void setNodeIndices( ::capnp::List< ::uint32_t>::Reader value); inline void setNodeIndices(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initNodeIndices(unsigned int size); inline void adoptNodeIndices(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownNodeIndices(); inline bool hasAttachPositions(); inline ::capnp::List< ::Nv::Blast::Serialization::NvVec3>::Builder getAttachPositions(); inline void setAttachPositions( ::capnp::List< ::Nv::Blast::Serialization::NvVec3>::Reader value); inline ::capnp::List< ::Nv::Blast::Serialization::NvVec3>::Builder initAttachPositions(unsigned int size); inline void adoptAttachPositions(::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::NvVec3>>&& value); inline ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::NvVec3>> disownAttachPositions(); private: ::capnp::_::StructBuilder _builder; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; friend class ::capnp::Orphanage; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; }; #if !CAPNP_LITE class TkAssetJointDesc::Pipeline { public: typedef TkAssetJointDesc Pipelines; inline Pipeline(decltype(nullptr)): _typeless(nullptr) {} inline explicit Pipeline(::capnp::AnyPointer::Pipeline&& typeless) : _typeless(kj::mv(typeless)) {} private: ::capnp::AnyPointer::Pipeline _typeless; friend class ::capnp::PipelineHook; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; }; #endif // !CAPNP_LITE class NvVec3::Reader { public: typedef NvVec3 Reads; Reader() = default; inline explicit Reader(::capnp::_::StructReader base): _reader(base) {} inline ::capnp::MessageSize totalSize() const { return _reader.totalSize().asPublic(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return ::capnp::_::structString(_reader, *_capnpPrivate::brand()); } #endif // !CAPNP_LITE inline float getX() const; inline float getY() const; inline float getZ() const; private: ::capnp::_::StructReader _reader; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; template <typename, ::capnp::Kind> friend struct ::capnp::List; friend class ::capnp::MessageBuilder; friend class ::capnp::Orphanage; }; class NvVec3::Builder { public: typedef NvVec3 Builds; Builder() = delete; // Deleted to discourage incorrect usage. // You can explicitly initialize to nullptr instead. 
inline Builder(decltype(nullptr)) {} inline explicit Builder(::capnp::_::StructBuilder base): _builder(base) {} inline operator Reader() const { return Reader(_builder.asReader()); } inline Reader asReader() const { return *this; } inline ::capnp::MessageSize totalSize() const { return asReader().totalSize(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return asReader().toString(); } #endif // !CAPNP_LITE inline float getX(); inline void setX(float value); inline float getY(); inline void setY(float value); inline float getZ(); inline void setZ(float value); private: ::capnp::_::StructBuilder _builder; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; friend class ::capnp::Orphanage; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; }; #if !CAPNP_LITE class NvVec3::Pipeline { public: typedef NvVec3 Pipelines; inline Pipeline(decltype(nullptr)): _typeless(nullptr) {} inline explicit Pipeline(::capnp::AnyPointer::Pipeline&& typeless) : _typeless(kj::mv(typeless)) {} private: ::capnp::AnyPointer::Pipeline _typeless; friend class ::capnp::PipelineHook; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; }; #endif // !CAPNP_LITE // ======================================================================================= inline bool TkAsset::Reader::hasAssetLL() const { return !_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline bool TkAsset::Builder::hasAssetLL() { return !_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline ::Nv::Blast::Serialization::Asset::Reader TkAsset::Reader::getAssetLL() const { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::Asset>::get(_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline ::Nv::Blast::Serialization::Asset::Builder TkAsset::Builder::getAssetLL() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::Asset>::get(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } #if !CAPNP_LITE inline ::Nv::Blast::Serialization::Asset::Pipeline TkAsset::Pipeline::getAssetLL() { return ::Nv::Blast::Serialization::Asset::Pipeline(_typeless.getPointerField(0)); } #endif // !CAPNP_LITE inline void TkAsset::Builder::setAssetLL( ::Nv::Blast::Serialization::Asset::Reader value) { ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::Asset>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); } inline ::Nv::Blast::Serialization::Asset::Builder TkAsset::Builder::initAssetLL() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::Asset>::init(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline void TkAsset::Builder::adoptAssetLL( ::capnp::Orphan< ::Nv::Blast::Serialization::Asset>&& value) { ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::Asset>::adopt(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::Nv::Blast::Serialization::Asset> TkAsset::Builder::disownAssetLL() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::Asset>::disown(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline bool TkAsset::Reader::hasJointDescs() const { return !_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline bool TkAsset::Builder::hasJointDescs() { return !_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< 
::Nv::Blast::Serialization::TkAssetJointDesc>::Reader TkAsset::Reader::getJointDescs() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>>::get(_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>::Builder TkAsset::Builder::getJointDescs() { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>>::get(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline void TkAsset::Builder::setJointDescs( ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>>::set(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>::Builder TkAsset::Builder::initJointDescs(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>>::init(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), size); } inline void TkAsset::Builder::adoptJointDescs( ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>>::adopt(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>> TkAsset::Builder::disownJointDescs() { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>>::disown(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline bool TkAssetJointDesc::Reader::hasNodeIndices() const { return !_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline bool TkAssetJointDesc::Builder::hasNodeIndices() { return !_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader TkAssetJointDesc::Reader::getNodeIndices() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder TkAssetJointDesc::Builder::getNodeIndices() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline void TkAssetJointDesc::Builder::setNodeIndices( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); } inline void TkAssetJointDesc::Builder::setNodeIndices(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder TkAssetJointDesc::Builder::initNodeIndices(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), size); } inline void TkAssetJointDesc::Builder::adoptNodeIndices( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), kj::mv(value)); } 
inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> TkAssetJointDesc::Builder::disownNodeIndices() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline bool TkAssetJointDesc::Reader::hasAttachPositions() const { return !_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline bool TkAssetJointDesc::Builder::hasAttachPositions() { return !_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::Nv::Blast::Serialization::NvVec3>::Reader TkAssetJointDesc::Reader::getAttachPositions() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvVec3>>::get(_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline ::capnp::List< ::Nv::Blast::Serialization::NvVec3>::Builder TkAssetJointDesc::Builder::getAttachPositions() { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvVec3>>::get(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline void TkAssetJointDesc::Builder::setAttachPositions( ::capnp::List< ::Nv::Blast::Serialization::NvVec3>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvVec3>>::set(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::Nv::Blast::Serialization::NvVec3>::Builder TkAssetJointDesc::Builder::initAttachPositions(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvVec3>>::init(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), size); } inline void TkAssetJointDesc::Builder::adoptAttachPositions( ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::NvVec3>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvVec3>>::adopt(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::NvVec3>> TkAssetJointDesc::Builder::disownAttachPositions() { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvVec3>>::disown(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline float NvVec3::Reader::getX() const { return _reader.getDataField<float>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline float NvVec3::Builder::getX() { return _builder.getDataField<float>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline void NvVec3::Builder::setX(float value) { _builder.setDataField<float>( ::capnp::bounded<0>() * ::capnp::ELEMENTS, value); } inline float NvVec3::Reader::getY() const { return _reader.getDataField<float>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); } inline float NvVec3::Builder::getY() { return _builder.getDataField<float>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); } inline void NvVec3::Builder::setY(float value) { _builder.setDataField<float>( ::capnp::bounded<1>() * ::capnp::ELEMENTS, value); } inline float NvVec3::Reader::getZ() const { return _reader.getDataField<float>( ::capnp::bounded<2>() * ::capnp::ELEMENTS); } inline float NvVec3::Builder::getZ() { return _builder.getDataField<float>( ::capnp::bounded<2>() * ::capnp::ELEMENTS); } inline void NvVec3::Builder::setZ(float value) { _builder.setDataField<float>( ::capnp::bounded<2>() * ::capnp::ELEMENTS, value); } } // namespace } // namespace } // namespace #endif // 
CAPNP_INCLUDED_affe4498f275ee58_
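// ------------------------------------------------------------------------------------------------
// Editor's note: a usage sketch appended to (not part of) the generated header above. It drives the
// generated TkAsset Builder accessors through Cap'n Proto's MallocMessageBuilder. The name of the
// generated schema header in the include below is an assumption.
#include <capnp/message.h>
#include "NvBlastExtTkSerialization-capn.h"  // assumed name of the header generated from the schema

static void sketchBuildTkAssetMessage()
{
    ::capnp::MallocMessageBuilder message;
    auto tkAsset = message.initRoot<Nv::Blast::Serialization::TkAsset>();

    // One joint descriptor referencing two support-graph nodes, with one attachment position each.
    auto joints = tkAsset.initJointDescs(1);
    auto nodes = joints[0].initNodeIndices(2);
    nodes.set(0, 0u);
    nodes.set(1, 1u);

    auto positions = joints[0].initAttachPositions(2);
    positions[0].setX(0.0f); positions[0].setY(0.0f); positions[0].setZ(0.0f);
    positions[1].setX(1.0f); positions[1].setY(0.0f); positions[1].setZ(0.0f);
}
// ------------------------------------------------------------------------------------------------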
20,761
C
37.448148
141
0.682193
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoringCommon/NvBlastExtAuthoringInternalCommon.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTINTERNALCOMMON_H #define NVBLASTINTERNALCOMMON_H #include "NvBlastExtAuthoringTypes.h" #include "NvBlastNvSharedHelpers.h" #include "NvBlastVolumeIntegrals.h" #include "NvVec2.h" #include "NvVec3.h" #include "NvPlane.h" #include "NvBounds3.h" #include "NvMath.h" #include <algorithm> namespace Nv { namespace Blast { /** Edge representation with index of parent facet */ struct EdgeWithParent { uint32_t s, e; // Starting and ending vertices uint32_t parent; // Parent facet index EdgeWithParent() : s(0), e(0), parent(0) {} EdgeWithParent(uint32_t s, uint32_t e, uint32_t p) : s(s), e(e), parent(p) {} }; /** Comparator for sorting edges according to parent facet number. */ struct EdgeComparator { bool operator()(const EdgeWithParent& a, const EdgeWithParent& b) const { if (a.parent == b.parent) { if (a.s == b.s) { return a.e < b.e; } else { return a.s < b.s; } } else { return a.parent < b.parent; } } }; inline bool operator<(const Edge& a, const Edge& b) { if (a.s == b.s) return a.e < b.e; else return a.s < b.s; } /** Vertex projection direction flag. */ enum ProjectionDirections { YZ_PLANE = 1 << 1, XY_PLANE = 1 << 2, ZX_PLANE = 1 << 3, // This is set when the dominant axis of the normal is negative // because when flattening to 2D the facet is viewed from the positive direction. // As a result, the winding order appears to flip if the normal is in the negative direction. OPPOSITE_WINDING = 1 << 4 }; /** Computes best direction to project points. 
*/ NV_FORCE_INLINE ProjectionDirections getProjectionDirection(const nvidia::NvVec3& normal) { float maxv = std::max(std::abs(normal.x), std::max(std::abs(normal.y), std::abs(normal.z))); ProjectionDirections retVal; if (maxv == std::abs(normal.x)) { retVal = YZ_PLANE; if (normal.x < 0) retVal = (ProjectionDirections)((int)retVal | (int)OPPOSITE_WINDING); return retVal; } if (maxv == std::abs(normal.y)) { retVal = ZX_PLANE; if (normal.y > 0) retVal = (ProjectionDirections)((int)retVal | (int)OPPOSITE_WINDING); return retVal; } retVal = XY_PLANE; if (normal.z < 0) retVal = (ProjectionDirections)((int)retVal | (int)OPPOSITE_WINDING); return retVal; } /** Computes the point projected onto the given axis-aligned plane. */ NV_FORCE_INLINE nvidia::NvVec2 getProjectedPoint(const nvidia::NvVec3& point, ProjectionDirections dir) { if (dir & YZ_PLANE) { return nvidia::NvVec2(point.y, point.z); } if (dir & ZX_PLANE) { return nvidia::NvVec2(point.x, point.z); } return nvidia::NvVec2(point.x, point.y); } NV_FORCE_INLINE nvidia::NvVec2 getProjectedPoint(const NvcVec3& point, ProjectionDirections dir) { return getProjectedPoint((const nvidia::NvVec3&)point, dir); } /** Computes the point projected onto the given axis-aligned plane; this method is polygon-winding aware. */ NV_FORCE_INLINE nvidia::NvVec2 getProjectedPointWithWinding(const nvidia::NvVec3& point, ProjectionDirections dir) { if (dir & YZ_PLANE) { if (dir & OPPOSITE_WINDING) { return nvidia::NvVec2(point.z, point.y); } else return nvidia::NvVec2(point.y, point.z); } if (dir & ZX_PLANE) { if (dir & OPPOSITE_WINDING) { return nvidia::NvVec2(point.z, point.x); } return nvidia::NvVec2(point.x, point.z); } if (dir & OPPOSITE_WINDING) { return nvidia::NvVec2(point.y, point.x); } return nvidia::NvVec2(point.x, point.y); } #define MAXIMUM_EXTENT 1000 * 1000 * 1000 #define BBOX_TEST_EPS 1e-5f /** Tests fattened bounding box intersection. */ NV_INLINE bool weakBoundingBoxIntersection(const nvidia::NvBounds3& aBox, const nvidia::NvBounds3& bBox) { if (std::max(aBox.minimum.x, bBox.minimum.x) > std::min(aBox.maximum.x, bBox.maximum.x) + BBOX_TEST_EPS) return false; if (std::max(aBox.minimum.y, bBox.minimum.y) > std::min(aBox.maximum.y, bBox.maximum.y) + BBOX_TEST_EPS) return false; if (std::max(aBox.minimum.z, bBox.minimum.z) > std::min(aBox.maximum.z, bBox.maximum.z) + BBOX_TEST_EPS) return false; return true; } /** Tests segment vs. plane intersection. Returns true if the segment intersects the plane; the point of intersection is written into 'result'. */ NV_INLINE bool getPlaneSegmentIntersection(const nvidia::NvPlane& pl, const nvidia::NvVec3& a, const nvidia::NvVec3& b, nvidia::NvVec3& result) { float div = (b - a).dot(pl.n); if (nvidia::NvAbs(div) < 0.0001f) { if (pl.contains(a)) { result = a; return true; } else { return false; } } float t = (-a.dot(pl.n) - pl.d) / div; if (t < 0.0f || t > 1.0f) { return false; } result = (b - a) * t + a; return true; } #define POS_COMPARISON_OFFSET 1e-5f #define NORM_COMPARISON_OFFSET 1e-3f /** Vertex comparator for vertex welding. */ template<bool splitUVs> struct VrtCompare { // This implements a "less than" function for vertices.
// Vertices a and b are considered equivalent if !(a < b) && !(b < a) bool operator()(const Vertex& a, const Vertex& b) const { if (a.p.x + POS_COMPARISON_OFFSET < b.p.x) return true; if (a.p.x - POS_COMPARISON_OFFSET > b.p.x) return false; if (a.p.y + POS_COMPARISON_OFFSET < b.p.y) return true; if (a.p.y - POS_COMPARISON_OFFSET > b.p.y) return false; if (a.p.z + POS_COMPARISON_OFFSET < b.p.z) return true; if (a.p.z - POS_COMPARISON_OFFSET > b.p.z) return false; if (a.n.x + NORM_COMPARISON_OFFSET < b.n.x) return true; if (a.n.x - NORM_COMPARISON_OFFSET > b.n.x) return false; if (a.n.y + NORM_COMPARISON_OFFSET < b.n.y) return true; if (a.n.y - NORM_COMPARISON_OFFSET > b.n.y) return false; if (a.n.z + NORM_COMPARISON_OFFSET < b.n.z) return true; if (a.n.z - NORM_COMPARISON_OFFSET > b.n.z) return false; // This is not actually needed if (!splitUVs) if (!splitUVs) return false; if (a.uv[0].x + NORM_COMPARISON_OFFSET < b.uv[0].x) return true; if (a.uv[0].x - NORM_COMPARISON_OFFSET > b.uv[0].x) return false; if (a.uv[0].y + NORM_COMPARISON_OFFSET < b.uv[0].y) return true; if (a.uv[0].y - NORM_COMPARISON_OFFSET > b.uv[0].y) return false; // This is not actually needed return false; }; }; typedef VrtCompare<true> VrtComp; typedef VrtCompare<false> VrtCompNoUV; /** Vertex comparator for vertex welding (does not account for the normal and uv parameters of vertices). */ struct VrtPositionComparator { bool operator()(const NvcVec3& a, const NvcVec3& b) const { if (a.x + POS_COMPARISON_OFFSET < b.x) return true; if (a.x - POS_COMPARISON_OFFSET > b.x) return false; if (a.y + POS_COMPARISON_OFFSET < b.y) return true; if (a.y - POS_COMPARISON_OFFSET > b.y) return false; if (a.z + POS_COMPARISON_OFFSET < b.z) return true; if (a.z - POS_COMPARISON_OFFSET > b.z) return false; return false; }; bool operator()(const Vertex& a, const Vertex& b) const { return operator()(a.p, b.p); }; }; NV_INLINE float calculateCollisionHullVolumeAndCentroid(NvcVec3& centroid, const CollisionHull& hull) { class CollisionHullQuery { public: CollisionHullQuery(const CollisionHull& hull) : m_hull(hull) {} size_t faceCount() const { return (size_t)m_hull.polygonDataCount; } size_t vertexCount(size_t faceIndex) const { return (size_t)m_hull.polygonData[faceIndex].vertexCount; } NvcVec3 vertex(size_t faceIndex, size_t vertexIndex) const { return m_hull.points[m_hull.indices[m_hull.polygonData[faceIndex].indexBase + vertexIndex]]; } private: const CollisionHull& m_hull; }; return calculateMeshVolumeAndCentroid<CollisionHullQuery>(centroid, hull); } } // namespace Blast } // namespace Nv #endif
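// ------------------------------------------------------------------------------------------------
// Editor's note: a minimal usage sketch appended to (not part of) the header above. It shows the
// intended pairing of getProjectionDirection() with the winding-aware projection; the helper name
// below is illustrative only.
#include "NvBlastExtAuthoringInternalCommon.h"

static nvidia::NvVec2 sketchProjectOntoDominantPlane(const nvidia::NvVec3& facetNormal,
                                                     const nvidia::NvVec3& point)
{
    using namespace Nv::Blast;

    // Choose the axis-aligned plane whose normal matches the dominant component of the facet
    // normal; OPPOSITE_WINDING is folded into the flags when that component is negative.
    const ProjectionDirections dir = getProjectionDirection(facetNormal);

    // The winding-aware overload swaps the 2D axes so the projected polygon keeps its winding.
    return getProjectedPointWithWinding(point, dir);
}
// ------------------------------------------------------------------------------------------------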
9,905
C
30.150943
133
0.639475
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoringCommon/NvBlastExtAuthoringAcceleratorImpl.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastExtAuthoringAcceleratorImpl.h" #include "NvBlastExtAuthoringMesh.h" #include "NvBlastExtAuthoringInternalCommon.h" #include "NvBlastGlobals.h" #include "NvBlastNvSharedHelpers.h" #include "NvCMath.h" namespace Nv { namespace Blast { DummyAccelerator::DummyAccelerator(int32_t count) : m_count(count) { m_current = 0; } void DummyAccelerator::release() { NVBLAST_DELETE(this, DummyAccelerator); } void DummyAccelerator::setState(const Vertex* pos, const Edge* ed, const Facet& fc) { m_current = 0; NV_UNUSED(pos); NV_UNUSED(ed); NV_UNUSED(fc); } void DummyAccelerator::setState(const NvcBounds3* bound) { m_current = 0; NV_UNUSED(bound); } void DummyAccelerator::setState(const NvcVec3& point) { m_current = 0; NV_UNUSED(point); } int32_t DummyAccelerator::getNextFacet() { if (m_current < m_count) { ++m_current; return m_current - 1; } else return -1; } Grid::Grid(int32_t resolution) : m_resolution(resolution) { /** Set up 3d grid */ m_r3 = resolution * resolution * resolution; m_spatialMap.resize(resolution * resolution * resolution); } void Grid::release() { NVBLAST_DELETE(this, Grid); } void Grid::setMesh(const Mesh* m) { nvidia::NvBounds3 bd = toNvShared(m->getBoundingBox()); m_mappedFacetCount = m->getFacetCount(); bd.fattenFast(0.001f); m_spos = fromNvShared(bd.minimum); m_deltas = { m_resolution / bd.getDimensions().x, m_resolution / bd.getDimensions().y, m_resolution / bd.getDimensions().z }; for (int32_t i = 0; i < m_r3; ++i) m_spatialMap[i].clear(); const float ofs = 0.001f; for (uint32_t fc = 0; fc < m->getFacetCount(); ++fc) { NvcBounds3 cfc = *m->getFacetBound(fc); int32_t is = (int32_t)std::max(0.f, (cfc.minimum.x - m_spos.x - ofs) * m_deltas.x); int32_t ie = (int32_t)std::max(0.f, (cfc.maximum.x - m_spos.x + ofs) * m_deltas.x); int32_t js = (int32_t)std::max(0.f, (cfc.minimum.y - m_spos.y - ofs) * m_deltas.y); int32_t je = (int32_t)std::max(0.f, (cfc.maximum.y - m_spos.y + ofs) * m_deltas.y); int32_t ks = (int32_t)std::max(0.f, (cfc.minimum.z - m_spos.z - 
ofs) * m_deltas.z); int32_t ke = (int32_t)std::max(0.f, (cfc.maximum.z - m_spos.z + ofs) * m_deltas.z); for (int32_t i = is; i < m_resolution && i <= ie; ++i) { for (int32_t j = js; j < m_resolution && j <= je; ++j) { for (int32_t k = ks; k < m_resolution && k <= ke; ++k) { m_spatialMap[(i * m_resolution + j) * m_resolution + k].push_back(fc); } } } } } GridAccelerator::GridAccelerator(Grid* grd) { m_grid = grd; m_alreadyGotValue = 0; m_alreadyGotFlag.resize(1 << 12); m_cellList.resize(1 << 12); m_pointCmdDir = 0; } void GridAccelerator::release() { NVBLAST_DELETE(this, GridAccelerator); } void GridAccelerator::setState(const Vertex* pos, const Edge* ed, const Facet& fc) { nvidia::NvBounds3 cfc(nvidia::NvBounds3::empty()); for (uint32_t v = 0; v < fc.edgesCount; ++v) { cfc.include(toNvShared(pos[ed[fc.firstEdgeNumber + v].s].p)); cfc.include(toNvShared(pos[ed[fc.firstEdgeNumber + v].e].p)); } setState(&fromNvShared(cfc)); } void GridAccelerator::setState(const NvcBounds3* facetBounding) { m_alreadyGotValue++; m_iteratorCell = -1; m_iteratorFacet = -1; m_gotCells = 0; NvcBounds3 cfc = *facetBounding; int32_t is = (int32_t)std::max(0.f, (cfc.minimum.x - m_grid->m_spos.x - 0.001f) * m_grid->m_deltas.x); int32_t ie = (int32_t)std::max(0.f, (cfc.maximum.x - m_grid->m_spos.x + 0.001f) * m_grid->m_deltas.x); int32_t js = (int32_t)std::max(0.f, (cfc.minimum.y - m_grid->m_spos.y - 0.001f) * m_grid->m_deltas.y); int32_t je = (int32_t)std::max(0.f, (cfc.maximum.y - m_grid->m_spos.y + 0.001f) * m_grid->m_deltas.y); int32_t ks = (int32_t)std::max(0.f, (cfc.minimum.z - m_grid->m_spos.z - 0.001f) * m_grid->m_deltas.z); int32_t ke = (int32_t)std::max(0.f, (cfc.maximum.z - m_grid->m_spos.z + 0.001f) * m_grid->m_deltas.z); for (int32_t i = is; i < m_grid->m_resolution && i <= ie; ++i) { for (int32_t j = js; j < m_grid->m_resolution && j <= je; ++j) { for (int32_t k = ks; k < m_grid->m_resolution && k <= ke; ++k) { int32_t id = (i * m_grid->m_resolution + j) * m_grid->m_resolution + k; if (!m_grid->m_spatialMap[id].empty()) { m_cellList[m_gotCells++] = id; } } } } if (m_gotCells != 0) { m_iteratorFacet = 0; m_iteratorCell = m_cellList[m_gotCells - 1]; m_gotCells--; } } void GridAccelerator::setPointCmpDirection(int32_t d) { m_pointCmdDir = d; } void GridAccelerator::setState(const NvcVec3& point) { m_alreadyGotValue++; m_iteratorCell = -1; m_iteratorFacet = -1; m_gotCells = 0; int32_t is = (int32_t)std::max(0.f, (point.x - m_grid->m_spos.x - 0.001f) * m_grid->m_deltas.x); int32_t ie = (int32_t)std::max(0.f, (point.x - m_grid->m_spos.x + 0.001f) * m_grid->m_deltas.x); int32_t js = (int32_t)std::max(0.f, (point.y - m_grid->m_spos.y - 0.001f) * m_grid->m_deltas.y); int32_t je = (int32_t)std::max(0.f, (point.y - m_grid->m_spos.y + 0.001f) * m_grid->m_deltas.y); int32_t ks = 0; int32_t ke = m_grid->m_resolution; switch (m_pointCmdDir) { case 1: ks = (int32_t)std::max(0.f, (point.z - m_grid->m_spos.z - 0.001f) * m_grid->m_deltas.z); break; case -1: ke = (int32_t)std::max(0.f, (point.z - m_grid->m_spos.z + 0.001f) * m_grid->m_deltas.z); } for (int32_t i = is; i < m_grid->m_resolution && i <= ie; ++i) { for (int32_t j = js; j < m_grid->m_resolution && j <= je; ++j) { for (int32_t k = ks; k <= ke && k < m_grid->m_resolution; ++k) { int32_t id = (i * m_grid->m_resolution + j) * m_grid->m_resolution + k; if (!m_grid->m_spatialMap[id].empty()) { m_cellList[m_gotCells++] = id; } } } } if (m_gotCells != 0) { m_iteratorFacet = 0; m_iteratorCell = m_cellList[m_gotCells - 1]; m_gotCells--; } } int32_t 
GridAccelerator::getNextFacet() { int32_t facetId = -1; while (m_iteratorCell != -1) { if (m_iteratorFacet >= (int32_t)m_grid->m_spatialMap[m_iteratorCell].size()) { if (m_gotCells != 0) { m_iteratorCell = m_cellList[m_gotCells - 1]; m_gotCells--; m_iteratorFacet = 0; } else { m_iteratorCell = -1; break; } } if (m_alreadyGotFlag[m_grid->m_spatialMap[m_iteratorCell][m_iteratorFacet]] != m_alreadyGotValue) { facetId = m_grid->m_spatialMap[m_iteratorCell][m_iteratorFacet]; m_iteratorFacet++; break; } else { m_iteratorFacet++; } } if (facetId != -1) { m_alreadyGotFlag[facetId] = m_alreadyGotValue; } return facetId; } BBoxBasedAccelerator::BBoxBasedAccelerator(const Mesh* mesh, int32_t resolution) : m_resolution(resolution), m_alreadyGotValue(1) { m_bounds = mesh->getBoundingBox(); m_spatialMap.resize(resolution * resolution * resolution); m_cells.resize(resolution * resolution * resolution); int32_t currentCell = 0; NvcVec3 incr = (m_bounds.maximum - m_bounds.minimum) * (1.0f / m_resolution); for (int32_t z = 0; z < resolution; ++z) { for (int32_t y = 0; y < resolution; ++y) { for (int32_t x = 0; x < resolution; ++x) { m_cells[currentCell].minimum.x = m_bounds.minimum.x + x * incr.x; m_cells[currentCell].minimum.y = m_bounds.minimum.y + y * incr.y; m_cells[currentCell].minimum.z = m_bounds.minimum.z + z * incr.z; m_cells[currentCell].maximum.x = m_bounds.minimum.x + (x + 1) * incr.x; m_cells[currentCell].maximum.y = m_bounds.minimum.y + (y + 1) * incr.y; m_cells[currentCell].maximum.z = m_bounds.minimum.z + (z + 1) * incr.z; ++currentCell; } } } m_cellList.resize(1 << 16); m_gotCells = 0; buildAccelStructure(mesh->getVertices(), mesh->getEdges(), mesh->getFacetsBuffer(), mesh->getFacetCount()); } void BBoxBasedAccelerator::release() { NVBLAST_DELETE(this, BBoxBasedAccelerator); } BBoxBasedAccelerator::~BBoxBasedAccelerator() { m_resolution = 0; toNvShared(m_bounds).setEmpty(); m_spatialMap.clear(); m_cells.clear(); m_cellList.clear(); } int32_t BBoxBasedAccelerator::getNextFacet() { int32_t facetId = -1; while (m_iteratorCell != -1) { if (m_iteratorFacet >= (int32_t)m_spatialMap[m_iteratorCell].size()) { if (m_gotCells != 0) { m_iteratorCell = m_cellList[m_gotCells - 1]; m_gotCells--; m_iteratorFacet = 0; } else { m_iteratorCell = -1; break; } } if (m_alreadyGotFlag[m_spatialMap[m_iteratorCell][m_iteratorFacet]] != m_alreadyGotValue) { facetId = m_spatialMap[m_iteratorCell][m_iteratorFacet]; m_iteratorFacet++; break; } else { m_iteratorFacet++; } } if (facetId != -1) { m_alreadyGotFlag[facetId] = m_alreadyGotValue; } return facetId; } void BBoxBasedAccelerator::setState(const Vertex* pos, const Edge* ed, const Facet& fc) { nvidia::NvBounds3 cfc(nvidia::NvBounds3::empty()); for (uint32_t v = 0; v < fc.edgesCount; ++v) { cfc.include(toNvShared(pos[ed[fc.firstEdgeNumber + v].s].p)); cfc.include(toNvShared(pos[ed[fc.firstEdgeNumber + v].e].p)); } setState(&fromNvShared(cfc)); } void BBoxBasedAccelerator::setState(const NvcBounds3* facetBox) { m_alreadyGotValue++; m_iteratorCell = -1; m_iteratorFacet = -1; m_gotCells = 0; for (uint32_t i = 0; i < m_cells.size(); ++i) { if (weakBoundingBoxIntersection(toNvShared(m_cells[i]), *toNvShared(facetBox))) { if (!m_spatialMap[i].empty()) m_cellList[m_gotCells++] = i; } } if (m_gotCells != 0) { m_iteratorFacet = 0; m_iteratorCell = m_cellList[m_gotCells - 1]; m_gotCells--; } } void BBoxBasedAccelerator::setState(const NvcVec3& p) { m_alreadyGotValue++; m_iteratorCell = -1; m_iteratorFacet = -1; m_gotCells = 0; int32_t perSlice = m_resolution * m_resolution; 
for (uint32_t i = 0; i < m_cells.size(); ++i) { if (toNvShared(m_cells[i]).contains(toNvShared(p))) { int32_t xyCellId = i % perSlice; for (int32_t zCell = 0; zCell < m_resolution; ++zCell) { int32_t cell = zCell * perSlice + xyCellId; if (!m_spatialMap[cell].empty()) m_cellList[m_gotCells++] = cell; } } } if (m_gotCells != 0) { m_iteratorFacet = 0; m_iteratorCell = m_cellList[m_gotCells - 1]; m_gotCells--; } } void BBoxBasedAccelerator::buildAccelStructure(const Vertex* pos, const Edge* edges, const Facet* fc, int32_t facetCount) { for (int32_t facet = 0; facet < facetCount; ++facet) { nvidia::NvBounds3 bBox; bBox.setEmpty(); const Edge* edge = &edges[0] + fc->firstEdgeNumber; int32_t count = fc->edgesCount; for (int32_t ec = 0; ec < count; ++ec) { bBox.include(toNvShared(pos[edge->s].p)); bBox.include(toNvShared(pos[edge->e].p)); edge++; } for (uint32_t i = 0; i < m_cells.size(); ++i) { if (weakBoundingBoxIntersection(toNvShared(m_cells[i]), bBox)) { m_spatialMap[i].push_back(facet); } } fc++; } m_alreadyGotFlag.resize(facetCount, 0); } #define SWEEP_RESOLUTION 2048 void buildIndex(std::vector<SegmentToIndex>& segm, float offset, float mlt, std::vector<std::vector<uint32_t>>& blocks) { std::set<uint32_t> currentEnabled; uint32_t lastBlock = 0; for (uint32_t i = 0; i < segm.size(); ++i) { uint32_t currentBlock = (uint32_t)((segm[i].coord - offset) * mlt); if (currentBlock >= SWEEP_RESOLUTION) break; if (currentBlock != lastBlock) { for (uint32_t j = lastBlock + 1; j <= currentBlock; ++j) { for (auto id : currentEnabled) blocks[j].push_back(id); } lastBlock = currentBlock; } if (segm[i].end == false) { blocks[lastBlock].push_back(segm[i].index); currentEnabled.insert(segm[i].index); } else { currentEnabled.erase(segm[i].index); } } } SweepingAccelerator::SweepingAccelerator(const Nv::Blast::Mesh* in) { nvidia::NvBounds3 bnd; const Vertex* verts = in->getVertices(); const Edge* edges = in->getEdges(); m_facetCount = in->getFacetCount(); m_foundx.resize(m_facetCount, 0); m_foundy.resize(m_facetCount, 0); std::vector<SegmentToIndex> xevs; std::vector<SegmentToIndex> yevs; std::vector<SegmentToIndex> zevs; for (uint32_t i = 0; i < in->getFacetCount(); ++i) { const Facet* fc = in->getFacet(i); bnd.setEmpty(); for (uint32_t v = 0; v < fc->edgesCount; ++v) { bnd.include(toNvShared(verts[edges[v + fc->firstEdgeNumber].s].p)); } bnd.scaleFast(1.1f); xevs.push_back(SegmentToIndex(bnd.minimum.x, i, false)); xevs.push_back(SegmentToIndex(bnd.maximum.x, i, true)); yevs.push_back(SegmentToIndex(bnd.minimum.y, i, false)); yevs.push_back(SegmentToIndex(bnd.maximum.y, i, true)); zevs.push_back(SegmentToIndex(bnd.minimum.z, i, false)); zevs.push_back(SegmentToIndex(bnd.maximum.z, i, true)); } std::sort(xevs.begin(), xevs.end()); std::sort(yevs.begin(), yevs.end()); std::sort(zevs.begin(), zevs.end()); m_minimal.x = xevs[0].coord; m_minimal.y = yevs[0].coord; m_minimal.z = zevs[0].coord; m_maximal.x = xevs.back().coord; m_maximal.y = yevs.back().coord; m_maximal.z = zevs.back().coord; m_rescale = (m_maximal - m_minimal) * 1.01f; m_rescale.x = 1.0f / m_rescale.x * SWEEP_RESOLUTION; m_rescale.y = 1.0f / m_rescale.y * SWEEP_RESOLUTION; m_rescale.z = 1.0f / m_rescale.z * SWEEP_RESOLUTION; m_xSegm.resize(SWEEP_RESOLUTION); m_ySegm.resize(SWEEP_RESOLUTION); m_zSegm.resize(SWEEP_RESOLUTION); buildIndex(xevs, m_minimal.x, m_rescale.x, m_xSegm); buildIndex(yevs, m_minimal.y, m_rescale.y, m_ySegm); buildIndex(zevs, m_minimal.z, m_rescale.z, m_zSegm); m_iterId = 1; m_current = 0; } void SweepingAccelerator::release() { 
NVBLAST_DELETE(this, SweepingAccelerator); } void SweepingAccelerator::setState(const NvcBounds3* facetBounds) { m_current = 0; m_indices.clear(); nvidia::NvBounds3 bnd = *toNvShared(facetBounds); bnd.scaleFast(1.1f); uint32_t start = (uint32_t)((std::max(0.0f, bnd.minimum.x - m_minimal.x)) * m_rescale.x); uint32_t end = (uint32_t)((std::max(0.0f, bnd.maximum.x - m_minimal.x)) * m_rescale.x); for (uint32_t i = start; i <= end && i < SWEEP_RESOLUTION; ++i) { for (auto id : m_xSegm[i]) { m_foundx[id] = m_iterId; } } start = (uint32_t)((std::max(0.0f, bnd.minimum.y - m_minimal.y)) * m_rescale.y); end = (uint32_t)((std::max(0.0f, bnd.maximum.y - m_minimal.y)) * m_rescale.y); for (uint32_t i = start; i <= end && i < SWEEP_RESOLUTION; ++i) { for (auto id : m_ySegm[i]) { m_foundy[id] = m_iterId; } } start = (uint32_t)((std::max(0.0f, bnd.minimum.z - m_minimal.z)) * m_rescale.z); end = (uint32_t)((std::max(0.0f, bnd.maximum.z - m_minimal.z)) * m_rescale.z); for (uint32_t i = start; i <= end && i < SWEEP_RESOLUTION; ++i) { for (auto id : m_zSegm[i]) { if (m_foundy[id] == m_iterId && m_foundx[id] == m_iterId) { m_foundx[id] = m_iterId + 1; m_foundy[id] = m_iterId + 1; m_indices.push_back(id); } } } m_iterId += 2; } void SweepingAccelerator::setState(const Vertex* pos, const Edge* ed, const Facet& fc) { nvidia::NvBounds3 cfc(nvidia::NvBounds3::empty()); for (uint32_t v = 0; v < fc.edgesCount; ++v) { cfc.include(toNvShared(pos[ed[fc.firstEdgeNumber + v].s].p)); cfc.include(toNvShared(pos[ed[fc.firstEdgeNumber + v].e].p)); } setState(&fromNvShared(cfc)); } void SweepingAccelerator::setState(const NvcVec3& point) { m_indices.clear(); /*for (uint32_t i = 0; i < facetCount; ++i) { indices.push_back(i); }*/ uint32_t yIndex = (uint32_t)((point.y - m_minimal.y) * m_rescale.y); uint32_t xIndex = (uint32_t)((point.x - m_minimal.x) * m_rescale.x); for (uint32_t i = 0; i < m_xSegm[xIndex].size(); ++i) { m_foundx[m_xSegm[xIndex][i]] = m_iterId; } for (uint32_t i = 0; i < m_ySegm[yIndex].size(); ++i) { if (m_foundx[m_ySegm[yIndex][i]] == m_iterId) { m_indices.push_back(m_ySegm[yIndex][i]); } } m_iterId++; m_current = 0; NV_UNUSED(point); } int32_t SweepingAccelerator::getNextFacet() { if (static_cast<uint32_t>(m_current) < m_indices.size()) { ++m_current; return m_indices[m_current - 1]; } else return -1; } } // namespace Blast } // namespace Nv
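// ------------------------------------------------------------------------------------------------
// Editor's note: a usage sketch appended to (not part of) the file above. It shows the query idiom
// shared by the accelerators implemented here: set the query state, then pull candidate facet
// indices until -1 is returned. Stack allocation is used only to keep the sketch short; 'mesh' is
// assumed to be a valid mesh created elsewhere.
#include "NvBlastExtAuthoringAcceleratorImpl.h"
#include "NvBlastExtAuthoringMesh.h"
#include <vector>

static void sketchCollectCandidateFacets(const Nv::Blast::Mesh* mesh, const NvcBounds3* queryBounds,
                                         std::vector<int32_t>& candidates)
{
    Nv::Blast::Grid grid(64);                 // 64^3 cells
    grid.setMesh(mesh);

    Nv::Blast::GridAccelerator accel(&grid);
    accel.setState(queryBounds);              // visit only cells overlapping the query bounds
    for (int32_t facet = accel.getNextFacet(); facet != -1; facet = accel.getNextFacet())
    {
        candidates.push_back(facet);          // each candidate facet index is reported once
    }
}
// ------------------------------------------------------------------------------------------------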
19,981
C++
28.602963
129
0.563285
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoringCommon/NvBlastExtAuthoringAcceleratorImpl.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTEXTAUTHORINGACCELERATORIMPL_H #define NVBLASTEXTAUTHORINGACCELERATORIMPL_H #include <set> #include <vector> #include "NvBlastExtAuthoringAccelerator.h" namespace Nv { namespace Blast { class Mesh; /** Dummy accelerator iterates through all facets of mesh. */ class DummyAccelerator : public SpatialAccelerator { public: /** \param[in] count Mesh facets count for which accelerator should be built. 
*/ DummyAccelerator(int32_t count); virtual void release() override; virtual void setState(const NvcBounds3* bounds) override; virtual void setState(const Vertex* pos, const Edge* ed, const Facet& fc) override; virtual void setState(const NvcVec3& point) override; virtual int32_t getNextFacet() override; virtual void setPointCmpDirection(int32_t dir) override { NV_UNUSED(dir); } private: int32_t m_count; int32_t m_current; }; struct SegmentToIndex { float coord; uint32_t index; bool end; SegmentToIndex(float c, uint32_t i, bool end) : coord(c), index(i), end(end) {} bool operator<(const SegmentToIndex& in) const { if (coord < in.coord) return true; if (coord > in.coord) return false; return end < in.end; } }; class Grid : public SpatialGrid { public: friend class GridAccelerator; Grid(int32_t resolution); virtual void release() override; virtual void setMesh(const Nv::Blast::Mesh* m) override; private: int32_t m_resolution; int32_t m_r3; int32_t m_mappedFacetCount; NvcVec3 m_spos; NvcVec3 m_deltas; std::vector< std::vector<int32_t> > m_spatialMap; }; class GridAccelerator : public SpatialAccelerator // Iterator to traverse the grid { public: GridAccelerator(Grid* grd); virtual void release() override; virtual void setState(const NvcBounds3* bounds) override; virtual void setState(const Vertex* pos, const Edge* ed, const Facet& fc) override; virtual void setState(const NvcVec3& point) override; virtual int32_t getNextFacet() override; virtual void setPointCmpDirection(int32_t dir) override; private: Grid* m_grid; // Iterator data std::vector<uint32_t> m_alreadyGotFlag; uint32_t m_alreadyGotValue; std::vector<int32_t> m_cellList; int32_t m_gotCells; int32_t m_iteratorCell; int32_t m_iteratorFacet; int32_t m_pointCmdDir; }; class SweepingAccelerator : public SpatialAccelerator { public: /** \param[in] in Mesh for which the accelerator should be built. */ SweepingAccelerator(const Nv::Blast::Mesh* in); virtual void release() override; virtual void setState(const Vertex* pos, const Edge* ed, const Facet& fc) override; virtual void setState(const NvcBounds3* bounds) override; virtual void setState(const NvcVec3& point) override; virtual int32_t getNextFacet() override; virtual void setPointCmpDirection(int32_t dir) override { NV_UNUSED(dir); } private: /* For fast point test. */ std::vector<std::vector<uint32_t> > m_xSegm; std::vector<std::vector<uint32_t> > m_ySegm; std::vector<std::vector<uint32_t> > m_zSegm; std::vector<uint32_t> m_indices; std::vector<uint32_t> m_foundx; std::vector<uint32_t> m_foundy; uint32_t m_iterId; int32_t m_current; uint32_t m_facetCount; NvcVec3 m_minimal; NvcVec3 m_maximal; NvcVec3 m_rescale; }; /** Accelerator which builds a map from a 3d grid to the initial mesh facets. To find all facets which possibly intersect a given one, it returns all facets referenced by the grid cells which intersect the bounding box of the given facet. To find all facets which possibly cover a given point, all facets referenced by the cells in the column containing the point are returned. */ class BBoxBasedAccelerator : public SpatialAccelerator { public: /** \param[in] mesh Mesh for which acceleration structure should be built. \param[in] resolution Resolution of the 3d grid.
*/ BBoxBasedAccelerator(const Mesh* mesh, int32_t resolution); virtual ~BBoxBasedAccelerator(); virtual void release() override; virtual int32_t getNextFacet() override; virtual void setState(const Vertex* pos, const Edge* ed, const Facet& fc) override; virtual void setState(const NvcBounds3* bounds) override; virtual void setState(const NvcVec3& p) override; virtual void setPointCmpDirection(int32_t dir) override { NV_UNUSED(dir); } private: void buildAccelStructure(const Vertex* pos, const Edge* edges, const Facet* fc, int32_t facetCount); int32_t m_resolution; NvcBounds3 m_bounds; std::vector< std::vector<int32_t> > m_spatialMap; std::vector<NvcBounds3> m_cells; // Iterator data std::vector<uint32_t> m_alreadyGotFlag; uint32_t m_alreadyGotValue; std::vector<int32_t> m_cellList; int32_t m_gotCells; int32_t m_iteratorCell; int32_t m_iteratorFacet; }; } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTEXTAUTHORINGACCELERATORIMPL_H
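// ------------------------------------------------------------------------------------------------
// Editor's note: a sketch appended to (not part of) the header above, illustrating the point-query
// path. For the grid-based accelerator implemented in NvBlastExtAuthoringAcceleratorImpl.cpp,
// setPointCmpDirection() narrows the z-range of visited cells when only facets above (+1) or
// below (-1) the point matter; other accelerators may ignore the hint.
#include "NvBlastExtAuthoringAccelerator.h"

static int32_t sketchFirstCandidateAbovePoint(Nv::Blast::SpatialAccelerator& accel, const NvcVec3& p)
{
    accel.setPointCmpDirection(1);  // caller only cares about facets at or above p.z
    accel.setState(p);
    return accel.getNextFacet();    // -1 when there are no candidate facets
}
// ------------------------------------------------------------------------------------------------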
7,801
C
35.12037
171
0.610563
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoringCommon/NvBlastExtAuthoringMeshImpl.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTAUTHORINGMESHIMPL_H #define NVBLASTAUTHORINGMESHIMPL_H #include "NvBlastExtAuthoringMesh.h" #include "NvBounds3.h" #include <vector> #include <map> #include <set> namespace Nv { namespace Blast { /** Class for internal mesh representation */ class MeshImpl : public Mesh { public: /** Constructs mesh object from array of triangles. \param[in] position Array of vertex positions \param[in] normals Array of vertex normals \param[in] uv Array of vertex uv coordinates \param[in] verticesCount Vertices count \param[in] indices Array of vertex indices. Indices contain vertex index triplets which form a mesh triangle. \param[in] indicesCount Indices count (should be equal to numberOfTriangles * 3) */ MeshImpl(const NvcVec3* position, const NvcVec3* normals, const NvcVec2* uv, uint32_t verticesCount, const uint32_t* indices, uint32_t indicesCount); /** Constructs mesh object from array of facets. 
\param[in] vertices Array of vertices \param[in] edges Array of edges \param[in] facets Array of facets \param[in] posCount Vertices count \param[in] edgesCount Edges count \param[in] facetsCount Facets count */ MeshImpl(const Vertex* vertices, const Edge* edges, const Facet* facets, uint32_t posCount, uint32_t edgesCount, uint32_t facetsCount); MeshImpl(const Vertex* vertices, uint32_t count); MeshImpl(const Vertex* vertices, uint32_t count, uint32_t* indices, uint32_t indexCount, void* materials, uint32_t materialStride); ~MeshImpl(); virtual void release() override; /** Return true if mesh is valid */ bool isValid() const override; /** Return pointer to vertices array */ Vertex* getVerticesWritable() override; /** Return pointer to edges array */ Edge* getEdgesWritable() override; /** Return pointer to facets array */ Facet* getFacetsBufferWritable() override; /** Return pointer to vertices array */ const Vertex* getVertices() const override; /** Return pointer to edges array */ const Edge* getEdges() const override; /** Return pointer to facets array */ const Facet* getFacetsBuffer() const override; /** Return writable pointer to specified facet */ Facet* getFacetWritable(int32_t facet) override; /** Return pointer to specified facet */ const Facet* getFacet(int32_t facet) const override; /** Return edges count */ uint32_t getEdgesCount() const override; /** Return vertices count */ uint32_t getVerticesCount() const override; /** Return facet count */ uint32_t getFacetCount() const override; /** Return reference to mesh bounding box. */ const NvcBounds3& getBoundingBox() const override; /** Return writable reference to mesh bounding box. */ NvcBounds3& getBoundingBoxWritable() override; /** Recalculate bounding box */ void recalculateBoundingBox() override; /** Compute mesh volume and centroid. Assumes mesh has outward normals and no holes. */ float getMeshVolumeAndCentroid(NvcVec3& centroid) const override; /** Set per-facet material id. */ void setMaterialId(const int32_t* materialIds) override; /** Replaces a material id on faces with a new one */ void replaceMaterialId(int32_t oldMaterialId, int32_t newMaterialId) override; /** Set per-facet smoothing group. */ void setSmoothingGroup(const int32_t* smoothingGroups) override; /** Calculate per-facet bounding boxes. */ virtual void calcPerFacetBounds() override; /** Get pointer to facet bounding box; if not calculated, return nullptr. */ virtual const NvcBounds3* getFacetBound(uint32_t index) const override; private: std::vector<Vertex> mVertices; std::vector<Edge> mEdges; std::vector<Facet> mFacets; nvidia::NvBounds3 mBounds; std::vector<nvidia::NvBounds3> mPerFacetBounds; }; } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTAUTHORINGMESHIMPL_H
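// ------------------------------------------------------------------------------------------------
// Editor's note: a construction sketch appended to (not part of) the header above. It feeds the
// triangle-soup constructor with a single triangle. In the authoring extension meshes are normally
// obtained through factory functions rather than by instantiating MeshImpl directly, so treat this
// as illustrative only.
#include "NvBlastExtAuthoringMeshImpl.h"

static Nv::Blast::Mesh* sketchMakeSingleTriangleMesh()
{
    const NvcVec3 positions[3] = { { 0.f, 0.f, 0.f }, { 1.f, 0.f, 0.f }, { 0.f, 1.f, 0.f } };
    const uint32_t indices[3] = { 0, 1, 2 };

    // Normals and UVs may be nullptr; the implementation zero-fills them in that case.
    // The returned mesh is destroyed with release(), which deletes the object.
    return new Nv::Blast::MeshImpl(positions, nullptr, nullptr, 3, indices, 3);
}
// ------------------------------------------------------------------------------------------------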
6,150
C
30.22335
153
0.657724
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoringCommon/NvBlastExtAuthoringMeshImpl.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #define _CRT_SECURE_NO_WARNINGS #include "NvBlastExtAuthoringMeshImpl.h" #include "NvBlastExtAuthoringTypes.h" #include <NvBlastAssert.h> #include "NvMath.h" #include <NvBlastNvSharedHelpers.h> #include <NvBlastVolumeIntegrals.h> #include <cmath> #include <string.h> #include <vector> #include <algorithm> namespace Nv { namespace Blast { MeshImpl::MeshImpl(const NvcVec3* position, const NvcVec3* normals, const NvcVec2* uv, uint32_t verticesCount, const uint32_t* indices, uint32_t indicesCount) { mVertices.resize(verticesCount); for (uint32_t i = 0; i < mVertices.size(); ++i) { mVertices[i].p = position[i]; } if (normals != 0) { for (uint32_t i = 0; i < mVertices.size(); ++i) { mVertices[i].n = normals[i]; } } else { for (uint32_t i = 0; i < mVertices.size(); ++i) { mVertices[i].n = {0, 0, 0}; } } if (uv != 0) { for (uint32_t i = 0; i < mVertices.size(); ++i) { mVertices[i].uv[0] = uv[i]; } } else { for (uint32_t i = 0; i < mVertices.size(); ++i) { mVertices[i].uv[0] = {0, 0}; } } mEdges.resize(indicesCount); mFacets.resize(indicesCount / 3); int32_t facetId = 0; for (uint32_t i = 0; i < indicesCount; i += 3) { mEdges[i].s = indices[i]; mEdges[i].e = indices[i + 1]; mEdges[i + 1].s = indices[i + 1]; mEdges[i + 1].e = indices[i + 2]; mEdges[i + 2].s = indices[i + 2]; mEdges[i + 2].e = indices[i]; mFacets[facetId].firstEdgeNumber = i; mFacets[facetId].edgesCount = 3; mFacets[facetId].materialId = 0; //Unassigned for now mFacets[facetId].smoothingGroup = -1; facetId++; } recalculateBoundingBox(); } MeshImpl::MeshImpl(const Vertex* vertices, const Edge* edges, const Facet* facets, uint32_t posCount, uint32_t edgesCount, uint32_t facetsCount) { mVertices.resize(posCount); mEdges.resize(edgesCount); mFacets.resize(facetsCount); memcpy(mVertices.data(), vertices, sizeof(Vertex) * posCount); memcpy(mEdges.data(), edges, sizeof(Edge) * edgesCount); memcpy(mFacets.data(), facets, sizeof(Facet) * facetsCount); recalculateBoundingBox(); } MeshImpl::MeshImpl(const Vertex* vertices, uint32_t 
count) { mVertices = std::vector<Vertex>(vertices, vertices + count); mEdges.resize(count); mFacets.resize(count / 3); uint32_t vp = 0; for (uint32_t i = 0; i < count; i += 3) { mEdges[i].s = vp; mEdges[i].e = vp + 1; mEdges[i + 1].s = vp + 1; mEdges[i + 1].e = vp + 2; mEdges[i + 2].s = vp + 2; mEdges[i + 2].e = vp; vp += 3; } for (uint32_t i = 0; i < count / 3; ++i) { mFacets[i].edgesCount = 3; mFacets[i].firstEdgeNumber = i * 3; } recalculateBoundingBox(); } MeshImpl::MeshImpl(const Vertex* vertices, uint32_t count, uint32_t* indices, uint32_t indexCount, void* materials, uint32_t materialStride) { mVertices = std::vector<Vertex>(vertices, vertices + count); mEdges.resize(indexCount); mFacets.resize(indexCount / 3); for (uint32_t i = 0; i < indexCount; i += 3) { mEdges[i].s = indices[i]; mEdges[i].e = indices[i + 1]; mEdges[i + 1].s = indices[i + 1]; mEdges[i + 1].e = indices[i + 2]; mEdges[i + 2].s = indices[i + 2]; mEdges[i + 2].e = indices[i]; } for (uint32_t i = 0; i < indexCount / 3; ++i) { mFacets[i].edgesCount = 3; mFacets[i].firstEdgeNumber = i * 3; mFacets[i].userData = 0; if (materials != nullptr) { mFacets[i].materialId = *(uint32_t*)((uint8_t*)materials + i * materialStride); } } recalculateBoundingBox(); } float MeshImpl::getMeshVolumeAndCentroid(NvcVec3& centroid) const { class MeshImplQuery { public: MeshImplQuery(const MeshImpl& mesh) : m_mesh(mesh) {} size_t faceCount() const { return (size_t)m_mesh.getFacetCount(); } size_t vertexCount(size_t faceIndex) const { return (size_t)m_mesh.getFacet((int32_t)faceIndex)->edgesCount; } NvcVec3 vertex(size_t faceIndex, size_t vertexIndex) const { const Nv::Blast::Facet* facet = m_mesh.getFacet(faceIndex); return m_mesh.getVertices()[m_mesh.getEdges()[facet->firstEdgeNumber + vertexIndex].s].p; } const MeshImpl& m_mesh; }; return calculateMeshVolumeAndCentroid<MeshImplQuery>(centroid, *this); } uint32_t MeshImpl::getFacetCount() const { return static_cast<uint32_t>(mFacets.size()); } Vertex* MeshImpl::getVerticesWritable() { return mVertices.data(); } Edge* MeshImpl::getEdgesWritable() { return mEdges.data(); } const Vertex* MeshImpl::getVertices() const { return mVertices.data(); } const Edge* MeshImpl::getEdges() const { return mEdges.data(); } uint32_t MeshImpl::getEdgesCount() const { return static_cast<uint32_t>(mEdges.size()); } uint32_t MeshImpl::getVerticesCount() const { return static_cast<uint32_t>(mVertices.size()); } Facet* MeshImpl::getFacetsBufferWritable() { return mFacets.data(); } const Facet* MeshImpl::getFacetsBuffer() const { return mFacets.data(); } Facet* MeshImpl::getFacetWritable(int32_t facet) { return &mFacets[facet]; } const Facet* MeshImpl::getFacet(int32_t facet) const { return &mFacets[facet]; } MeshImpl::~MeshImpl() { } void MeshImpl::release() { delete this; } const NvcBounds3& MeshImpl::getBoundingBox() const { return fromNvShared(mBounds); } NvcBounds3& MeshImpl::getBoundingBoxWritable() { return fromNvShared(mBounds); } void MeshImpl::recalculateBoundingBox() { mBounds.setEmpty(); for (uint32_t i = 0; i < mVertices.size(); ++i) { mBounds.include(toNvShared(mVertices[i].p)); } calcPerFacetBounds(); } const NvcBounds3* MeshImpl::getFacetBound(uint32_t index) const { if (mPerFacetBounds.empty()) { return nullptr; } return &fromNvShared(mPerFacetBounds[index]); } void MeshImpl::calcPerFacetBounds() { mPerFacetBounds.resize(mFacets.size()); for (uint32_t i = 0; i < mFacets.size(); ++i) { auto& fb = mPerFacetBounds[i]; fb.setEmpty(); for (uint32_t v = 0; v < mFacets[i].edgesCount; ++v) { 
fb.include(toNvShared(mVertices[mEdges[mFacets[i].firstEdgeNumber + v].s].p)); fb.include(toNvShared(mVertices[mEdges[mFacets[i].firstEdgeNumber + v].e].p)); } } } void MeshImpl::setMaterialId(const int32_t* materialId) { if (materialId != nullptr) { for (uint32_t i = 0; i < mFacets.size(); ++i) { mFacets[i].materialId = *materialId; ++materialId; } } } bool MeshImpl::isValid() const { return mVertices.size() > 0 && mEdges.size() > 0 && mFacets.size() > 0; } void MeshImpl::replaceMaterialId(int32_t oldMaterialId, int32_t newMaterialId) { for (uint32_t i = 0; i < mFacets.size(); ++i) { if (mFacets[i].materialId == oldMaterialId) { mFacets[i].materialId = newMaterialId; } } } void MeshImpl::setSmoothingGroup(const int32_t* smoothingGroups) { if (smoothingGroups != nullptr) { for (uint32_t i = 0; i < mFacets.size(); ++i) { mFacets[i].smoothingGroup = *smoothingGroups; ++smoothingGroups; } } } } // namespace Blast } // namespace Nv
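// ------------------------------------------------------------------------------------------------
// Editor's note: a small query sketch appended to (not part of) the file above. Per the interface
// comments, the volume/centroid computation assumes a closed mesh with outward-facing normals, and
// getFacetBound() returns nullptr until per-facet bounds have been computed.
#include "NvBlastExtAuthoringMeshImpl.h"

static float sketchVolumeAndCentroid(const Nv::Blast::Mesh& mesh, NvcVec3& centroidOut)
{
    // Delegates to calculateMeshVolumeAndCentroid() over all facets of the mesh.
    return mesh.getMeshVolumeAndCentroid(centroidOut);
}

static const NvcBounds3* sketchFacetBound(Nv::Blast::Mesh& mesh, uint32_t facetIndex)
{
    if (mesh.getFacetBound(facetIndex) == nullptr)
    {
        mesh.calcPerFacetBounds();  // populate per-facet bounds on demand
    }
    return mesh.getFacetBound(facetIndex);
}
// ------------------------------------------------------------------------------------------------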
9,218
C++
25.34
144
0.622152
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkActorImpl.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvPreprocessor.h" #include "NvBlastTkFrameworkImpl.h" #include "NvBlastTkActorImpl.h" #include "NvBlastTkGroupImpl.h" #include "NvBlastTkAssetImpl.h" #include "NvBlastTkFamilyImpl.h" #include "NvBlastTkJointImpl.h" #include "NvBlast.h" #include "NvBlastAssert.h" #include "NvBlastMemory.h" namespace Nv { namespace Blast { TkActorImpl* TkActorImpl::create(const TkActorDesc& desc) { const TkAssetImpl* asset = static_cast<const TkAssetImpl*>(desc.asset); TkFamilyImpl* family = TkFamilyImpl::create(asset); NvBlastFamily* familyLL = family->getFamilyLLInternal(); Array<char>::type scratch((uint32_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(familyLL, logLL)); NvBlastActor* actorLL = NvBlastFamilyCreateFirstActor(familyLL, &desc, scratch.begin(), logLL); if (actorLL == nullptr) { NVBLAST_LOG_ERROR("TkActorImpl::create: low-level actor could not be created."); return nullptr; } TkActorImpl* actor = family->addActor(actorLL); if (actor != nullptr) { // Add internal joints const uint32_t internalJointCount = asset->getJointDescCountInternal(); const TkAssetJointDesc* jointDescs = asset->getJointDescsInternal(); const NvBlastSupportGraph graph = asset->getGraph(); TkJointImpl* joints = family->getInternalJoints(); for (uint32_t jointNum = 0; jointNum < internalJointCount; ++jointNum) { const TkAssetJointDesc& assetJointDesc = jointDescs[jointNum]; NVBLAST_ASSERT(assetJointDesc.nodeIndices[0] < graph.nodeCount && assetJointDesc.nodeIndices[1] < graph.nodeCount); TkJointDesc jointDesc; jointDesc.families[0] = jointDesc.families[1] = family; jointDesc.chunkIndices[0] = graph.chunkIndices[assetJointDesc.nodeIndices[0]]; jointDesc.chunkIndices[1] = graph.chunkIndices[assetJointDesc.nodeIndices[1]]; jointDesc.attachPositions[0] = assetJointDesc.attachPositions[0]; jointDesc.attachPositions[1] = assetJointDesc.attachPositions[1]; TkJointImpl* joint = new (joints + jointNum) TkJointImpl(jointDesc, family); actor->addJoint(joint->m_links[0]); } // Mark as damaged to trigger 
the first split call. It could be the case that the asset is already split into several actors initially. actor->markAsDamaged(); } return actor; } //////// Member functions //////// TkActorImpl::TkActorImpl() : m_actorLL(nullptr) , m_family(nullptr) , m_group(nullptr) , m_groupJobIndex(invalidIndex<uint32_t>()) , m_flags(0) , m_jointCount(0) { #if NV_PROFILE NvBlastTimersReset(&m_timers); #endif } TkActorImpl::~TkActorImpl() { } void TkActorImpl::release() { // Disassociate all joints // Copy joint array for safety against implementation of joint->setActor TkJointImpl** joints = reinterpret_cast<TkJointImpl**>(NvBlastAlloca(sizeof(TkJointImpl*)*getJointCountInternal())); TkJointImpl** stop = joints + getJointCountInternal(); TkJointImpl** jointHandle = joints; for (JointIt j(*this); (bool)j; ++j) { *jointHandle++ = *j; } jointHandle = joints; while (jointHandle < stop) { NVBLAST_ASSERT(*jointHandle != nullptr); NVBLAST_ASSERT((*jointHandle)->getDataInternal().actors[0] == this || (*jointHandle)->getDataInternal().actors[1] == this); (*jointHandle++)->setActors(nullptr, nullptr); } NVBLAST_ASSERT(getJointCountInternal() == 0); if (m_group != nullptr) { m_group->removeActor(*this); } if (m_actorLL != nullptr) { NvBlastActorDeactivate(m_actorLL, logLL); } if (m_family != nullptr) { m_family->removeActor(this); // Make sure we dispatch any remaining events when this family is emptied, since it will no longer be done by any group if (m_family->getActorCountInternal() == 0) { m_family->getQueue().dispatch(); } } } const NvBlastActor* TkActorImpl::getActorLL() const { return m_actorLL; } TkFamily& TkActorImpl::getFamily() const { return getFamilyImpl(); } uint32_t TkActorImpl::getIndex() const { return getIndexInternal(); } TkGroup* TkActorImpl::getGroup() const { return getGroupImpl(); } TkGroup* TkActorImpl::removeFromGroup() { if (m_group == nullptr) { NVBLAST_LOG_WARNING("TkActorImpl::removeFromGroup: actor not in a group."); return nullptr; } if (m_group->isProcessing()) { NVBLAST_LOG_ERROR("TkActorImpl::removeFromGroup: cannot alter Group while processing."); return nullptr; } TkGroup* group = m_group; return m_group->removeActor(*this) ?
group : nullptr; } NvBlastFamily* TkActorImpl::getFamilyLL() const { return m_family->getFamilyLLInternal(); } const TkAsset* TkActorImpl::getAsset() const { return m_family->getAssetImpl(); } uint32_t TkActorImpl::getVisibleChunkCount() const { return NvBlastActorGetVisibleChunkCount(m_actorLL, logLL); } uint32_t TkActorImpl::getVisibleChunkIndices(uint32_t* visibleChunkIndices, uint32_t visibleChunkIndicesSize) const { return NvBlastActorGetVisibleChunkIndices(visibleChunkIndices, visibleChunkIndicesSize, m_actorLL, logLL); } uint32_t TkActorImpl::getGraphNodeCount() const { return NvBlastActorGetGraphNodeCount(m_actorLL, logLL); } uint32_t TkActorImpl::getGraphNodeIndices(uint32_t* graphNodeIndices, uint32_t graphNodeIndicesSize) const { return NvBlastActorGetGraphNodeIndices(graphNodeIndices, graphNodeIndicesSize, m_actorLL, logLL); } const float* TkActorImpl::getBondHealths() const { return NvBlastActorGetBondHealths(m_actorLL, logLL); } uint32_t TkActorImpl::getSplitMaxActorCount() const { return NvBlastActorGetMaxActorCountForSplit(m_actorLL, logLL); } bool TkActorImpl::isDamaged() const { NVBLAST_ASSERT(!m_flags.isSet(TkActorFlag::DAMAGED) || (m_flags.isSet(TkActorFlag::DAMAGED) && m_flags.isSet(TkActorFlag::PENDING))); return m_flags.isSet(TkActorFlag::DAMAGED); } void TkActorImpl::markAsDamaged() { m_flags |= TkActorFlag::DAMAGED; makePending(); } void TkActorImpl::makePending() { if (m_group != nullptr && !isPending()) { m_group->enqueue(this); } m_flags |= TkActorFlag::PENDING; } TkActorImpl::operator Nv::Blast::TkActorData() const { TkActorData data = { m_family, userData, getIndex() }; return data; } void TkActorImpl::damage(const NvBlastDamageProgram& program, const void* programParams) { BLAST_PROFILE_SCOPE_L("TkActor::damage"); if (m_group == nullptr) { NVBLAST_LOG_WARNING("TkActor::damage: actor is not in a group, cannot fracture."); return; } if (m_group->isProcessing()) { NVBLAST_LOG_WARNING("TkActor::damage: group is being processed, cannot fracture this actor."); return; } if (NvBlastActorCanFracture(m_actorLL, logLL)) { m_damageBuffer.pushBack(DamageData{ program, programParams}); makePending(); } } void TkActorImpl::generateFracture(NvBlastFractureBuffers* commands, const NvBlastDamageProgram& program, const void* programParams) const { BLAST_PROFILE_SCOPE_L("TkActor::generateFracture"); if (m_group && m_group->isProcessing()) { NVBLAST_LOG_WARNING("TkActor::generateFracture: group is being processed, cannot fracture this actor."); return; } // const context, must make m_timers mutable otherwise NvBlastActorGenerateFracture(commands, m_actorLL, program, programParams, logLL, const_cast<NvBlastTimers*>(&m_timers)); } void TkActorImpl::applyFracture(NvBlastFractureBuffers* eventBuffers, const NvBlastFractureBuffers* commands) { BLAST_PROFILE_SCOPE_L("TkActor::applyFracture"); if (m_group && m_group->isProcessing()) { NVBLAST_LOG_WARNING("TkActor::applyFracture: group is being processed, cannot fracture this actor."); return; } NvBlastActorApplyFracture(eventBuffers, m_actorLL, commands, logLL, &m_timers); if (commands->chunkFractureCount > 0 || commands->bondFractureCount > 0) { markAsDamaged(); TkFractureCommands* fevt = getFamilyImpl().getQueue().allocData<TkFractureCommands>(); fevt->tkActorData = *this; fevt->buffers = *commands; getFamilyImpl().getQueue().addEvent(fevt); getFamilyImpl().getQueue().dispatch(); } } uint32_t TkActorImpl::getJointCount() const { return getJointCountInternal(); } uint32_t TkActorImpl::getJoints(TkJoint** joints, uint32_t jointsSize) 
const { uint32_t jointsWritten = 0; for (JointIt j(*this); (bool)j && jointsWritten < jointsSize; ++j) { joints[jointsWritten++] = *j; } return jointsWritten; } bool TkActorImpl::hasExternalBonds() const { return NvBlastActorHasExternalBonds(m_actorLL, logLL); } } // namespace Blast } // namespace Nv
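// Illustrative usage sketch (added for clarity; not part of the original file).
// It shows how the damage() path implemented above is meant to be driven from
// user code. The actor, damage program, and program parameters are assumed to
// be created elsewhere; no new SDK API is introduced below.
static void queueDamageSketch(Nv::Blast::TkActor* actor,
                              const NvBlastDamageProgram& program,
                              const void* programParams)
{
    // Buffers a damage request on the actor. As implemented above, this warns
    // and does nothing if the actor has no group or its group is processing.
    actor->damage(program, programParams);

    if (actor->isPending())
    {
        // The request was accepted; the actor will be fractured and split the
        // next time its group is processed.
    }
}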
10,590
C++
27.394102
138
0.694523
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkGUID.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTTKGUID_H #define NVBLASTTKGUID_H #include "NvPreprocessor.h" #if NV_WINDOWS_FAMILY #include <rpc.h> #else //#include <uuid/uuid.h> #include "NvBlastTime.h" #endif #include "NsHash.h" namespace Nv { namespace Blast { #if NV_WINDOWS_FAMILY NV_INLINE NvBlastID TkGenerateGUID(void* ptr) { NV_UNUSED(ptr); NV_COMPILE_TIME_ASSERT(sizeof(UUID) == sizeof(NvBlastID)); NvBlastID guid; UuidCreate(reinterpret_cast<UUID*>(&guid)); return guid; } #else NV_INLINE NvBlastID TkGenerateGUID(void* ptr) { // NV_COMPILE_TIME_ASSERT(sizeof(uuid_t) == sizeof(NvBlastID)); Time time; NvBlastID guid; // uuid_generate_random(reinterpret_cast<uuid_t&>(guid)); *reinterpret_cast<uint64_t*>(guid.data) = reinterpret_cast<uintptr_t>(ptr); *reinterpret_cast<int64_t*>(guid.data + 8) = time.getLastTickCount(); return guid; } #endif /** Compares two NvBlastIDs. \param[in] id1 A pointer to the first id to compare. \param[in] id2 A pointer to the second id to compare. \return true iff ids are equal. */ NV_INLINE bool TkGUIDsEqual(const NvBlastID* id1, const NvBlastID* id2) { return !memcmp(id1, id2, sizeof(NvBlastID)); } /** Clears an NvBlastID (sets all of its fields to zero). \param[out] id A pointer to the ID to clear. */ NV_INLINE void TkGUIDReset(NvBlastID* id) { memset(id, 0, sizeof(NvBlastID)); } /** Tests an NvBlastID to determine if it's zeroed. After calling TkGUIDReset on an ID, passing it to this function will return a value of true. \param[in] id A pointer to the ID to test. 
*/ NV_INLINE bool TkGUIDIsZero(const NvBlastID* id) { return *reinterpret_cast<const uint64_t*>(&id->data[0]) == 0 && *reinterpret_cast<const uint64_t*>(&id->data[8]) == 0; } } // namespace Blast } // namespace Nv namespace nvidia { namespace shdfnd { // hash specialization for NvBlastID template <> struct Hash<NvBlastID> { uint32_t operator()(const NvBlastID& k) const { // "DJB" string hash uint32_t h = 5381; for (uint32_t i = 0; i < sizeof(k.data) / sizeof(k.data[0]); ++i) h = ((h << 5) + h) ^ uint32_t(k.data[i]); return h; } bool equal(const NvBlastID& k0, const NvBlastID& k1) const { return Nv::Blast::TkGUIDsEqual(&k0, &k1); } }; } // namespace shdfnd } // namespace nvidia #endif // #ifndef NVBLASTTKGUID_H
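// Illustrative usage sketch (added for clarity; not part of the original header).
// It exercises the GUID helpers defined above; the local variables are
// placeholders, and the pointer passed to TkGenerateGUID is only used as extra
// entropy on non-Windows builds.
static void tkGuidSketch()
{
    int seed = 0;
    NvBlastID a = Nv::Blast::TkGenerateGUID(&seed);
    NvBlastID b = a;

    bool same = Nv::Blast::TkGUIDsEqual(&a, &b);    // true: b is a byte copy of a

    Nv::Blast::TkGUIDReset(&b);                     // zero all of b's bytes
    bool zero = Nv::Blast::TkGUIDIsZero(&b);        // true after a reset

    // The Hash<NvBlastID> specialization above lets NvBlastID be used as a key
    // in the SDK's hash containers.
    uint32_t bucketHash = nvidia::shdfnd::Hash<NvBlastID>()(a);

    (void)same; (void)zero; (void)bucketHash;
}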
3,944
C
25.655405
122
0.704615
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkTaskImpl.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTTKTASKIMPL_H #define NVBLASTTKTASKIMPL_H #include "NvBlast.h" #include "NvBlastTkFrameworkImpl.h" #include "NvBlastTkEventQueue.h" #include "NvBlastArray.h" #include <atomic> #include <mutex> #include <condition_variable> #include "NvBlastAssert.h" #include "NvBlastTkGroup.h" // TkGroupStats namespace Nv { namespace Blast { class TkGroupImpl; class TkActorImpl; class TkFamilyImpl; /** Transient structure describing a job and its results. */ struct TkWorkerJob { TkActorImpl* m_tkActor; //!< the actor to process TkActorImpl** m_newActors; //!< list of child actors created by splitting uint32_t m_newActorsCount; //!< the number of child actors created }; /** A list of equally sized memory blocks sharable between tasks. */ template<typename T> class SharedBlock { public: SharedBlock() : m_numElementsPerBlock(0), m_numBlocks(0), m_buffer(nullptr) {} /** Allocates one large memory block of elementsPerBlock*numBlocks elements. */ void allocate(uint32_t elementsPerBlock, uint32_t numBlocks) { NVBLAST_ASSERT(elementsPerBlock > 0 && numBlocks > 0); m_buffer = reinterpret_cast<T*>(NVBLAST_ALLOC_NAMED(elementsPerBlock*numBlocks*sizeof(T), "SharedBlock")); m_numElementsPerBlock = elementsPerBlock; m_numBlocks = numBlocks; } /** Returns the pointer to the first element of a block of numElementsPerBlock() elements. */ T* getBlock(uint32_t id) { NVBLAST_ASSERT(id < m_numBlocks || 0 == m_numElementsPerBlock); return &m_buffer[id*m_numElementsPerBlock]; } /** The number of elements available per block. */ uint32_t numElementsPerBlock() const { return m_numElementsPerBlock; } /** Frees the whole memory block. 
*/ void release() { m_numBlocks = 0; m_numElementsPerBlock = 0; NVBLAST_FREE(m_buffer); m_buffer = nullptr; } private: uint32_t m_numElementsPerBlock; //!< elements available in one block uint32_t m_numBlocks; //!< number of virtual blocks available T* m_buffer; //!< contiguous memory for all blocks }; /** A preallocated, shared array from which can be allocated from in tasks. Intended to be used when the maximum amount of data (e.g. for a family) is known in advance. No further allocations take place on exhaustion. Exhaustion asserts in debug builds and overflows otherwise. */ template<typename T> class SharedBuffer { public: SharedBuffer() : m_capacity(0), m_used(0), m_buffer(nullptr) {} /** Atomically gets a pointer to the first element of an array of n elements. */ T* reserve(size_t n) { NVBLAST_ASSERT(m_used + n <= m_capacity); size_t start = m_used.fetch_add(n); return &m_buffer[start]; } /** Preallocates memory for capacity elements. */ void allocate(size_t capacity) { NVBLAST_ASSERT(m_buffer == nullptr); m_buffer = reinterpret_cast<T*>(NVBLAST_ALLOC_NAMED(capacity*sizeof(T), "SplitMemory")); m_capacity = capacity; } /** Preserves the memory allocated but resets to reserve from the beginning of the array. */ void reset() { m_used = 0; } /** Frees the preallocated array. */ void release() { NVBLAST_ASSERT(m_buffer != nullptr); NVBLAST_FREE(m_buffer); m_buffer = nullptr; m_capacity = m_used = 0; } private: size_t m_capacity; //!< available elements in the buffer std::atomic<size_t> m_used; //!< used elements in the buffer T* m_buffer; //!< the memory containing T's }; /** Allocates from a preallocated, externally owned memory block initialized with. When blocks run out of space, new ones are allocated and owned by this class. */ template<typename T> class LocalBuffer { public: /** Returns the pointer to the first element of an array of n elements. Allocates a new block of memory when exhausted, its size being the larger of n and capacity set with initialize(). */ T* allocate(size_t n) { if (m_used + n > m_capacity) { allocateNewBlock(n > m_capacity ? n : m_capacity); } size_t index = m_used; m_used += n; return &m_currentBlock[index]; } /** Release the additionally allocated memory blocks. The externally owned memory block remains untouched. */ void clear() { for (void* block : m_memoryBlocks) { NVBLAST_FREE(block); } m_memoryBlocks.clear(); } /** Set the externally owned memory block to start allocating from, with a size of capacity elements. */ void initialize(T* block, size_t capacity) { m_currentBlock = block; m_capacity = capacity; m_used = 0; } private: /** Allocates space for capacity elements. */ void allocateNewBlock(size_t capacity) { BLAST_PROFILE_SCOPE_L("Local Buffer allocation"); m_capacity = capacity; m_currentBlock = static_cast<T*>(NVBLAST_ALLOC_NAMED(capacity*sizeof(T), "Blast LocalBuffer")); m_memoryBlocks.pushBack(m_currentBlock); m_used = 0; } InlineArray<void*, 4>::type m_memoryBlocks; //!< storage for memory blocks T* m_currentBlock; //!< memory block used to allocate from size_t m_used; //!< elements used in current block size_t m_capacity; //!< elements available in current block }; /** Holds the memory used by TkWorker for each family in each group. */ class SharedMemory { public: SharedMemory() : m_eventsMemory(0), m_eventsCount(0), m_refCount(0) {} /** Reserves n entries from preallocated memory. */ NvBlastActor** reserveNewActors(size_t n) { return m_newActorBuffers.reserve(n); } /** Reserves n entries from preallocated memory. 
*/ TkActor** reserveNewTkActors(size_t n) { return m_newTkActorBuffers.reserve(n); } /** Allocates buffers to hold */ void allocate(TkFamilyImpl&); /** Resets the internal buffers to reserve from their beginning. Preserves the allocated memory. */ void reset() { m_newActorBuffers.reset(); m_newTkActorBuffers.reset(); } /** Increments the reference count. */ void addReference() { m_refCount++; } /** Increments the reference count by n. */ void addReference(size_t n) { m_refCount += n; } /** Decrements the reference count. Returns true if the count reached zero. */ bool removeReference() { m_refCount--; return !isUsed(); } /** Checks if the reference count is not zero. */ bool isUsed() { return m_refCount > 0; } /** Release the internal buffers' memory. */ void release() { m_newActorBuffers.release(); m_newTkActorBuffers.release(); } TkEventQueue m_events; //!< event queue shared across a group's actors of the same family uint32_t m_eventsMemory; //!< expected memory size for event data uint32_t m_eventsCount; //!< expected number of events private: size_t m_refCount; //!< helper for usage and releasing memory SharedBuffer<NvBlastActor*> m_newActorBuffers; //!< memory for splitting SharedBuffer<TkActor*> m_newTkActorBuffers; //!< memory for split events }; /** Thread worker fracturing and splitting actors sequentially. The list of actual jobs is provided by the group owning this worker. */ class TkWorker final : public TkGroupWorker { public: TkWorker() : m_id(~(uint32_t)0), m_group(nullptr), m_isBusy(false) {} void process(uint32_t jobID); void initialize(); void process(TkWorkerJob& job); uint32_t m_id; //!< this worker's id TkGroupImpl* m_group; //!< the group owning this worker LocalBuffer<NvBlastChunkFractureData> m_chunkBuffer; //!< memory manager for chunk event data LocalBuffer<NvBlastBondFractureData> m_bondBuffer; //!< memory manager for bonds event data void* m_splitScratch; NvBlastFractureBuffers m_tempBuffer; bool m_isBusy; #if NV_PROFILE TkGroupStats m_stats; #endif }; } } #endif // NVBLASTTKTASKIMPL_H
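// Illustrative usage sketch (added for clarity; not part of the original header).
// It walks through the intended lifecycle of SharedBuffer, the helper declared
// above that hands out non-overlapping slices of one preallocated array to
// concurrent tasks via an atomic cursor. The sizes below are placeholders.
static void sharedBufferSketch()
{
    Nv::Blast::SharedBuffer<uint32_t> buf;
    buf.allocate(128);                   // preallocate for the known worst case

    uint32_t* sliceA = buf.reserve(32);  // e.g. claimed by one task
    uint32_t* sliceB = buf.reserve(64);  // e.g. claimed by another; never overlaps sliceA
    (void)sliceA; (void)sliceB;

    buf.reset();                         // keep the memory, start reserving from 0 again
    buf.release();                       // free the underlying array when done
}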
10,422
C
26.9437
122
0.627807
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkFamilyImpl.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastTkFrameworkImpl.h" #include "NvBlastTkFamilyImpl.h" #include "NvBlastTkGroupImpl.h" #include "NvBlastTkAssetImpl.h" #include "NvBlastTkActorImpl.h" #include "NvBlastTkJointImpl.h" #include "NvBlastIndexFns.h" #include "NvBlastMemory.h" namespace Nv { namespace Blast { //////// Static data //////// NVBLASTTK_DEFINE_TYPE_IDENTIFIABLE(Family); //////// Member functions //////// TkFamilyImpl::TkFamilyImpl() : m_familyLL(nullptr), m_internalJointCount(0), m_asset(nullptr) { } TkFamilyImpl::TkFamilyImpl(const NvBlastID& id) : TkFamilyType(id), m_familyLL(nullptr), m_internalJointCount(0), m_asset(nullptr) { } TkFamilyImpl::~TkFamilyImpl() { if (m_familyLL != nullptr) { uint32_t familyActorCount = NvBlastFamilyGetActorCount(m_familyLL, logLL); if (familyActorCount != 0) { NVBLAST_LOG_WARNING("TkFamilyImpl::~TkFamilyImpl(): family actor count is not 0."); } NVBLAST_FREE(m_familyLL); } } void TkFamilyImpl::release() { for (TkActorImpl& actor : m_actors) { if (actor.isActive()) { actor.release(); } } m_actors.clear(); NVBLAST_DELETE(this, TkFamilyImpl); } const NvBlastFamily* TkFamilyImpl::getFamilyLL() const { return m_familyLL; } TkActorImpl* TkFamilyImpl::addActor(NvBlastActor* actorLL) { TkActorImpl* actor = getActorByActorLL(actorLL); NVBLAST_ASSERT(actor); actor->m_actorLL = actorLL; actor->m_family = this; return actor; } void TkFamilyImpl::removeActor(TkActorImpl* actor) { NVBLAST_ASSERT(actor != nullptr && actor->m_family == this); //actor->m_family = nullptr; actor->m_actorLL = nullptr; } uint32_t TkFamilyImpl::getActorCount() const { return getActorCountInternal(); } uint32_t TkFamilyImpl::getActors(TkActor** buffer, uint32_t bufferSize, uint32_t indexStart /*= 0*/) const { uint32_t actorCount = getActorCount(); if (actorCount <= indexStart) { NVBLAST_LOG_WARNING("TkFamilyImpl::getActors: indexStart beyond end of actor list."); return 0; } actorCount -= indexStart; if (actorCount > bufferSize) { actorCount = static_cast<uint32_t>(bufferSize); } uint32_t index 
= 0; for (const TkActorImpl& actor : m_actors) { if (actor.isActive()) { if (index >= indexStart) { if ((index - indexStart) >= actorCount) { break; } else { *buffer++ = const_cast<TkActorImpl*>(&actor); } } index++; } } return actorCount; } NV_INLINE bool areLLActorsEqual(const NvBlastActor* actor0, const NvBlastActor* actor1, Array<uint32_t>::type& scratch) { if (NvBlastActorGetGraphNodeCount(actor0, logLL) != NvBlastActorGetGraphNodeCount(actor1, logLL)) { return false; } const uint32_t chunkCount = NvBlastActorGetVisibleChunkCount(actor0, logLL); if (chunkCount != NvBlastActorGetVisibleChunkCount(actor1, logLL)) { return false; } scratch.resize(chunkCount * 2); NvBlastActorGetVisibleChunkIndices(scratch.begin(), chunkCount, actor0, logLL); NvBlastActorGetVisibleChunkIndices(scratch.begin() + chunkCount, chunkCount, actor1, logLL); return memcmp(scratch.begin(), scratch.begin() + chunkCount, chunkCount * sizeof(uint32_t)) == 0; } void TkFamilyImpl::reinitialize(const NvBlastFamily* newFamily, TkGroup* group) { NVBLAST_ASSERT(newFamily); #if NV_ENABLE_ASSERTS NvBlastID id0 = NvBlastFamilyGetAssetID(m_familyLL, logLL); NvBlastID id1 = NvBlastFamilyGetAssetID(newFamily, logLL); NVBLAST_ASSERT(TkGUIDsEqual(&id0, &id1)); #endif NVBLAST_ASSERT(NvBlastFamilyGetSize(m_familyLL, logLL) == NvBlastFamilyGetSize(newFamily, logLL)); // alloc and init new family const uint32_t blockSize = NvBlastFamilyGetSize(newFamily, logLL); NvBlastFamily* newFamilyCopy = (NvBlastFamily*)NVBLAST_ALLOC_NAMED(blockSize, "TkFamilyImpl::reinitialize"); memcpy(newFamilyCopy, newFamily, blockSize); NvBlastFamilySetAsset(newFamilyCopy, m_asset->getAssetLL(), logLL); // get actors from new family Array<NvBlastActor*>::type newLLActors(NvBlastFamilyGetActorCount(newFamilyCopy, logLL)); uint32_t actorCount = NvBlastFamilyGetActors(newLLActors.begin(), newLLActors.size(), newFamilyCopy, logLL); // reset actor families to nullptr (we use it as a flag later) for (TkActorImpl& actor : m_actors) { if (actor.isActive()) { actor.m_family = nullptr; } } // prepare split event with new actors auto newActorsSplitEvent = getQueue().allocData<TkSplitEvent>(); Array<TkActor*>::type children(actorCount); children.resizeUninitialized(0); newActorsSplitEvent->children = children.begin(); // scratch Array<uint32_t>::type scratch(m_asset->getChunkCount()); for (uint32_t i = 0; i < actorCount; ++i) { NvBlastActor* newLLActor = newLLActors[i]; uint32_t actorIndex = NvBlastActorGetIndex(newLLActor, logLL); TkActorImpl& tkActor = *getActorByIndex(actorIndex); tkActor.m_family = this; if (!tkActor.isActive() || !areLLActorsEqual(newLLActor, tkActor.m_actorLL, scratch)) { if (tkActor.isActive()) { auto removeSplitEvent = getQueue().allocData<TkSplitEvent>(); removeSplitEvent->parentData.family = this; removeSplitEvent->numChildren = 0; removeSplitEvent->parentData.userData = tkActor.userData; removeSplitEvent->parentData.index = tkActor.getIndex(); getQueue().addEvent(removeSplitEvent); } tkActor.m_actorLL = newLLActor; // switch groups TkGroupImpl* prevGroup = tkActor.m_group; if (prevGroup != group) { if (prevGroup) { prevGroup->removeActor(tkActor); } if (group) { group->addActor(tkActor); } } children.pushBack(&tkActor); } else { tkActor.m_actorLL = newLLActor; } } // if m_family is still nullptr for an active actor -> remove it. It doesn't exist in new family. 
for (TkActorImpl& tkActor : m_actors) { if (tkActor.isActive() && tkActor.m_family == nullptr) { tkActor.m_family = this; if (tkActor.m_group) { tkActor.m_group->removeActor(tkActor); } auto removeSplitEvent = getQueue().allocData<TkSplitEvent>(); removeSplitEvent->parentData.family = this; removeSplitEvent->numChildren = 0; removeSplitEvent->parentData.userData = tkActor.userData; removeSplitEvent->parentData.index = tkActor.getIndex(); getQueue().addEvent(removeSplitEvent); tkActor.m_actorLL = nullptr; } } // add split event with all new actors newActorsSplitEvent->parentData.family = this; newActorsSplitEvent->parentData.userData = 0; newActorsSplitEvent->parentData.index = invalidIndex<uint32_t>(); newActorsSplitEvent->numChildren = children.size(); if (newActorsSplitEvent->numChildren > 0) { getQueue().addEvent(newActorsSplitEvent); } // replace family NVBLAST_FREE(m_familyLL); m_familyLL = newFamilyCopy; // update joints for (TkActorImpl& tkActor : m_actors) { if (!tkActor.m_jointList.isEmpty()) { updateJoints(&tkActor); } } getQueue().dispatch(); } TkActorImpl* TkFamilyImpl::getActorByChunk(uint32_t chunk) { if (chunk >= NvBlastAssetGetChunkCount(m_asset->getAssetLLInternal(), logLL)) { NVBLAST_LOG_WARNING("TkFamilyImpl::getActorByChunk: invalid chunk index. Returning NULL."); return nullptr; } NvBlastActor* actorLL = NvBlastFamilyGetChunkActor(m_familyLL, chunk, logLL); return actorLL ? getActorByActorLL(actorLL) : nullptr; } void TkFamilyImpl::applyFractureInternal(const NvBlastFractureBuffers* commands) { NvBlastSupportGraph graph = getAsset()->getGraph(); // apply bond fracture commands on relevant actors { TkActorImpl* currActor = nullptr; NvBlastBondFractureData* bondFractures = commands->bondFractures; uint32_t bondFracturesCount = 0; auto applyFracture = [&]() { if (bondFracturesCount > 0) { if (currActor != nullptr && currActor->isActive()) { NvBlastFractureBuffers newCommands; newCommands.bondFractures = bondFractures; newCommands.bondFractureCount = bondFracturesCount; newCommands.chunkFractures = nullptr; newCommands.chunkFractureCount = 0; currActor->applyFracture(nullptr, &newCommands); } bondFractures += bondFracturesCount; bondFracturesCount = 0; } }; for (uint32_t i = 0; i < commands->bondFractureCount; ++i, ++bondFracturesCount) { const NvBlastBondFractureData& command = commands->bondFractures[i]; uint32_t chunk0 = graph.chunkIndices[command.nodeIndex0]; uint32_t chunk1 = graph.chunkIndices[command.nodeIndex1]; TkActorImpl* actor0 = getActorByChunk(chunk0); TkActorImpl* actor1 = getActorByChunk(chunk1); if (actor0 != actor1) { // skipping this event, bond already broken actor0 = nullptr; } if (actor0 != currActor) { applyFracture(); currActor = actor0; } } if (bondFracturesCount > 0) { applyFracture(); } } // apply chunk fracture commands on relevant actors { TkActorImpl* currActor = nullptr; NvBlastChunkFractureData* chunkFractures = commands->chunkFractures; uint32_t chunkFracturesCount = 0; auto applyFracture = [&]() { if (chunkFracturesCount > 0) { if (currActor != nullptr && currActor->isActive()) { NvBlastFractureBuffers newCommands; newCommands.bondFractures = nullptr; newCommands.bondFractureCount = 0; newCommands.chunkFractures = chunkFractures; newCommands.chunkFractureCount = chunkFracturesCount; currActor->applyFracture(nullptr, &newCommands); } chunkFractures += chunkFracturesCount; chunkFracturesCount = 0; } }; for (uint32_t i = 0; i < commands->chunkFractureCount; ++i, ++chunkFracturesCount) { const NvBlastChunkFractureData& command = 
commands->chunkFractures[i]; TkActorImpl* actor = getActorByChunk(command.chunkIndex); if (actor != currActor) { applyFracture(); currActor = actor; } } if (chunkFracturesCount > 0) { applyFracture(); } } } void TkFamilyImpl::updateJoints(TkActorImpl* actor, TkEventQueue* alternateQueue) { // Copy joint array for safety against implementation of joint->setActor TkJointImpl** joints = reinterpret_cast<TkJointImpl**>(NvBlastAlloca(sizeof(TkJointImpl*)*actor->getJointCountInternal())); TkJointImpl** stop = joints + actor->getJointCountInternal(); TkJointImpl** jointHandle = joints; for (TkActorImpl::JointIt j(*actor); (bool)j; ++j) { *jointHandle++ = *j; } jointHandle = joints; while (jointHandle < stop) { TkJointImpl* joint = *jointHandle++; const TkJointData& data = joint->getDataInternal(); TkActorImpl* actor0 = data.actors[0] != nullptr ? static_cast<TkActorImpl&>(*data.actors[0]).getFamilyImpl().getActorByChunk(data.chunkIndices[0]) : nullptr; TkActorImpl* actor1 = data.actors[1] != nullptr ? static_cast<TkActorImpl&>(*data.actors[1]).getFamilyImpl().getActorByChunk(data.chunkIndices[1]) : nullptr; joint->setActors(actor0, actor1, alternateQueue); } } const TkAsset* TkFamilyImpl::getAsset() const { return m_asset; } //////// Static functions //////// TkFamilyImpl* TkFamilyImpl::create(const TkAssetImpl* asset) { TkFamilyImpl* family = NVBLAST_NEW(TkFamilyImpl); family->m_asset = asset; void* mem = NVBLAST_ALLOC_NAMED(NvBlastAssetGetFamilyMemorySize(asset->getAssetLL(), logLL), "TkFamilyImpl::create"); family->m_familyLL = NvBlastAssetCreateFamily(mem, asset->getAssetLL(), logLL); //family->addListener(*TkFrameworkImpl::get()); if (family->m_familyLL == nullptr) { NVBLAST_LOG_ERROR("TkFamilyImpl::create: low-level family could not be created."); family->release(); return nullptr; } uint32_t maxActorCount = NvBlastFamilyGetMaxActorCount(family->m_familyLL, logLL); family->m_actors.resize(maxActorCount); family->m_internalJointBuffer.resize(asset->getJointDescCountInternal() * sizeof(TkJointImpl), 0); family->m_internalJointCount = asset->getJointDescCountInternal(); return family; } TkJointImpl** TkFamilyImpl::createExternalJointHandle(const NvBlastID& otherFamilyID, uint32_t chunkIndex0, uint32_t chunkIndex1) { JointSet* jointSet; const FamilyIDMap::Entry* jointSetIndexEntry = m_familyIDMap.find(otherFamilyID); uint32_t otherFamilyIndex; if (jointSetIndexEntry != nullptr) { otherFamilyIndex = jointSetIndexEntry->second; jointSet = m_jointSets[otherFamilyIndex]; } else { jointSet = NVBLAST_NEW(JointSet); NVBLAST_CHECK_ERROR(jointSet != nullptr, "TkFamilyImpl::addExternalJoint: failed to create joint set for other family ID.", return nullptr); jointSet->m_familyID = otherFamilyID; otherFamilyIndex = m_jointSets.size(); m_familyIDMap[otherFamilyID] = otherFamilyIndex; m_jointSets.pushBack(jointSet); } const ExternalJointKey key(chunkIndex0, chunkIndex1); const bool jointExists = jointSet->m_joints.find(key) != nullptr; NVBLAST_CHECK_WARNING(!jointExists, "TkFamilyImpl::addExternalJoint: joint already added.", return nullptr); return &jointSet->m_joints[key]; } bool TkFamilyImpl::deleteExternalJointHandle(TkJointImpl*& joint, const NvBlastID& otherFamilyID, uint32_t chunkIndex0, uint32_t chunkIndex1) { const FamilyIDMap::Entry* jointSetIndexEntry = m_familyIDMap.find(otherFamilyID); if (jointSetIndexEntry != nullptr) { const uint32_t jointSetIndex = jointSetIndexEntry->second; ExternalJointKey jointKey = ExternalJointKey(chunkIndex0, chunkIndex1); const HashMap<ExternalJointKey, 
TkJointImpl*>::type::Entry* e = m_jointSets[jointSetIndex]->m_joints.find(jointKey); if (e != nullptr) { joint = e->second; // Return value that was stored m_jointSets[jointSetIndex]->m_joints.erase(jointKey); // Delete the joint set if it is empty if (m_jointSets[jointSetIndex]->m_joints.size() == 0) { NVBLAST_DELETE(m_jointSets[jointSetIndex], JointSet); m_jointSets.replaceWithLast(jointSetIndex); m_familyIDMap.erase(otherFamilyID); if (jointSetIndex < m_jointSets.size()) { m_familyIDMap[m_jointSets[jointSetIndex]->m_familyID] = jointSetIndex; } } return true; } } return false; } TkJointImpl* TkFamilyImpl::findExternalJoint(const TkFamilyImpl* otherFamily, ExternalJointKey key) const { const FamilyIDMap::Entry* jointSetIndexEntry = m_familyIDMap.find(getFamilyID(otherFamily)); if (jointSetIndexEntry != nullptr) { const HashMap<ExternalJointKey, TkJointImpl*>::type::Entry* e = m_jointSets[jointSetIndexEntry->second]->m_joints.find(key); if (e != nullptr) { return e->second; } } return nullptr; } } // namespace Blast } // namespace Nv
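// Illustrative usage sketch (added for clarity; not part of the original file).
// It shows the buffer-window pattern that getActors() above is written for:
// callers page through a family's active actors with a fixed-size buffer and a
// start index. The buffer size of 16 is an arbitrary placeholder.
static void listFamilyActorsSketch(Nv::Blast::TkFamily& family)
{
    Nv::Blast::TkActor* buffer[16];
    const uint32_t total = family.getActorCount();
    for (uint32_t start = 0; start < total; start += 16)
    {
        const uint32_t written = family.getActors(buffer, 16, start);
        for (uint32_t i = 0; i < written; ++i)
        {
            // ... inspect buffer[i] ...
        }
    }
}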
18,252
C++
31.711469
148
0.633739
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkTask.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastGlobals.h" #include "NvBlastTkTask.h" #include "NvCpuDispatcher.h" #include "NvBlastTkGroup.h" using namespace Nv::Blast; uint32_t TkGroupTaskManagerImpl::process(uint32_t workerCount) { NVBLAST_CHECK_WARNING(m_group != nullptr, "TkGroupTaskManager::process cannot process, no group set.", return 0); NVBLAST_CHECK_WARNING(m_sync.isDone(), "TkGroupTaskManager::process group is already being processed.", return 0); // at least one task must start, even when dispatcher has none specified uint32_t dispatcherThreads = m_taskManager.getCpuDispatcher()->getWorkerCount(); dispatcherThreads = dispatcherThreads > 0 ? dispatcherThreads : 1; // not expecting an arbitrary amount of tasks uint32_t availableTasks = TASKS_MAX_COUNT; // use workerCount tasks, unless dispatcher has less threads or less tasks are available uint32_t requestedTasks = workerCount > 0 ? workerCount : dispatcherThreads; requestedTasks = requestedTasks > dispatcherThreads ? dispatcherThreads : requestedTasks; requestedTasks = requestedTasks > availableTasks ? availableTasks : requestedTasks; // ensure the group has enough memory allocated for concurrent processing m_group->setWorkerCount(requestedTasks); // check if there is work to do uint32_t jobCount = m_group->startProcess(); if (jobCount) { // don't start more tasks than jobs are available requestedTasks = requestedTasks > jobCount ? 
jobCount : requestedTasks; // common counter for all tasks m_counter.reset(jobCount); // set to busy state m_sync.setCount(requestedTasks); // set up tasks for (uint32_t i = 0; i < requestedTasks; i++) { m_tasks[i].setup(m_group, &m_counter, &m_sync); m_tasks[i].setContinuation(m_taskManager, nullptr); m_tasks[i].removeReference(); } return requestedTasks; } // there was no work to be done return 0; } bool TkGroupTaskManagerImpl::wait(bool block) { if (block && !m_sync.isDone()) { m_sync.wait(); } if (m_sync.isDone()) { return m_group->endProcess(); } return false; } void TkGroupTaskManagerImpl::setGroup(TkGroup* group) { NVBLAST_CHECK_WARNING(m_sync.isDone(), "TkGroupTaskManager::setGroup trying to change group while processing.", return); m_group = group; } TkGroupTaskManager* TkGroupTaskManager::create(nvidia::task::NvTaskManager& taskManager, TkGroup* group) { return NVBLAST_NEW(TkGroupTaskManagerImpl) (taskManager, group); } void TkGroupTaskManagerImpl::release() { NVBLAST_CHECK_WARNING(m_sync.isDone(), "TkGroupTaskManager::release group is still being processed.", return); NVBLAST_DELETE(this, TkGroupTaskManagerImpl); }
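// Illustrative usage sketch (added for clarity; not part of the original file).
// It drives a group through the task-manager wrapper implemented above. The
// NvTaskManager and the populated TkGroup are assumed to be created elsewhere;
// passing 0 to process() lets it use the dispatcher's worker count.
static void processGroupWithTaskManagerSketch(nvidia::task::NvTaskManager& taskManager,
                                              Nv::Blast::TkGroup* group)
{
    Nv::Blast::TkGroupTaskManager* gtm =
        Nv::Blast::TkGroupTaskManager::create(taskManager, group);

    const uint32_t tasksStarted = gtm->process(0);
    if (tasksStarted > 0)
    {
        gtm->wait(true);   // block until the tasks finish; this also ends group processing
    }

    gtm->release();
}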
4,386
C++
35.558333
124
0.719562
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkGroupImpl.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvPreprocessor.h" #include "NvBlastAssert.h" #include "NvBlast.h" #include "NvBlastTkFrameworkImpl.h" #include "NvBlastTkGroupImpl.h" #include "NvBlastTkActorImpl.h" #include "NvBlastTkFamilyImpl.h" #include "NvBlastTkAssetImpl.h" #include "NvBlastTkTaskImpl.h" #undef max #undef min #include <algorithm> using namespace nvidia; namespace Nv { namespace Blast { //////// Static data //////// NVBLASTTK_DEFINE_TYPE_IDENTIFIABLE(Group); //////// Member functions //////// TkGroupImpl::TkGroupImpl() : m_actorCount(0), m_isProcessing(false) { #if NV_PROFILE memset(&m_stats, 0, sizeof(TkGroupStats)); #endif } TkGroupImpl::~TkGroupImpl() { NVBLAST_ASSERT(getActorCount() == 0); NVBLAST_ASSERT(m_sharedMemory.size() == 0); } void TkGroupImpl::release() { if (isProcessing()) { // abort all processing? 
NVBLAST_LOG_ERROR("TkGroup::release: cannot release Group while processing."); NVBLAST_ALWAYS_ASSERT_MESSAGE("TkGroup::release: cannot release Group while processing."); return; } for (auto it = m_sharedMemory.getIterator(); !it.done(); ++it) { TkFamilyImpl* family = it->first; for (TkActorImpl& actor : family->getActorsInternal()) { if (actor.m_group == this) { removeActorInternal(actor); } } SharedMemory* mem = it->second; mem->release(); NVBLAST_DELETE(mem, SharedMemory); } m_sharedMemory.clear(); m_bondTempDataBlock.release(); m_chunkTempDataBlock.release(); m_bondEventDataBlock.release(); m_chunkEventDataBlock.release(); m_splitScratchBlock.release(); NVBLAST_DELETE(this, TkGroupImpl); } void TkGroupImpl::addActorsInternal(TkActorImpl** actors, uint32_t numActors) { for (uint32_t i = 0; i < numActors; i++) { addActorInternal(*actors[i]); } } void TkGroupImpl::addActorInternal(TkActorImpl& tkActor) { NVBLAST_ASSERT(tkActor.getGroup() == nullptr); tkActor.m_group = this; m_actorCount++; } bool TkGroupImpl::addActor(TkActor& actor) { TkActorImpl& tkActor = static_cast<TkActorImpl&>(actor); if (tkActor.getGroup() != nullptr) { NVBLAST_LOG_ERROR("TkGroup::addActor: actor already belongs to a Group. Remove from current group first."); return false; } if (isProcessing()) { NVBLAST_LOG_ERROR("TkGroup::addActor: cannot alter Group while processing."); return false; } // mark the actor that it now belongs to this group addActorInternal(tkActor); // actors that were fractured already or have damage requested // must be enqueued to be processed if (tkActor.isPending()) { enqueue(&tkActor); } TkFamilyImpl& family = tkActor.getFamilyImpl(); SharedMemory* mem = m_sharedMemory[&family]; if (mem == nullptr) { // the actor belongs to a family not involved in this group yet // shared memory must be allocated and temporary buffers adjusted accordingly BLAST_PROFILE_ZONE_BEGIN("family memory"); mem = NVBLAST_NEW(SharedMemory); mem->allocate(family); m_sharedMemory[&family] = mem; BLAST_PROFILE_ZONE_END("family memory"); BLAST_PROFILE_ZONE_BEGIN("group memory"); const uint32_t workerCount = m_workers.size(); NvBlastLog theLog = logLL; // this group's tasks will use one temporary buffer each, which is of max size of, for all families involved const size_t requiredScratch = NvBlastActorGetRequiredScratchForSplit(tkActor.getActorLL(), theLog); if (static_cast<size_t>(m_splitScratchBlock.numElementsPerBlock()) < requiredScratch) { m_splitScratchBlock.release(); m_splitScratchBlock.allocate(static_cast<uint32_t>(requiredScratch), workerCount); } // generate and apply fracture may create an entry for each bond const uint32_t bondCount = NvBlastAssetGetBondCount(tkActor.getAsset()->getAssetLL(), theLog); if (m_bondTempDataBlock.numElementsPerBlock() < bondCount) { m_bondTempDataBlock.release(); m_bondTempDataBlock.allocate(bondCount, workerCount); m_bondEventDataBlock.release(); m_bondEventDataBlock.allocate(bondCount, workerCount); } // apply fracture may create an entry for each lower-support chunk const uint32_t graphNodeCount = NvBlastAssetGetSupportGraph(tkActor.getAsset()->getAssetLL(), theLog).nodeCount; const uint32_t subsupportChunkCount = NvBlastAssetGetChunkCount(tkActor.getAsset()->getAssetLL(), theLog) - NvBlastAssetGetFirstSubsupportChunkIndex(tkActor.getAsset()->getAssetLL(), theLog); const uint32_t chunkCount = graphNodeCount + subsupportChunkCount; if (m_chunkTempDataBlock.numElementsPerBlock() < chunkCount) { m_chunkTempDataBlock.release(); m_chunkTempDataBlock.allocate(chunkCount, workerCount); 
m_chunkEventDataBlock.release(); m_chunkEventDataBlock.allocate(chunkCount, workerCount); } BLAST_PROFILE_ZONE_END("group memory"); } mem->addReference(); return true; } uint32_t TkGroupImpl::getActors(TkActor** buffer, uint32_t bufferSize, uint32_t indexStart /* = 0 */) const { BLAST_PROFILE_SCOPE_L("TkGroup::getActors"); uint32_t actorCount = m_actorCount; if (actorCount <= indexStart) { NVBLAST_LOG_WARNING("TkGroup::getActors: indexStart beyond end of actor list."); return 0; } actorCount -= indexStart; if (actorCount > bufferSize) { actorCount = bufferSize; } uint32_t index = 0; bool done = false; for (auto it = const_cast<TkGroupImpl*>(this)->m_sharedMemory.getIterator(); !it.done();++it) { TkFamilyImpl* fam = it->first; for (TkActorImpl& actor : fam->getActorsInternal()) { if (actor.m_group == this) { NVBLAST_ASSERT(actor.isActive()); if (index >= indexStart) { *buffer++ = &actor; } index++; done = (index - indexStart) >= actorCount; } if (done) break; } if (done) break; } return actorCount; } void TkGroupImpl::removeActorInternal(TkActorImpl& tkActor) { NVBLAST_ASSERT(tkActor.m_group == this); tkActor.m_group = nullptr; m_actorCount--; } void TkGroupImpl::releaseSharedMemory(TkFamilyImpl* fam, SharedMemory* mem) { NVBLAST_ASSERT(mem != nullptr && m_sharedMemory[fam] == mem); mem->release(); m_sharedMemory.erase(fam); NVBLAST_DELETE(mem, SharedMemory); } bool TkGroupImpl::removeActor(TkActor& actor) { TkActorImpl& tkActor = static_cast<TkActorImpl&>(actor); if (tkActor.getGroup() != this) { NVBLAST_LOG_ERROR("TkGroup::removeActor: actor does not belong to this Group."); return false; } if (isProcessing()) { NVBLAST_LOG_ERROR("TkGroup::removeActor: cannot alter Group while processing."); return false; } removeActorInternal(tkActor); // pending actors must be removed from the job queue as well if(tkActor.isPending()) { uint32_t index = tkActor.m_groupJobIndex; tkActor.m_groupJobIndex = invalidIndex<uint32_t>(); if (index < m_jobs.size()) { m_jobs.replaceWithLast(index); if (index < m_jobs.size()) { NVBLAST_ASSERT(m_jobs[index].m_tkActor->m_groupJobIndex == m_jobs.size()); NVBLAST_ASSERT(m_jobs[index].m_tkActor->isPending()); m_jobs[index].m_tkActor->m_groupJobIndex = index; } } } // if the actor is the last of its family in this group // the group-family memory can be released TkFamilyImpl* family = &tkActor.getFamilyImpl(); SharedMemory* mem = getSharedMemory(family); if (mem->removeReference()) { releaseSharedMemory(family, mem); } return true; } TkGroupImpl* TkGroupImpl::create(const TkGroupDesc& desc) { TkGroupImpl* group = NVBLAST_NEW(TkGroupImpl); group->setWorkerCount(desc.workerCount); return group; } void TkGroupImpl::setWorkerCount(uint32_t workerCount) { if (isProcessing()) { NVBLAST_LOG_WARNING("TkGroup::setWorkerCount: Group is still processing, call TkGroup::endProcess first."); return; } if (workerCount == 0) { NVBLAST_LOG_WARNING("TkGroup: attempting to create a Group with 0 workers. 
Forced to 1."); workerCount = 1; } if (workerCount != m_workers.size()) { m_workers.resize(workerCount); uint32_t workerId = 0; for (auto& worker : m_workers) { worker.m_id = workerId++; worker.m_group = this; } const uint32_t bondCount = m_bondTempDataBlock.numElementsPerBlock(); if (bondCount > 0) { m_bondTempDataBlock.release(); m_bondTempDataBlock.allocate(bondCount, workerCount); m_bondEventDataBlock.release(); m_bondEventDataBlock.allocate(bondCount, workerCount); } const uint32_t chunkCount = m_chunkTempDataBlock.numElementsPerBlock(); if (chunkCount > 0) { m_chunkTempDataBlock.release(); m_chunkTempDataBlock.allocate(chunkCount, workerCount); m_chunkEventDataBlock.release(); m_chunkEventDataBlock.allocate(chunkCount, workerCount); } const uint32_t scratchSize = m_splitScratchBlock.numElementsPerBlock(); if (scratchSize > 0) { m_splitScratchBlock.release(); m_splitScratchBlock.allocate(scratchSize, workerCount); } } } NV_INLINE uint32_t TkGroupImpl::getWorkerCount() const { return m_workers.size(); } uint32_t TkGroupImpl::startProcess() { BLAST_PROFILE_SCOPE_L("TkGroup::startProcess"); if (!setProcessing(true)) { NVBLAST_LOG_WARNING("TkGroup::process: Group is still processing, call TkGroup::endProcess first."); return 0; } if (m_jobs.size() > 0) { BLAST_PROFILE_ZONE_BEGIN("task setup"); BLAST_PROFILE_ZONE_BEGIN("setup job queue"); for (const auto& job : m_jobs) { const TkActorImpl* a = job.m_tkActor; SharedMemory* mem = getSharedMemory(&a->getFamilyImpl()); const uint32_t damageCount = a->m_damageBuffer.size(); // applyFracture'd actor do not necessarily have damage queued NVBLAST_ASSERT(damageCount > 0 || a->m_flags.isSet(TkActorFlag::DAMAGED)); // no reason to be here without these NVBLAST_ASSERT(a->m_flags.isSet(TkActorFlag::PENDING)); NVBLAST_ASSERT(a->m_group == this); // collect the amount of event payload memory to preallocate for TkWorkers mem->m_eventsMemory += damageCount * (sizeof(TkFractureCommands) + sizeof(TkFractureEvents)) + sizeof(TkSplitEvent); // collect the amount of event entries to preallocate for TkWorkers // (two TkFracture* events per damage plus one TkSplitEvent) mem->m_eventsCount += 2 * damageCount + 1; } BLAST_PROFILE_ZONE_END("setup job queue"); BLAST_PROFILE_ZONE_BEGIN("memory protect"); for (auto it = m_sharedMemory.getIterator(); !it.done(); ++it) { // preallocate the event memory for TkWorkers SharedMemory* mem = it->second; mem->m_events.reserveData(mem->m_eventsMemory); mem->m_events.reserveEvents(mem->m_eventsCount); // these counters are not used anymore // reset them immediately for next time mem->m_eventsCount = 0; mem->m_eventsMemory = 0; // switch to parallel mode mem->m_events.protect(true); } BLAST_PROFILE_ZONE_END("memory protect"); BLAST_PROFILE_ZONE_END("task setup"); for (auto&worker : m_workers) { worker.initialize(); } return m_jobs.size(); } else { bool success = setProcessing(false); NVBLAST_ASSERT(success); NV_UNUSED(success); return 0; } } bool TkGroupImpl::endProcess() { if (isProcessing()) { BLAST_PROFILE_SCOPE_L("TkGroupImpl::endProcess"); if (m_jobs.size() > 0) { #if NV_PROFILE BLAST_PROFILE_ZONE_BEGIN("accumulate timers"); NvBlastTimers accumulated; NvBlastTimersReset(&accumulated); uint32_t jobCount = 0; int64_t workerTime = 0; for (TkWorker& worker : m_workers) { accumulated += worker.m_stats.timers; jobCount += worker.m_stats.processedActorsCount; workerTime += worker.m_stats.workerTime; } m_stats.timers = accumulated; m_stats.processedActorsCount = jobCount; m_stats.workerTime = workerTime; 
BLAST_PROFILE_ZONE_END("accumulate timers"); #endif BLAST_PROFILE_ZONE_BEGIN("job update"); for (auto& j : m_jobs) { if (j.m_newActorsCount) { TkFamilyImpl* fam = &j.m_tkActor->getFamilyImpl(); SharedMemory* mem = getSharedMemory(fam); // as LL is implemented, where newActorsCount the parent is always deleted removeActorInternal(*j.m_tkActor); mem->removeReference(); addActorsInternal(j.m_newActors, j.m_newActorsCount); mem->addReference(j.m_newActorsCount); // Update joints mem->m_events.protect(false); // allow allocations again BLAST_PROFILE_ZONE_BEGIN("updateJoints"); fam->updateJoints(j.m_tkActor, &mem->m_events); BLAST_PROFILE_ZONE_END("updateJoints"); } // virtually dequeue the actor // the queue itself is cleared right after this loop j.m_tkActor->m_flags.clear(TkActorFlag::PENDING); j.m_tkActor->m_groupJobIndex = invalidIndex<uint32_t>(); BLAST_PROFILE_ZONE_BEGIN("damageBuffer.clear"); j.m_tkActor->m_damageBuffer.clear(); BLAST_PROFILE_ZONE_END("damageBuffer.clear"); } m_jobs.clear(); BLAST_PROFILE_ZONE_END("job update"); BLAST_PROFILE_ZONE_BEGIN("event dispatch"); for (auto it = m_sharedMemory.getIterator(); !it.done(); ++it) { BLAST_PROFILE_SCOPE_L("event dispatch"); TkFamilyImpl* family = it->first; SharedMemory* mem = it->second; NVBLAST_ASSERT(family != nullptr); NVBLAST_ASSERT(mem != nullptr && mem->isUsed()); // where no actor of a family has split, // its group/family event queue has not been // unprotected in the jobs loop above mem->m_events.protect(false); family->getQueue().dispatch(mem->m_events); mem->m_events.reset(); mem->reset(); } BLAST_PROFILE_ZONE_END("event dispatch"); BLAST_PROFILE_ZONE_BEGIN("event memory release"); for (auto& worker : m_workers) { worker.m_bondBuffer.clear(); worker.m_chunkBuffer.clear(); } BLAST_PROFILE_ZONE_END("event memory release"); } bool success = setProcessing(false); NVBLAST_ASSERT(success); return success; } return false; } bool TkGroupImpl::setProcessing(bool value) { bool expected = !value; return m_isProcessing.compare_exchange_strong(expected, value); } void TkGroupImpl::enqueue(TkActorImpl* tkActor) { NVBLAST_ASSERT(tkActor->getGroupImpl() != nullptr); NVBLAST_ASSERT(tkActor->getGroupImpl() == this); NVBLAST_ASSERT(isInvalidIndex(tkActor->m_groupJobIndex)); NVBLAST_ASSERT(isProcessing() == false); #if NV_DEBUG for (TkWorkerJob& j : m_jobs) { NVBLAST_ASSERT(j.m_tkActor != tkActor); } #endif tkActor->m_groupJobIndex = m_jobs.size(); TkWorkerJob& j = m_jobs.insert(); j.m_tkActor = tkActor; } TkGroupWorker* TkGroupImpl::acquireWorker() { BLAST_PROFILE_SCOPE_L("TkGroupImpl::acquireWorker"); std::unique_lock<std::mutex> lk(m_workerMtx); for (auto& worker:m_workers) { if (!worker.m_isBusy) { worker.m_isBusy = true; return &worker; } } return nullptr; } void TkGroupImpl::returnWorker(TkGroupWorker* worker) { BLAST_PROFILE_SCOPE_L("TkGroupImpl::returnWorker"); std::unique_lock<std::mutex> lk(m_workerMtx); auto w = static_cast<TkWorker*>(worker); NVBLAST_CHECK_WARNING(w->m_group == this, "TkGroup::returnWorker worker does not belong to this group.", return); w->m_isBusy = false; } } // namespace Blast } // namespace Nv
19,115
C++
30.082927
128
0.611405
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkTaskImpl.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastTime.h" #include "NvBlastTkTaskImpl.h" #include "NvBlastTkFamilyImpl.h" #include "NvBlastTkAssetImpl.h" #include "NvBlastTkGroupImpl.h" using namespace Nv::Blast; void SharedMemory::allocate(TkFamilyImpl& tkFamily) { NVBLAST_ASSERT(m_refCount == 0); const NvBlastAsset* assetLL = tkFamily.getAsset()->getAssetLL(); // at most leafChunkCount actors can be created within a family // tasks will grab their portion out of these memory blocks uint32_t leafChunkCount = NvBlastAssetGetLeafChunkCount(assetLL, logLL); m_newActorBuffers.allocate(2 * leafChunkCount); // GWD-167 workaround (2*) m_newTkActorBuffers.allocate(leafChunkCount); } /** Creates a TkEvent::FractureCommand according to the input buffer for tkActor into events queue using the LocalBuffers to store the actual event data. */ NV_FORCE_INLINE void reportFractureCommands( const NvBlastFractureBuffers& buffer, LocalBuffer<NvBlastBondFractureData>& bondBuffer, LocalBuffer<NvBlastChunkFractureData>& chunkBuffer, TkEventQueue& events, const TkActorImpl* tkActor) { NvBlastBondFractureData* bdata = nullptr; if (buffer.bondFractureCount > 0) { bdata = bondBuffer.allocate(buffer.bondFractureCount); memcpy(bdata, buffer.bondFractures, sizeof(NvBlastBondFractureData)*buffer.bondFractureCount); } NvBlastChunkFractureData* cdata = nullptr; if (buffer.chunkFractureCount > 0) { cdata = chunkBuffer.allocate(buffer.chunkFractureCount); memcpy(cdata, buffer.chunkFractures, sizeof(NvBlastChunkFractureData)*buffer.chunkFractureCount); } TkFractureCommands* fevt = events.allocData<TkFractureCommands>(); fevt->tkActorData = *tkActor; fevt->buffers = { buffer.bondFractureCount, buffer.chunkFractureCount, bdata, cdata }; events.addEvent(fevt); } /** Creates a TkEvent::FractureEvent according to the input buffer for tkActor into events queue using the LocalBuffers to store the actual event data. 
*/ NV_FORCE_INLINE void reportFractureEvents( const NvBlastFractureBuffers& buffer, LocalBuffer<NvBlastBondFractureData>& bondBuffer, LocalBuffer<NvBlastChunkFractureData>& chunkBuffer, TkEventQueue& events, const TkActorImpl* tkActor) { uint32_t result[4] = { 0,0,0,0 }; NvBlastBondFractureData* bdata = nullptr; if (buffer.bondFractureCount > 0) { bdata = bondBuffer.allocate(buffer.bondFractureCount); for (uint32_t b = 0; b < buffer.bondFractureCount; ++b) { bdata[b] = buffer.bondFractures[b]; result[buffer.bondFractures[b].health > 0 ? 0 : 1]++; } } NvBlastChunkFractureData* cdata = nullptr; if (buffer.chunkFractureCount > 0) { cdata = chunkBuffer.allocate(buffer.chunkFractureCount); for (uint32_t c = 0; c < buffer.chunkFractureCount; ++c) { cdata[c] = buffer.chunkFractures[c]; result[buffer.chunkFractures[c].health > 0 ? 2 : 3]++; } } TkFractureEvents* fevt = events.allocData<TkFractureEvents>(); fevt->tkActorData = *tkActor; fevt->buffers = { buffer.bondFractureCount, buffer.chunkFractureCount, bdata, cdata }; fevt->bondsDamaged = result[0]; fevt->bondsBroken = result[1]; fevt->chunksDamaged = result[2]; fevt->chunksBroken = result[3]; events.addEvent(fevt); } void TkWorker::initialize() { // temporary memory used to generate and apply fractures // it must fit for the largest family involved in the group that owns this worker NvBlastBondFractureData* bondFractureData = m_group->m_bondTempDataBlock.getBlock(m_id); uint32_t bondFractureCount = m_group->m_bondTempDataBlock.numElementsPerBlock(); NvBlastChunkFractureData* chunkFractureData = m_group->m_chunkTempDataBlock.getBlock(m_id); uint32_t chunkFractureCount = m_group->m_chunkTempDataBlock.numElementsPerBlock(); m_tempBuffer = { bondFractureCount, chunkFractureCount, bondFractureData, chunkFractureData }; // temporary memory used to split the actor // large enough for the largest family involved m_splitScratch = m_group->m_splitScratchBlock.getBlock(m_id); // to avoid unnecessary allocations, preallocated memory exists to fit all chunks and bonds taking damage once // where multiple damage occurs, more memory will be allocated on demand (this may thwart other threads doing the same) m_bondBuffer.initialize(m_group->m_bondEventDataBlock.getBlock(m_id), m_group->m_bondEventDataBlock.numElementsPerBlock()); m_chunkBuffer.initialize(m_group->m_chunkEventDataBlock.getBlock(m_id), m_group->m_chunkEventDataBlock.numElementsPerBlock()); #if NV_PROFILE NvBlastTimersReset(&m_stats.timers); m_stats.processedActorsCount = 0; #endif } void TkWorker::process(TkWorkerJob& j) { NvBlastTimers* timers = nullptr; BLAST_PROFILE_SCOPE_M("TkActor"); TkActorImpl* tkActor = j.m_tkActor; const uint32_t tkActorIndex = tkActor->getIndex(); NvBlastActor* actorLL = tkActor->getActorLLInternal(); TkFamilyImpl& family = tkActor->getFamilyImpl(); SharedMemory* mem = m_group->getSharedMemory(&family); TkEventQueue& events = mem->m_events; NVBLAST_ASSERT(tkActor->getGroupImpl() == m_group); NVBLAST_ASSERT(tkActor->m_flags.isSet(TkActorFlag::PENDING)); #if NV_PROFILE timers = &m_stats.timers; *timers += tkActor->m_timers; NvBlastTimersReset(&tkActor->m_timers); m_stats.processedActorsCount++; #endif // generate and apply fracture for all damage requested on this actor // and queue events accordingly for (const auto& damage : tkActor->m_damageBuffer) { NvBlastFractureBuffers commandBuffer = m_tempBuffer; BLAST_PROFILE_ZONE_BEGIN("Material"); NvBlastActorGenerateFracture(&commandBuffer, actorLL, damage.program, damage.programParams, logLL, timers); 
BLAST_PROFILE_ZONE_END("Material"); if (commandBuffer.chunkFractureCount > 0 || commandBuffer.bondFractureCount > 0) { BLAST_PROFILE_SCOPE_M("Fill Command Events"); reportFractureCommands(commandBuffer, m_bondBuffer, m_chunkBuffer, events, tkActor); } NvBlastFractureBuffers eventBuffer = m_tempBuffer; BLAST_PROFILE_ZONE_BEGIN("Fracture"); NvBlastActorApplyFracture(&eventBuffer, actorLL, &commandBuffer, logLL, timers); BLAST_PROFILE_ZONE_END("Fracture"); if (eventBuffer.chunkFractureCount > 0 || eventBuffer.bondFractureCount > 0) { BLAST_PROFILE_SCOPE_M("Fill Fracture Events"); tkActor->m_flags |= (TkActorFlag::DAMAGED); reportFractureEvents(eventBuffer, m_bondBuffer, m_chunkBuffer, events, tkActor); } } // split the actor, which could have been damaged directly though the TkActor's fracture functions // i.e. it did not have damage queued for the above loop NvBlastActorSplitEvent splitEvent = { nullptr, nullptr }; if (tkActor->isDamaged()) { BLAST_PROFILE_ZONE_BEGIN("Split Memory"); uint32_t maxActorCount = NvBlastActorGetMaxActorCountForSplit(actorLL, logLL); splitEvent.newActors = mem->reserveNewActors(maxActorCount); BLAST_PROFILE_ZONE_END("Split Memory"); BLAST_PROFILE_ZONE_BEGIN("Split"); j.m_newActorsCount = NvBlastActorSplit(&splitEvent, actorLL, maxActorCount, m_splitScratch, logLL, timers); BLAST_PROFILE_ZONE_END("Split"); tkActor->m_flags.clear(TkActorFlag::DAMAGED); } else { j.m_newActorsCount = 0; } // update the TkActor according to the LL split results and queue events accordingly if (j.m_newActorsCount > 0) { NVBLAST_ASSERT(splitEvent.deletedActor == tkActor->getActorLL()); BLAST_PROFILE_ZONE_BEGIN("memory new actors"); auto tkSplitEvent = events.allocData<TkSplitEvent>(); tkSplitEvent->children = mem->reserveNewTkActors(j.m_newActorsCount); tkSplitEvent->numChildren = j.m_newActorsCount; tkSplitEvent->parentData.family = &family; tkSplitEvent->parentData.userData = tkActor->userData; tkSplitEvent->parentData.index = tkActorIndex; family.removeActor(tkActor); BLAST_PROFILE_ZONE_END("memory new actors"); BLAST_PROFILE_ZONE_BEGIN("create new actors"); for (uint32_t i = 0; i < j.m_newActorsCount; ++i) { TkActorImpl* newActor = family.addActor(splitEvent.newActors[i]); tkSplitEvent->children[i] = newActor; } j.m_newActors = reinterpret_cast<TkActorImpl**>(tkSplitEvent->children); BLAST_PROFILE_ZONE_END("create new actors"); BLAST_PROFILE_ZONE_BEGIN("split event"); events.addEvent(tkSplitEvent); BLAST_PROFILE_ZONE_END("split event"); } j.m_tkActor->m_flags.clear(TkActorFlag::PENDING); } void TkWorker::process(uint32_t jobID) { TkWorkerJob& j = m_group->m_jobs[jobID]; process(j); }
10,597
C++
38.107011
130
0.70888
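The TkWorker::process code above is essentially a multithreaded wrapper around three low-level calls per damaged actor: NvBlastActorGenerateFracture, NvBlastActorApplyFracture, and NvBlastActorSplit. The sketch below shows the same generate/apply/split sequence in single-threaded form, outside the toolkit's worker and event machinery. It is illustrative only: the scratch-size query NvBlastActorGetRequiredScratchForSplit is assumed from the low-level API, and the caller is assumed to size the bond/chunk scratch vectors (e.g. from the asset's bond and chunk counts, as TkGroup does).

#include "NvBlast.h"
#include <vector>

// Sketch of the generate -> apply -> split sequence that TkWorker::process runs per actor.
// 'actor', 'program', and 'params' are assumed valid; bondScratch/chunkScratch are assumed
// sized by the caller (e.g. from the asset's bond and chunk counts, as TkGroup does).
// Returns the number of new actors written to 'newActors'.
static uint32_t damageAndSplit(NvBlastActor* actor,
                               NvBlastDamageProgram program, const void* params,
                               std::vector<NvBlastBondFractureData>& bondScratch,
                               std::vector<NvBlastChunkFractureData>& chunkScratch,
                               std::vector<NvBlastActor*>& newActors)
{
    // 1) Generate fracture commands from the damage program.
    NvBlastFractureBuffers commands =
        { (uint32_t)bondScratch.size(), (uint32_t)chunkScratch.size(), bondScratch.data(), chunkScratch.data() };
    NvBlastActorGenerateFracture(&commands, actor, program, params, nullptr, nullptr);

    // 2) Apply the commands; the same scratch memory receives the resulting fracture events,
    //    just as the worker reuses m_tempBuffer for both command and event buffers.
    NvBlastFractureBuffers events =
        { (uint32_t)bondScratch.size(), (uint32_t)chunkScratch.size(), bondScratch.data(), chunkScratch.data() };
    NvBlastActorApplyFracture(&events, actor, &commands, nullptr, nullptr);
    if (events.bondFractureCount == 0 && events.chunkFractureCount == 0)
        return 0;   // nothing broke, no split needed

    // 3) Split the actor into its new islands; the caller may trim newActors to the returned count.
    const uint32_t maxActors = NvBlastActorGetMaxActorCountForSplit(actor, nullptr);
    newActors.resize(maxActors);
    std::vector<char> scratch(NvBlastActorGetRequiredScratchForSplit(actor, nullptr));
    NvBlastActorSplitEvent splitEvent = { nullptr, newActors.data() };
    return NvBlastActorSplit(&splitEvent, actor, maxActors, scratch.data(), nullptr, nullptr);
}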
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkActorImpl.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTTKACTORIMPL_H #define NVBLASTTKACTORIMPL_H #include "NvBlastTkCommon.h" #include "NvBlastAssert.h" #include "NvBlastDLink.h" #include "NvBlastIteratorBase.h" #include "NvBlastTkJointImpl.h" #include "NvBlast.h" #include "NvBlastTkActor.h" #include "NvFlags.h" namespace Nv { namespace Blast { // Forward declarations: class TkGroupImpl; class TkFamilyImpl; class TkAssetImpl; class TkJointImpl; /** Struct-enum for actor status flags, used in TkGroup processing. */ struct TkActorFlag { enum Enum { DAMAGED = (1 << 0), //!< The actor had fractures applied successfully and will take the split step. PENDING = (1 << 1), //!< The actor will be processed when its group executes, used to update job queues when moving group. }; }; /** Implementation of TkActor. 
*/ class TkActorImpl : public TkActor { public: TkActorImpl(); ~TkActorImpl(); // Begin TkActor virtual const NvBlastActor* getActorLL() const override; virtual TkFamily& getFamily() const override; virtual uint32_t getIndex() const override; virtual TkGroup* getGroup() const override; virtual TkGroup* removeFromGroup() override; virtual const TkAsset* getAsset() const override; virtual uint32_t getVisibleChunkCount() const override; virtual uint32_t getVisibleChunkIndices(uint32_t* visibleChunkIndices, uint32_t visibleChunkIndicesSize) const override; virtual uint32_t getGraphNodeCount() const override; virtual uint32_t getGraphNodeIndices(uint32_t* graphNodeIndices, uint32_t graphNodeIndicesSize) const override; virtual const float* getBondHealths() const override; virtual uint32_t getSplitMaxActorCount() const override; virtual void damage(const NvBlastDamageProgram& program, const void* programParams) override; virtual bool isPending() const override; virtual void generateFracture(NvBlastFractureBuffers* commands, const NvBlastDamageProgram& program, const void* programParams) const override; virtual void applyFracture(NvBlastFractureBuffers* eventBuffers, const NvBlastFractureBuffers* commands) override; virtual uint32_t getJointCount() const override; virtual uint32_t getJoints(TkJoint** joints, uint32_t jointsSize) const override; virtual bool hasExternalBonds() const override; // End TkActor // Begin TkObject virtual void release() override; // End TkObject // Public methods /** Factory create method. \param[in] desc Actor descriptor set by the user. \return a pointer to a new TkActorImpl object if successful, NULL otherwise. */ static TkActorImpl* create(const TkActorDesc& desc); /** TkActorImpl objects are created in an array within a TkFamilyImpl. Actors may become 'inactive' without their memory being freed. If inactive, the actor should be treated as if it has been released. \return the active status of this TkActorImpl. */ bool isActive() const; /** Utility to return the low-level family to which the low-level actor belongs. \return a pointer to the NvBlastFamily to which the low-level actor belongs. */ NvBlastFamily* getFamilyLL() const; /** Utility to access the TkFamily to which this actor belongs. \return a reference to the TkFamilyImpl to which this TkActorImpl belongs. */ TkFamilyImpl& getFamilyImpl() const; /** \return the index of this actor with its TkFamilyImpl. */ uint32_t getIndexInternal() const; /** Access to the group to which this actor belongs, if any. \return a pointer to the TkGroupImpl to which this TkActorImpl belongs, if any. If this actor is not in a group, this function returns NULL. */ TkGroupImpl* getGroupImpl() const; /** Access to the low-level actor associated with this TkActorImpl. \return a pointer to the NvBlastActor associated with this TkActorImpl. If this actor is inactive (see isActive), this function returns NULL. */ NvBlastActor* getActorLLInternal() const; /** \return the number of TkJointImpl objects that reference this actor. */ uint32_t getJointCountInternal() const; /** Joint iterator. Usage: Given a TkActorImpl a, for (TkActorImpl::JointIt i(a); (bool)i; ++i) { TkJointImpl* joint = (TkJointImpl*)i; // ... } */ class JointIt : public DList::It { public: /** Constructed from an actor. */ JointIt(const TkActorImpl& actor, Direction dir = Forward); /** Current joint. */ TkJointImpl* operator * () const; }; /** Implicit converter to TkActorData for events. 
*/ operator Nv::Blast::TkActorData() const; private: /** Functions to raise or check 'damaged' state: this actor will take the split step. 'damaged' actors automatically become 'pending' also. */ void markAsDamaged(); bool isDamaged() const; /** Raise actor to 'pending' state: this actor will be processed when its group executes next. Enqueues the actor in its group's job list if a group is set. Otherwise the group will enqueue the actor when it is added. */ void makePending(); /** Functions to add or remove an internal reference to a joint. (Joints and actors mutually reference each other.) */ void addJoint(TkJointLink& jointLink); void removeJoint(TkJointLink& jointLink); struct DamageData { NvBlastDamageProgram program; const void* programParams; }; // Data NvBlastActor* m_actorLL; //!< The low-level actor associated with this actor TkFamilyImpl* m_family; //!< The TkFamilyImpl to which this actor belongs TkGroupImpl* m_group; //!< The TkGroupImpl (if any) to which this actor belongs uint32_t m_groupJobIndex; //!< The index of this actor's job within its group's job list nvidia::NvFlags<TkActorFlag::Enum, char> m_flags; //!< Status flags for this actor Array<DamageData>::type m_damageBuffer; //!< Buffered damage input uint32_t m_jointCount; //!< The number of joints referenced in m_jointList DList m_jointList; //!< A doubly-linked list of joint references //#if NV_PROFILE NvBlastTimers m_timers; //!< If profiling, each actor stores timing data //#endif friend class TkWorker; // m_damageBuffer and m_flags friend class TkGroupImpl; friend class TkFamilyImpl; friend class TkJointImpl; friend class TkFrameworkImpl; }; //////// TkActorImpl inline methods //////// NV_INLINE TkFamilyImpl& TkActorImpl::getFamilyImpl() const { NVBLAST_ASSERT(m_family != nullptr); return *m_family; } NV_INLINE uint32_t TkActorImpl::getIndexInternal() const { NVBLAST_ASSERT(isActive()); return NvBlastActorGetIndex(m_actorLL, logLL); } NV_INLINE NvBlastActor* TkActorImpl::getActorLLInternal() const { return m_actorLL; } NV_INLINE uint32_t TkActorImpl::getJointCountInternal() const { return m_jointCount; } NV_INLINE TkGroupImpl* TkActorImpl::getGroupImpl() const { return m_group; } NV_INLINE bool TkActorImpl::isActive() const { return m_actorLL != nullptr; } NV_INLINE bool TkActorImpl::isPending() const { return m_flags.isSet(TkActorFlag::PENDING); } NV_INLINE void TkActorImpl::addJoint(TkJointLink& jointLink) { NVBLAST_ASSERT(m_jointList.isSolitary(jointLink)); m_jointList.insertHead(jointLink); ++m_jointCount; } NV_INLINE void TkActorImpl::removeJoint(TkJointLink& jointLink) { NVBLAST_ASSERT(!m_jointList.isSolitary(jointLink)); NVBLAST_ASSERT(m_jointCount > 0); if (m_jointCount > 0) { --m_jointCount; m_jointList.remove(jointLink); } } //////// TkActorImpl::JointIt methods //////// NV_INLINE TkActorImpl::JointIt::JointIt(const TkActorImpl& actor, Direction dir) : DList::It(actor.m_jointList, dir) {} NV_INLINE TkJointImpl* TkActorImpl::JointIt::operator * () const { const DLink* link = (const DLink*)(*this); return reinterpret_cast<const TkJointLink*>(link)->m_joint; } } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTTKACTORIMPL_H
10,565
C
29.894737
162
0.654614
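The TkActor interface declared in NvBlastTkActorImpl.h above is what a physics integration polls after group processing to learn an actor's current shape. A minimal readback sketch, using only methods declared in that interface (getVisibleChunkCount/getVisibleChunkIndices):

#include "NvBlastTkActor.h"
#include <vector>

// Sketch: read back a TkActor's visible chunk list after group processing.
void readVisibleChunks(const Nv::Blast::TkActor& actor, std::vector<uint32_t>& chunkIndices)
{
    chunkIndices.resize(actor.getVisibleChunkCount());
    if (!chunkIndices.empty())
    {
        actor.getVisibleChunkIndices(chunkIndices.data(), (uint32_t)chunkIndices.size());
    }
}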
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkCommon.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTTKCOMMON_H #define NVBLASTTKCOMMON_H #include "NvBlastGlobals.h" #include "NvBlastTkGUID.h" // Macro to define standard object classes. An intermediate class is defined which holds common implementations. #define NVBLASTTK_IMPL_DECLARE(_name) \ class Tk##_name##Type : public Tk##_name \ { \ public: \ /* Blank constructor generates a new NvBlastID and informs framework */ \ Tk##_name##Type() \ { \ memset(&m_ID, 0, sizeof(NvBlastID)); \ setID(TkGenerateGUID(this)); \ TkFrameworkImpl::get()->onCreate(*this); \ } \ \ /* This constructor takes an existing NvBlastID and informs framework */ \ Tk##_name##Type(const NvBlastID& id) \ { \ memset(&m_ID, 0, sizeof(NvBlastID)); \ setID(id); \ TkFrameworkImpl::get()->onCreate(*this); \ } \ \ /* Destructor informs framework */ \ ~Tk##_name##Type() { TkFrameworkImpl::get()->onDestroy(*this); } \ \ /* Begin TkIdentifiable */ \ virtual void setID(const NvBlastID& id) override \ { \ /* Inform framework of ID change */ \ TkFrameworkImpl::get()->onIDChange(*this, m_ID, id); \ m_ID = id; \ } \ virtual const NvBlastID& getID() const override { return getIDInternal(); } \ virtual const TkType& getType() const override { return s_type; } \ /* End TkIdentifiable */ \ \ /* Begin public API */ \ \ /* Inline method for internal access to NvBlastID */ \ const NvBlastID& getIDInternal() const { return m_ID; } \ \ /* End public API */ \ \ /* Static type information */ \ static TkTypeImpl s_type; \ \ private: \ NvBlastID m_ID; /* NvBlastID for a TkIdentifiable object */ \ }; \ \ /* Derive object implementation from common implementation class above */ \ class Tk##_name##Impl final : public Tk##_name##Type // Macro to declare standard object interfaces, enums, etc. #define NVBLASTTK_IMPL_DEFINE_IDENTIFIABLE(_id0, _id1, _id2, _id3) \ /* Begin TkObject */ \ virtual void release() override; \ /* End TkObject */ \ \ /* Enums */ \ \ /* Generate a ClassID enum used to identify this TkIdentifiable. 
*/ \ enum { ClassID = NVBLAST_FOURCC(_id0, _id1, _id2, _id3) } // Macro to define class type data #define NVBLASTTK_DEFINE_TYPE_IDENTIFIABLE(_name) \ TkTypeImpl Tk##_name##Type::s_type("Tk" #_name, Tk##_name##Impl::ClassID, 0) #endif // ifndef NVBLASTTKCOMMON_H
6,979
C
64.233644
113
0.391317
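The three macros in NvBlastTkCommon.h work as a chain: NVBLASTTK_IMPL_DECLARE generates the common Tk<Name>Type base (ID bookkeeping, framework registration) and opens the Tk<Name>Impl class, NVBLASTTK_IMPL_DEFINE_IDENTIFIABLE declares release() and the FourCC ClassID inside that class, and NVBLASTTK_DEFINE_TYPE_IDENTIFIABLE instantiates the static type data in one translation unit. A hypothetical illustration follows; "Widget" is not a real Tk type, and a TkWidget interface class, a TkWidgetDesc, and a TkTypeIndex slot are assumed to exist:

// Hypothetical header usage ("Widget" is illustrative only):
NVBLASTTK_IMPL_DECLARE(Widget)
{
public:
    NVBLASTTK_IMPL_DEFINE_IDENTIFIABLE('W', 'D', 'G', 'T');

    // Factory method, following the convention used by the real Tk types (TkWidgetDesc is assumed):
    static TkWidgetImpl* create(const TkWidgetDesc& desc);

    // ... implementation-specific members ...
};

// In one source file, instantiate the static type data (class name "TkWidget", version 0):
NVBLASTTK_DEFINE_TYPE_IDENTIFIABLE(Widget);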
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkAssetImpl.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastTkFrameworkImpl.h" #include "NvBlastTkAssetImpl.h" #include "NvBlastTkFamilyImpl.h" #include "NvBlast.h" #include "NvBlastMemory.h" namespace Nv { namespace Blast { //////// Static data //////// NVBLASTTK_DEFINE_TYPE_IDENTIFIABLE(Asset); //////// Member functions //////// TkAssetImpl::TkAssetImpl() : m_assetLL(nullptr), m_ownsAsset(false) { } TkAssetImpl::TkAssetImpl(const NvBlastID& id) : TkAssetType(id), m_assetLL(nullptr), m_ownsAsset(false) { } TkAssetImpl::~TkAssetImpl() { if (m_assetLL != nullptr && m_ownsAsset) { NVBLAST_FREE(m_assetLL); } } const NvBlastAsset* TkAssetImpl::getAssetLL() const { return getAssetLLInternal(); } uint32_t TkAssetImpl::getChunkCount() const { return NvBlastAssetGetChunkCount(m_assetLL, logLL); } uint32_t TkAssetImpl::getLeafChunkCount() const { return NvBlastAssetGetLeafChunkCount(m_assetLL, logLL); } uint32_t TkAssetImpl::getBondCount() const { return NvBlastAssetGetBondCount(m_assetLL, logLL); } const NvBlastChunk* TkAssetImpl::getChunks() const { return NvBlastAssetGetChunks(m_assetLL, logLL); } const NvBlastBond* TkAssetImpl::getBonds() const { return NvBlastAssetGetBonds(m_assetLL, logLL); } const NvBlastSupportGraph TkAssetImpl::getGraph() const { return NvBlastAssetGetSupportGraph(m_assetLL, logLL); } uint32_t TkAssetImpl::getDataSize() const { return NvBlastAssetGetSize(m_assetLL, logLL); } uint32_t TkAssetImpl::getJointDescCount() const { return getJointDescCountInternal(); } const TkAssetJointDesc* TkAssetImpl::getJointDescs() const { return getJointDescsInternal(); } void TkAssetImpl::release() { const TkType& tkType = TkFamilyImpl::s_type; const uint32_t num = TkFrameworkImpl::get()->getObjectCount(tkType); if (num) { Array<TkIdentifiable*>::type dependents(num); TkFrameworkImpl::get()->getObjects(dependents.begin(), dependents.size(), tkType); for (TkObject* o : dependents) { TkFamilyImpl* f = static_cast<TkFamilyImpl*>(o); if (f->getAssetImpl() == this) { f->release(); } } } NVBLAST_DELETE(this, TkAssetImpl); } 
//////// Static functions //////// TkAssetImpl* TkAssetImpl::create(const TkAssetDesc& desc) { TkAssetImpl* asset = NVBLAST_NEW(TkAssetImpl); Array<char>::type scratch((uint32_t)NvBlastGetRequiredScratchForCreateAsset(&desc, logLL)); void* mem = NVBLAST_ALLOC_NAMED(NvBlastGetAssetMemorySize(&desc, logLL), "TkAssetImpl::create"); asset->m_assetLL = NvBlastCreateAsset(mem, &desc, scratch.begin(), logLL); if (asset->m_assetLL == nullptr) { NVBLAST_LOG_ERROR("TkAssetImpl::create: low-level asset could not be created."); asset->release(); return nullptr; } if (desc.bondFlags != nullptr) { for (uint32_t bondN = 0; bondN < desc.bondCount; ++bondN) { if (0 != (desc.bondFlags[bondN] & TkAssetDesc::BondJointed)) { const NvBlastBondDesc& bondDesc = desc.bondDescs[bondN]; const uint32_t c0 = bondDesc.chunkIndices[0]; const uint32_t c1 = bondDesc.chunkIndices[1]; if (c0 >= desc.chunkCount || c1 >= desc.chunkCount) { NVBLAST_LOG_WARNING("TkAssetImpl::create: joint flag set for badly described bond. No joint descriptor created."); continue; } if (!asset->addJointDesc(c0, c1)) { NVBLAST_LOG_WARNING("TkAssetImpl::create: no bond corresponds to the user-described bond indices. No joint descriptor created."); } } } } asset->m_ownsAsset = true; // asset->setID(NvBlastAssetGetID(asset->m_assetLL, logLL)); // Keeping LL and Tk IDs distinct return asset; } TkAssetImpl* TkAssetImpl::create(const NvBlastAsset* assetLL, Nv::Blast::TkAssetJointDesc* jointDescs, uint32_t jointDescCount, bool ownsAsset) { TkAssetImpl* asset = NVBLAST_NEW(TkAssetImpl); //NOTE: Why are we passing in a const NvBlastAsset* and then discarding the const? asset->m_assetLL = const_cast<NvBlastAsset*>(assetLL); if (asset->m_assetLL == nullptr) { NVBLAST_LOG_ERROR("TkAssetImpl::create: low-level asset could not be created."); asset->release(); return nullptr; } asset->m_ownsAsset = ownsAsset; asset->setID(NvBlastAssetGetID(asset->m_assetLL, logLL)); asset->m_jointDescs.resize(jointDescCount); for (uint32_t i = 0; i < asset->m_jointDescs.size(); ++i) { asset->m_jointDescs[i] = jointDescs[i]; } return asset; } bool TkAssetImpl::addJointDesc(uint32_t chunkIndex0, uint32_t chunkIndex1) { if (m_assetLL == nullptr) { return false; } const uint32_t upperSupportChunkCount = NvBlastAssetGetFirstSubsupportChunkIndex(m_assetLL, logLL); if (chunkIndex0 >= upperSupportChunkCount || chunkIndex1 >= upperSupportChunkCount) { return false; } const uint32_t* chunkToGraphNodeMap = NvBlastAssetGetChunkToGraphNodeMap(m_assetLL, logLL); const uint32_t node0 = chunkToGraphNodeMap[chunkIndex0]; const uint32_t node1 = chunkToGraphNodeMap[chunkIndex1]; const NvBlastSupportGraph graph = NvBlastAssetGetSupportGraph(m_assetLL, logLL); if (node0 >= graph.nodeCount || node1 >= graph.nodeCount) { return false; } // Find bond index // Iterate through all neighbors of node0 chunk uint32_t bondIndex = 0xFFFFFFFF; for (uint32_t i = graph.adjacencyPartition[node0]; i < graph.adjacencyPartition[node0 + 1]; i++) { if (graph.adjacentNodeIndices[i] == node1) { bondIndex = graph.adjacentBondIndices[i]; break; } } if (bondIndex >= NvBlastAssetGetBondCount(m_assetLL, logLL)) { return false; } const NvBlastBond& bond = NvBlastAssetGetBonds(m_assetLL, logLL)[bondIndex]; TkAssetJointDesc jointDesc; jointDesc.attachPositions[0] = jointDesc.attachPositions[1] = nvidia::NvVec3(bond.centroid[0], bond.centroid[1], bond.centroid[2]); jointDesc.nodeIndices[0] = node0; jointDesc.nodeIndices[1] = node1; m_jointDescs.pushBack(jointDesc); return true; } } // namespace Blast } // namespace Nv
8,002
C++
27.996377
150
0.673957
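TkAssetImpl::addJointDesc above finds the bond joining two support-graph nodes by scanning node0's slice of the adjacency arrays. The same lookup as a stand-alone helper (sketch; returns the invalid index 0xFFFFFFFF when the nodes share no bond):

#include "NvBlast.h"

// Find the bond index connecting two support-graph nodes, as TkAssetImpl::addJointDesc does.
// Returns 0xFFFFFFFF (invalid index) if node0 and node1 are not adjacent in the graph.
static uint32_t findBondBetweenNodes(const NvBlastSupportGraph& graph, uint32_t node0, uint32_t node1)
{
    if (node0 >= graph.nodeCount || node1 >= graph.nodeCount)
        return 0xFFFFFFFF;

    // Bonds adjacent to node0 occupy the half-open range
    // [adjacencyPartition[node0], adjacencyPartition[node0 + 1]) of the adjacency arrays.
    for (uint32_t i = graph.adjacencyPartition[node0]; i < graph.adjacencyPartition[node0 + 1]; ++i)
    {
        if (graph.adjacentNodeIndices[i] == node1)
            return graph.adjacentBondIndices[i];
    }
    return 0xFFFFFFFF;
}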
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkTypeImpl.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTTKTYPEIMPL_H #define NVBLASTTKTYPEIMPL_H #include "NvPreprocessor.h" #include "NvBlastTkType.h" namespace Nv { namespace Blast { /** Implementation of TkType, storing class information for TkIdentifiable-derived classes. */ class TkTypeImpl : public TkType { public: TkTypeImpl(const char* typeName, uint32_t typeID, uint32_t version); // Begin TkType virtual const char* getName() const override { return getNameInternal(); } virtual uint32_t getVersion() const override { return getVersionInternal(); } // End TkType // Public methods /** Access to the class name. \return a C string pointer to the class name. */ const char* getNameInternal() const; /** Access to the data format version for the class. \return the data format version. */ uint32_t getVersionInternal() const; /** Access to a unique identifier for the class (set using the NVBLASTTK_IMPL_DEFINE_IDENTIFIABLE macro). \return the class's unique identifier. */ uint32_t getID() const; /** Access to a runtime-unique small index for the class. \return the index for the class. */ uint32_t getIndex() const; /** \return whether or not the index has been set (see setIndex) to a valid value. */ bool indexIsValid() const; private: enum { InvalidIndex = 0xFFFFFFFF }; /** Sets the type index. \param[in] index The index to set. */ void setIndex(uint32_t index); const char* m_name; //!< The name of the class, set by the constructor. uint32_t m_ID; //!< The unique identifier for the class, set by the constructor. uint32_t m_version; //!< The data format version for the class, set by the constructor. uint32_t m_index; //!< The index set for this class, set using setIndex(). 
friend class TkFrameworkImpl; }; //////// TkTypeImpl inline methods //////// NV_INLINE TkTypeImpl::TkTypeImpl(const char* typeName, uint32_t typeID, uint32_t version) : m_name(typeName) , m_ID(typeID) , m_version(version) , m_index((uint32_t)InvalidIndex) { } NV_INLINE const char* TkTypeImpl::getNameInternal() const { return m_name; } NV_INLINE uint32_t TkTypeImpl::getVersionInternal() const { return m_version; } NV_INLINE uint32_t TkTypeImpl::getID() const { return m_ID; } NV_INLINE uint32_t TkTypeImpl::getIndex() const { return m_index; } NV_INLINE bool TkTypeImpl::indexIsValid() const { return m_index != (uint32_t)InvalidIndex; } NV_INLINE void TkTypeImpl::setIndex(uint32_t index) { m_index = index; } } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTTKTYPEIMPL_H
4,415
C
26.428571
110
0.68154
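Because every TkIdentifiable-derived class owns exactly one static TkTypeImpl, runtime type checks reduce to comparing type-object addresses (or ClassID values). A small sketch, assuming a TkFramework reference is available to look up the registered type:

#include "NvBlastTkFramework.h"

// Sketch: check whether a TkIdentifiable object is a TkAsset by comparing type objects.
bool isAsset(const Nv::Blast::TkIdentifiable& object, const Nv::Blast::TkFramework& framework)
{
    // Each class exposes a single static TkTypeImpl, so pointer comparison identifies the type.
    return &object.getType() == framework.getType(Nv::Blast::TkTypeIndex::Asset);
}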
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkFrameworkImpl.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTTKFRAMEWORKIMPL_H #define NVBLASTTKFRAMEWORKIMPL_H #include "NvBlastTkFramework.h" #include "NvBlastInternalProfiler.h" #include "NvBlastTkCommon.h" #include "NvBlastArray.h" #include "NvBlastHashMap.h" #include "NvBlastHashSet.h" namespace Nv { namespace Blast { // Forward declarations class TkTypeImpl; class TkJointImpl; /** Implementation of TkFramework */ class TkFrameworkImpl : public TkFramework { public: TkFrameworkImpl(); ~TkFrameworkImpl(); // Begin TkFramework virtual void release() override; virtual const TkType* getType(TkTypeIndex::Enum typeIndex) const override; virtual TkIdentifiable* findObjectByID(const NvBlastID& id) const override; virtual uint32_t getObjectCount(const TkType& type) const override; virtual uint32_t getObjects(TkIdentifiable** buffer, uint32_t bufferSize, const TkType& type, uint32_t indexStart = 0) const override; virtual bool reorderAssetDescChunks(NvBlastChunkDesc* chunkDescs, uint32_t chunkCount, NvBlastBondDesc* bondDescs, uint32_t bondCount, uint32_t* chunkReorderMap = nullptr, bool keepBondNormalChunkOrder = false) const override; virtual bool ensureAssetExactSupportCoverage(NvBlastChunkDesc* chunkDescs, uint32_t chunkCount) const override; virtual TkAsset* createAsset(const TkAssetDesc& desc) override; virtual TkAsset* createAsset(const NvBlastAsset* assetLL, Nv::Blast::TkAssetJointDesc* jointDescs = nullptr, uint32_t jointDescCount = 0, bool ownsAsset = false) override; virtual TkGroup* createGroup(const TkGroupDesc& desc) override; virtual TkActor* createActor(const TkActorDesc& desc) override; virtual TkJoint* createJoint(const TkJointDesc& desc) override; // End TkFramework // Public methods /** To be called by any TkIdentifiable object when it is created, so the framework can track it. */ void onCreate(TkIdentifiable& object); /** To be called by any TkIdentifiable object when it is deleted, so the framework can stop tracking it. 
*/ void onDestroy(TkIdentifiable& object); /** Special onCreate method for joints, since they are not TkIdentifiable. */ void onCreate(TkJointImpl& joint); /** Special onDestroy method for joints, since they are not TkIdentifiable. */ void onDestroy(TkJointImpl& joint); /** Must be called whenever a TkIdentifiable object's ID is changed, so that the framework can associate the new ID with it. */ void onIDChange(TkIdentifiable& object, const NvBlastID& IDPrev, const NvBlastID& IDCurr); /** Internal (non-virtual) method to find a TkIdentifiable object based upon its NvBlastID. */ TkIdentifiable* findObjectByIDInternal(const NvBlastID& id) const; // Access to singleton /** Retrieve the global singleton. */ static TkFrameworkImpl* get(); /** Set the global singleton, if it's not already set, or set it to NULL. Returns true iff successful. */ static bool set(TkFrameworkImpl* framework); private: // Enums enum { ClassID = NVBLAST_FOURCC('T', 'K', 'F', 'W') }; //!< TkFramework identifier token, used in serialization // Static data static TkFrameworkImpl* s_framework; //!< Global (singleton) object pointer // Types InlineArray<const TkTypeImpl*, TkTypeIndex::TypeCount>::type m_types; //!< TkIdentifiable static type data HashMap<uint32_t, uint32_t>::type m_typeIDToIndex; //!< Map to type data keyed by ClassID // Objects and object names HashMap<NvBlastID, TkIdentifiable*>::type m_IDToObject; //!< Map to all TkIdentifiable objects, keyed by NvBlastID InlineArray<Array<TkIdentifiable*>::type, TkTypeIndex::TypeCount>::type m_objects; //!< Catalog of all TkIdentifiable objects, grouped by type. (Revisit implementation.) // Track external joints (to do: make this a pool) HashSet<TkJointImpl*>::type m_joints; //!< All internal joints }; //////// TkFrameworkImpl inline methods //////// NV_INLINE TkIdentifiable* TkFrameworkImpl::findObjectByIDInternal(const NvBlastID& id) const { const auto entry = m_IDToObject.find(id); if (entry == nullptr) { return nullptr; } return entry->second; } } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTTKFRAMEWORKIMPL_H
6,653
C
40.074074
253
0.650534
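TkFrameworkImpl's per-type object catalog (m_objects) backs the getObjectCount/getObjects queries declared above; TkAssetImpl::release earlier in this section uses the same pattern to find dependent families. A short usage sketch that gathers every tracked asset:

#include "NvBlastTkFramework.h"
#include <vector>

// Sketch: enumerate every TkAsset currently tracked by the framework.
void gatherAssets(const Nv::Blast::TkFramework& framework, std::vector<Nv::Blast::TkIdentifiable*>& assets)
{
    const Nv::Blast::TkType* assetType = framework.getType(Nv::Blast::TkTypeIndex::Asset);
    if (assetType == nullptr)
        return;

    assets.resize(framework.getObjectCount(*assetType));
    if (!assets.empty())
    {
        framework.getObjects(assets.data(), (uint32_t)assets.size(), *assetType);
    }
}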
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkTaskManager.cpp
// // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. #include "NvTask.h" #include "NvTaskDefine.h" #include "NvCpuDispatcher.h" #include "NvGpuDispatcher.h" #include "NvErrorCallback.h" #include "NvBlastGlobals.h" #include "NvBlastAssert.h" #include "NvBlastAtomic.h" #include "NvBlastAllocator.h" #include "NvBlastArray.h" #include "NvBlastHashMap.h" #include <mutex> using namespace nvidia; using namespace nvidia::task; namespace Nv { namespace Blast { class MutexScopedLock { std::mutex& mMutex; NV_NOCOPY(MutexScopedLock) public: NV_INLINE MutexScopedLock(std::mutex& mutex) : mMutex(mutex) { mMutex.lock(); } NV_INLINE ~MutexScopedLock() { mMutex.unlock(); } }; #define LOCK() MutexScopedLock __lock__(mMutex) constexpr int EOL = -1; typedef HashMap<const char *, NvTaskID>::type NvBlastTkTaskNameToIDMap; struct NvBlastTkTaskDepTableRow { NvTaskID mTaskID; int mNextDep; }; typedef Array<NvBlastTkTaskDepTableRow>::type NvBlastTkTaskDepTable; struct NvTaskAccess : public NvTask { void setTaskID(NvTaskID taskID) { mTaskID = taskID; } void setTm(NvTaskManager* tm) { mTm = tm; } }; NvTaskAccess& ACCESS(NvTask& task) { return reinterpret_cast<NvTaskAccess&>(task); } NvTaskAccess* ACCESS(NvTask* task) { return reinterpret_cast<NvTaskAccess*>(task); } struct NvLightCpuTaskAccess : public NvLightCpuTask { bool atomicIncrementRefCount() { return Nv::Blast::atomicIncrement(&mRefCount); } bool atomicDecrementRefCount() { return Nv::Blast::atomicDecrement(&mRefCount); } }; NvLightCpuTaskAccess& ACCESS(NvLightCpuTask& task) { return reinterpret_cast<NvLightCpuTaskAccess&>(task); } class NvBlastTkTaskTableRow { public: NvBlastTkTaskTableRow() : mRefCount( 1 ), mStartDep(EOL), mLastDep(EOL) {} void addDependency( NvBlastTkTaskDepTable& depTable, NvTaskID taskID ) { int newDep = int(depTable.size()); NvBlastTkTaskDepTableRow row; row.mTaskID = taskID; row.mNextDep = EOL; depTable.pushBack( row ); if( mLastDep == EOL ) { mStartDep = mLastDep = newDep; } else { depTable[ uint32_t(mLastDep) ].mNextDep = newDep; mLastDep = newDep; } } NvTask * mTask; 
volatile int mRefCount; NvTaskType::Enum mType; int mStartDep; int mLastDep; }; typedef Array<NvBlastTkTaskTableRow>::type NvTaskTable; /* Implementation of NvTaskManager abstract API */ class NvBlastTkTaskManager : public NvTaskManager { NV_NOCOPY(NvBlastTkTaskManager) public: NvBlastTkTaskManager(NvErrorCallback& , NvCpuDispatcher*, NvGpuDispatcher*); ~NvBlastTkTaskManager(); void setCpuDispatcher( NvCpuDispatcher& ref ) { mCpuDispatcher = &ref; } NvCpuDispatcher* getCpuDispatcher() const { return mCpuDispatcher; } void setGpuDispatcher( NvGpuDispatcher& ref ) { mGpuDispatcher = &ref; } NvGpuDispatcher* getGpuDispatcher() const { return mGpuDispatcher; } void resetDependencies(); void startSimulation(); void stopSimulation(); void taskCompleted( NvTask& task ); NvTaskID getNamedTask( const char *name ); NvTaskID submitNamedTask( NvTask *task, const char *name, NvTaskType::Enum type = NvTaskType::TT_CPU ); NvTaskID submitUnnamedTask( NvTask& task, NvTaskType::Enum type = NvTaskType::TT_CPU ); NvTask* getTaskFromID( NvTaskID ); bool dispatchTask( NvTaskID taskID, bool gpuGroupStart ); bool resolveRow( NvTaskID taskID, bool gpuGroupStart ); void release(); void finishBefore( NvTask& task, NvTaskID taskID ); void startAfter( NvTask& task, NvTaskID taskID ); void addReference( NvTaskID taskID ); void decrReference( NvTaskID taskID ); int32_t getReference( NvTaskID taskID ) const; void decrReference( NvLightCpuTask& lighttask ); void addReference( NvLightCpuTask& lighttask ); void emitStartEvent(NvBaseTask& basetask, uint32_t threadId); void emitStopEvent(NvBaseTask& basetask, uint32_t threadId); NvErrorCallback& mErrorCallback; NvCpuDispatcher* mCpuDispatcher; NvGpuDispatcher* mGpuDispatcher; NvBlastTkTaskNameToIDMap mName2IDmap; volatile int mPendingTasks; std::mutex mMutex; NvBlastTkTaskDepTable mDepTable; NvTaskTable mTaskTable; Array<NvTaskID>::type mStartDispatch; }; NvBlastTkTaskManager::NvBlastTkTaskManager(NvErrorCallback& errorCallback, NvCpuDispatcher* cpuDispatcher, NvGpuDispatcher* gpuDispatcher) : mErrorCallback (errorCallback) , mCpuDispatcher( cpuDispatcher ) , mGpuDispatcher( gpuDispatcher ) , mPendingTasks( 0 ) , mDepTable(NV_DEBUG_EXP("NvBlastTkTaskDepTable")) , mTaskTable(NV_DEBUG_EXP("NvTaskTable")) , mStartDispatch(NV_DEBUG_EXP("StartDispatch")) { } NvBlastTkTaskManager::~NvBlastTkTaskManager() { } void NvBlastTkTaskManager::release() { NVBLAST_DELETE(this, NvBlastTkTaskManager); } void NvBlastTkTaskManager::decrReference(NvLightCpuTask& lighttask) { /* This does not need a lock! */ if (!ACCESS(lighttask).atomicDecrementRefCount()) { NVBLAST_ASSERT(mCpuDispatcher); if (mCpuDispatcher) { mCpuDispatcher->submitTask(lighttask); } else { lighttask.release(); } } } void NvBlastTkTaskManager::addReference(NvLightCpuTask& lighttask) { /* This does not need a lock! */ ACCESS(lighttask).atomicIncrementRefCount(); } void NvBlastTkTaskManager::emitStartEvent(NvBaseTask& basetask, uint32_t threadId) { NvBaseTask* tmp = &basetask; NV_UNUSED(tmp); NV_UNUSED(threadId); /* This does not need a lock! */ #if NV_SUPPORT_NVTASK_PROFILING && NV_PROFILE //NV_COMPILE_TIME_ASSERT(sizeof(NvProfileEventId::mEventId) == sizeof(NvBaseTask::mEventID)); if (NvBlastGlobalGetProfilerCallback()) NvBlastGlobalGetProfilerCallback()->zoneStart(basetask.getName(), true, 0); #endif } void NvBlastTkTaskManager::emitStopEvent(NvBaseTask& basetask, uint32_t threadId) { NvBaseTask* tmp = &basetask; NV_UNUSED(tmp); NV_UNUSED(threadId); /* This does not need a lock! 
*/ if (NvBlastGlobalGetProfilerCallback()) NvBlastGlobalGetProfilerCallback()->zoneEnd(nullptr, basetask.getName(), true, 0); #if NV_SUPPORT_NVTASK_PROFILING && NV_PROFILE //NV_COMPILE_TIME_ASSERT(sizeof(NvProfileEventId::mEventId) == sizeof(NvBaseTask::mEventID)); #endif } /* * Called by the owner (Scene) at the start of every frame, before * asking for tasks to be submitted. */ void NvBlastTkTaskManager::resetDependencies() { NVBLAST_ASSERT( !mPendingTasks ); // only valid if you don't resubmit named tasks, this is true for the SDK NVBLAST_ASSERT( mCpuDispatcher ); mTaskTable.clear(); mDepTable.clear(); mName2IDmap.clear(); mPendingTasks = 0; } /* * Called by the owner (Scene) to start simulating the task graph. * Dispatch all tasks with refCount == 1 */ void NvBlastTkTaskManager::startSimulation() { NVBLAST_ASSERT( mCpuDispatcher ); if( mGpuDispatcher ) { mGpuDispatcher->startSimulation(); } /* Handle empty task graph */ if( mPendingTasks == 0 ) { return; } bool gpuDispatch = false; for( NvTaskID i = 0 ; i < mTaskTable.size() ; i++ ) { if( mTaskTable[ i ].mType == NvTaskType::TT_COMPLETED ) { continue; } if( !Nv::Blast::atomicDecrement( &mTaskTable[ i ].mRefCount ) ) { mStartDispatch.pushBack(i); } } for( uint32_t i=0; i<mStartDispatch.size(); ++i) { gpuDispatch |= dispatchTask( mStartDispatch[i], gpuDispatch ); } //mStartDispatch.resize(0); mStartDispatch.forceSize_Unsafe(0); if( mGpuDispatcher && gpuDispatch ) { mGpuDispatcher->finishGroup(); } } void NvBlastTkTaskManager::stopSimulation() { if( mGpuDispatcher ) { mGpuDispatcher->stopSimulation(); } } NvTaskID NvBlastTkTaskManager::getNamedTask( const char *name ) { const NvBlastTkTaskNameToIDMap::Entry *ret; { LOCK(); ret = mName2IDmap.find( name ); } if( ret ) { return ret->second; } else { // create named entry in task table, without a task return submitNamedTask( NULL, name, NvTaskType::TT_NOT_PRESENT ); } } NvTask* NvBlastTkTaskManager::getTaskFromID( NvTaskID id ) { LOCK(); // todo: reader lock necessary? return mTaskTable[ id ].mTask; } /* If called at runtime, must be thread-safe */ NvTaskID NvBlastTkTaskManager::submitNamedTask( NvTask *task, const char *name, NvTaskType::Enum type ) { if( task ) { ACCESS(task)->setTm(this); task->submitted(); } LOCK(); const NvBlastTkTaskNameToIDMap::Entry *ret = mName2IDmap.find( name ); if( ret ) { NvTaskID prereg = ret->second; if( task ) { /* name was registered for us by a dependent task */ NVBLAST_ASSERT( !mTaskTable[ prereg ].mTask ); NVBLAST_ASSERT( mTaskTable[ prereg ].mType == NvTaskType::TT_NOT_PRESENT ); mTaskTable[ prereg ].mTask = task; mTaskTable[ prereg ].mType = type; ACCESS(task)->setTaskID(prereg); } return prereg; } else { Nv::Blast::atomicIncrement(&mPendingTasks); NvTaskID id = static_cast<NvTaskID>(mTaskTable.size()); mName2IDmap[ name ] = id; if( task ) { ACCESS(task)->setTaskID(id); } NvBlastTkTaskTableRow r; r.mTask = task; r.mType = type; mTaskTable.pushBack(r); return id; } } /* * Add an unnamed task to the task table */ NvTaskID NvBlastTkTaskManager::submitUnnamedTask( NvTask& task, NvTaskType::Enum type ) { Nv::Blast::atomicIncrement(&mPendingTasks); ACCESS(task).setTm(this); task.submitted(); LOCK(); ACCESS(task).setTaskID(static_cast<NvTaskID>(mTaskTable.size())); NvBlastTkTaskTableRow r; r.mTask = &task; r.mType = type; mTaskTable.pushBack(r); return task.getTaskID(); } /* Called by worker threads (or cooperating application threads) when a * NvTask has completed. Propogate depdenencies, decrementing all * referenced tasks' refCounts. 
If any of those reach zero, activate * those tasks. */ void NvBlastTkTaskManager::taskCompleted( NvTask& task ) { LOCK(); if( resolveRow( task.getTaskID(), false ) ) { mGpuDispatcher->finishGroup(); } } /* ================== Private Functions ======================= */ /* * Add a dependency to force 'task' to complete before the * referenced 'taskID' is allowed to be dispatched. */ void NvBlastTkTaskManager::finishBefore( NvTask& task, NvTaskID taskID ) { LOCK(); NVBLAST_ASSERT( mTaskTable[ taskID ].mType != NvTaskType::TT_COMPLETED ); mTaskTable[ task.getTaskID() ].addDependency( mDepTable, taskID ); Nv::Blast::atomicIncrement( &mTaskTable[ taskID ].mRefCount ); } /* * Add a dependency to force 'task' to wait for the referenced 'taskID' * to complete before it is allowed to be dispatched. */ void NvBlastTkTaskManager::startAfter( NvTask& task, NvTaskID taskID ) { LOCK(); NVBLAST_ASSERT( mTaskTable[ taskID ].mType != NvTaskType::TT_COMPLETED ); mTaskTable[ taskID ].addDependency( mDepTable, task.getTaskID() ); Nv::Blast::atomicIncrement( &mTaskTable[ task.getTaskID() ].mRefCount ); } void NvBlastTkTaskManager::addReference( NvTaskID taskID ) { LOCK(); Nv::Blast::atomicIncrement( &mTaskTable[ taskID ].mRefCount ); } /* * Remove one reference count from a task. Must be done here to make it thread safe. */ void NvBlastTkTaskManager::decrReference( NvTaskID taskID ) { LOCK(); if( !Nv::Blast::atomicDecrement( &mTaskTable[ taskID ].mRefCount ) ) { if( dispatchTask( taskID, false ) ) { mGpuDispatcher->finishGroup(); } } } int32_t NvBlastTkTaskManager::getReference(NvTaskID taskID) const { return mTaskTable[ taskID ].mRefCount; } /* * A task has completed, decrement all dependencies and submit tasks * that are ready to run. Signal simulation end if ther are no more * pending tasks. */ bool NvBlastTkTaskManager::resolveRow( NvTaskID taskID, bool gpuGroupStart ) { int depRow = mTaskTable[ taskID ].mStartDep; while( depRow != EOL ) { NvBlastTkTaskDepTableRow& row = mDepTable[ uint32_t(depRow) ]; NvBlastTkTaskTableRow& dtt = mTaskTable[ row.mTaskID ]; if( !Nv::Blast::atomicDecrement( &dtt.mRefCount ) ) { gpuGroupStart |= dispatchTask( row.mTaskID, gpuGroupStart ); } depRow = row.mNextDep; } Nv::Blast::atomicDecrement( &mPendingTasks ); return gpuGroupStart; } /* * Submit a ready task to its appropriate dispatcher. */ bool NvBlastTkTaskManager::dispatchTask( NvTaskID taskID, bool gpuGroupStart ) { LOCK(); // todo: reader lock necessary? 
NvBlastTkTaskTableRow& tt = mTaskTable[ taskID ]; // prevent re-submission if( tt.mType == NvTaskType::TT_COMPLETED ) { mErrorCallback.reportError(NvErrorCode::eDEBUG_WARNING, "NvTask dispatched twice", __FILE__, __LINE__); return false; } switch ( tt.mType ) { case NvTaskType::TT_CPU: mCpuDispatcher->submitTask( *tt.mTask ); break; case NvTaskType::TT_GPU: #if NV_WINDOWS_FAMILY if( mGpuDispatcher ) { if( !gpuGroupStart ) { mGpuDispatcher->startGroup(); } mGpuDispatcher->submitTask( *tt.mTask ); gpuGroupStart = true; } else #endif { mErrorCallback.reportError(NvErrorCode::eDEBUG_WARNING, "No GPU dispatcher", __FILE__, __LINE__); } break; case NvTaskType::TT_NOT_PRESENT: /* No task registered with this taskID, resolve its dependencies */ NVBLAST_ASSERT(!tt.mTask); gpuGroupStart |= resolveRow( taskID, gpuGroupStart ); break; case NvTaskType::TT_COMPLETED: default: mErrorCallback.reportError(NvErrorCode::eDEBUG_WARNING, "Unknown task type", __FILE__, __LINE__); gpuGroupStart |= resolveRow( taskID, gpuGroupStart ); break; } tt.mType = NvTaskType::TT_COMPLETED; return gpuGroupStart; } } // namespace Blast } // namespace Nv // Implement NvTaskManager factory namespace nvidia { namespace task { NvTaskManager* NvTaskManager::createTaskManager(NvErrorCallback& errorCallback, NvCpuDispatcher* cpuDispatcher, NvGpuDispatcher* gpuDispatcher) { return NVBLAST_NEW(Nv::Blast::NvBlastTkTaskManager)(errorCallback, cpuDispatcher, gpuDispatcher); } } }
16,566
C++
27.51463
143
0.664433
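The task manager above builds a dependency table of named and unnamed tasks; startSimulation then dispatches every task whose reference count drops to zero, and taskCompleted/resolveRow release dependents as work finishes. Below is a sketch of wiring a three-task frame against it. It assumes the NvTaskManager abstract interface exposes the submit/dependency/start methods implemented above, and that the caller supplies concrete NvTask implementations and a CPU dispatcher:

#include "NvTask.h"

using namespace nvidia::task;

// Sketch: build a small generate -> apply -> split dependency chain and kick it off.
void buildAndRunFrame(NvTaskManager* tm, NvTask& generate, NvTask& apply, NvTask& split)
{
    tm->resetDependencies();                              // start of frame: clear last frame's graph

    const NvTaskID applyID = tm->submitNamedTask(&apply, "Apply");
    const NvTaskID splitID = tm->submitNamedTask(&split, "Split");
    tm->submitUnnamedTask(generate);                      // unnamed CPU task

    tm->finishBefore(generate, applyID);                  // 'generate' must finish before 'apply' dispatches
    tm->finishBefore(apply, splitID);                     // 'apply' must finish before 'split' dispatches

    tm->startSimulation();                                // dispatch all tasks whose refCount reaches zero
    // ... wait for the dispatcher to drain, then:
    tm->stopSimulation();
}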
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkGroupImpl.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTTKGROUPIMPL_H #define NVBLASTTKGROUPIMPL_H #include "NvBlastTkTaskImpl.h" #include "NvBlastTkGroup.h" #include "NvBlastTkTypeImpl.h" namespace Nv { namespace Blast { class TkActorImpl; class TkFamilyImpl; NVBLASTTK_IMPL_DECLARE(Group) { ~TkGroupImpl(); public: TkGroupImpl(); NVBLASTTK_IMPL_DEFINE_IDENTIFIABLE('G', 'R', 'P', '\0'); static TkGroupImpl* create(const TkGroupDesc& desc); // Begin TkGroup virtual bool addActor(TkActor& actor) override; virtual uint32_t getActorCount() const override; virtual uint32_t getActors(TkActor** buffer, uint32_t bufferSize, uint32_t indexStart = 0) const override; virtual uint32_t startProcess() override; virtual bool endProcess() override; virtual void getStats(TkGroupStats& stats) const override; virtual void setWorkerCount(uint32_t workerCount) override; virtual uint32_t getWorkerCount() const override; virtual TkGroupWorker* acquireWorker() override; virtual void returnWorker(TkGroupWorker*) override; // End TkGroup // TkGroupImpl API /** Remove the actor from this group if the actor actually belongs to it and the group is not processing. \param[in] actor The TkActor to remove. \return true if removing succeeded, false otherwise */ bool removeActor(TkActor& actor); /** Add the actor to this group's job queue. It is the caller's responsibility to add an actor only once. This condition is checked in debug builds. */ void enqueue(TkActorImpl* tkActor); /** Atomically check if this group is processing actors. @see setProcessing() \return true between startProcess() and endProcess() calls, false otherwise */ bool isProcessing() const; private: /** Atomically set the processing state. This function checks for the current state before changing it. @see isProcessing() \param[in] value the value of the new state \return true if the new state could be set, false otherwise */ bool setProcessing(bool value); /** Get the group-family shared memory for the specified family. To be used when the memory is expected to already exist. 
*/ SharedMemory* getSharedMemory(TkFamilyImpl* family); void releaseSharedMemory(TkFamilyImpl* fam, SharedMemory* mem); // functions to add/remove actors _without_ group-family memory management void addActorInternal(TkActorImpl& tkActor); void addActorsInternal(TkActorImpl** actors, uint32_t numActors); void removeActorInternal(TkActorImpl& tkActor); uint32_t m_actorCount; //!< number of actors in this group HashMap<TkFamilyImpl*, SharedMemory*>::type m_sharedMemory; //!< memory sharable by actors in the same family in this group // it is assumed no more than the asset's number of bond and chunks fracture commands are produced SharedBlock<NvBlastChunkFractureData> m_chunkTempDataBlock; //!< chunk data for damage/fracture SharedBlock<NvBlastBondFractureData> m_bondTempDataBlock; //!< bond data for damage/fracture SharedBlock<NvBlastChunkFractureData> m_chunkEventDataBlock; //!< initial memory block for event data SharedBlock<NvBlastBondFractureData> m_bondEventDataBlock; //!< initial memory block for event data SharedBlock<char> m_splitScratchBlock; //!< split scratch memory std::atomic<bool> m_isProcessing; //!< true while workers are processing Array<TkWorker>::type m_workers; //!< this group's workers Array<TkWorkerJob>::type m_jobs; //!< this group's process jobs //#if NV_PROFILE TkGroupStats m_stats; //!< accumulated group's worker stats //#endif std::mutex m_workerMtx; friend class TkWorker; }; NV_INLINE bool TkGroupImpl::isProcessing() const { return m_isProcessing.load(); } NV_INLINE void TkGroupImpl::getStats(TkGroupStats& stats) const { #if NV_PROFILE memcpy(&stats, &m_stats, sizeof(TkGroupStats)); #else NV_UNUSED(stats); #endif } NV_INLINE uint32_t TkGroupImpl::getActorCount() const { return m_actorCount; } NV_INLINE SharedMemory* TkGroupImpl::getSharedMemory(TkFamilyImpl* family) { SharedMemory* mem = m_sharedMemory[family]; NVBLAST_ASSERT(mem != nullptr); return mem; } NV_FORCE_INLINE void operator +=(NvBlastTimers& lhs, const NvBlastTimers& rhs) { lhs.material += rhs.material; lhs.fracture += rhs.fracture; lhs.island += rhs.island; lhs.partition += rhs.partition; lhs.visibility += rhs.visibility; } } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTTKGROUPIMPL_H
6,776
C
33.93299
139
0.664994
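TkGroupImpl splits its work into per-actor jobs processed by TkWorker objects between startProcess and endProcess. A single-threaded usage sketch of that interface, assuming startProcess returns the job count and TkGroupWorker::process(jobID) runs one job as TkWorker::process does above:

#include "NvBlastTkGroup.h"

// Sketch: process a group's pending actors on the calling thread.
void processGroupSerially(Nv::Blast::TkGroup& group)
{
    const uint32_t jobCount = group.startProcess();       // build the per-actor job list
    if (jobCount > 0)
    {
        Nv::Blast::TkGroupWorker* worker = group.acquireWorker();
        for (uint32_t jobID = 0; jobID < jobCount; ++jobID)
        {
            worker->process(jobID);                       // damage -> fracture -> split for one actor
        }
        group.returnWorker(worker);
    }
    group.endProcess();                                   // finish processing and flush queued events
}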
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkFrameworkImpl.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastAssert.h" #include "NvBlastTkFrameworkImpl.h" #include "NvBlastTkAssetImpl.h" #include "NvBlastTkFamilyImpl.h" #include "NvBlastTkGroupImpl.h" #include "NvBlastTkActorImpl.h" #include "NvBlastTkJointImpl.h" #include "NvBlastTkTypeImpl.h" #include "NvBlastGlobals.h" #include <algorithm> using namespace nvidia; using namespace nvidia::shdfnd; NV_INLINE bool operator < (const NvBlastID& id1, const NvBlastID& id2) { return memcmp(&id1, &id2, sizeof(NvBlastID)) < 0; } namespace Nv { namespace Blast { //////// Local definitions //////// // Map type ID to static type data #define NVBLASTTK_REGISTER_TYPE(_name) \ if (!Tk##_name##Impl::s_type.indexIsValid()) \ { \ Tk##_name##Impl::s_type.setIndex(TkTypeIndex::_name); \ } \ m_types[TkTypeIndex::_name] = &Tk##_name##Impl::s_type; \ m_typeIDToIndex[Tk##_name##Impl::s_type.getID()] = TkTypeIndex::_name #define NVBLASTTK_RELEASE_TYPE(_name) \ { \ TkTypeImpl& type = Tk##_name##Impl::s_type; \ auto& toRelease = m_objects[type.getIndex()]; \ for (TkObject* obj : toRelease) \ { \ obj->release(); \ } \ } //////// TkFrameworkImpl static variables //////// TkFrameworkImpl* TkFrameworkImpl::s_framework = nullptr; //////// TkFrameworkImpl static function //////// TkFrameworkImpl* TkFrameworkImpl::get() { return s_framework; } bool TkFrameworkImpl::set(TkFrameworkImpl* framework) { if (s_framework != nullptr) { if (framework != nullptr) { NVBLAST_LOG_ERROR("TkFrameworkImpl::set: framework already set. 
Pass NULL to this function to destroy framework."); return false; } NVBLAST_DELETE(s_framework, TkFrameworkImpl); } s_framework = framework; return true; } //////// TkFrameworkImpl methods //////// TkFrameworkImpl::TkFrameworkImpl() : TkFramework() { // Register types m_types.resize(TkTypeIndex::TypeCount); m_objects.resize(TkTypeIndex::TypeCount); NVBLASTTK_REGISTER_TYPE(Asset); NVBLASTTK_REGISTER_TYPE(Family); NVBLASTTK_REGISTER_TYPE(Group); } TkFrameworkImpl::~TkFrameworkImpl() { } void TkFrameworkImpl::release() { // Special release of joints, which are not TkIdentifiable: Array<TkJointImpl*>::type joints; // Since the EraseIterator is not exposed joints.reserve(m_joints.size()); for (auto j = m_joints.getIterator(); !j.done(); ++j) { joints.pushBack(*j); } for (uint32_t i = 0; i < joints.size(); ++i) { joints[i]->release(); } NVBLAST_ASSERT(m_joints.size() == 0); joints.reset(); // Since we will be deleting the allocator NVBLASTTK_RELEASE_TYPE(Group); NVBLASTTK_RELEASE_TYPE(Asset); set(nullptr); } const TkType* TkFrameworkImpl::getType(TkTypeIndex::Enum typeIndex) const { if (typeIndex < 0 || typeIndex >= TkTypeIndex::TypeCount) { NVBLAST_LOG_WARNING("TkFrameworkImpl::getType: invalid typeIndex."); return nullptr; } return m_types[typeIndex]; } TkIdentifiable* TkFrameworkImpl::findObjectByID(const NvBlastID& id) const { TkIdentifiable* object = findObjectByIDInternal(id); if (object == nullptr) { NVBLAST_LOG_WARNING("TkFrameworkImpl::findObjectByID: object not found."); } return object; } uint32_t TkFrameworkImpl::getObjectCount(const TkType& type) const { const uint32_t index = static_cast<const TkTypeImpl&>(type).getIndex(); if (index >= m_objects.size()) { NVBLAST_LOG_ERROR("TkFrameworkImpl::getObjectCount: BlastTk object type unrecognized."); return 0; } return m_objects[index].size(); } uint32_t TkFrameworkImpl::getObjects(TkIdentifiable** buffer, uint32_t bufferSize, const TkType& type, uint32_t indexStart /* = 0 */) const { const uint32_t index = static_cast<const TkTypeImpl&>(type).getIndex(); if (index >= m_objects.size()) { NVBLAST_LOG_ERROR("TkFrameworkImpl::getObjectCount: BlastTk object type unrecognized."); return 0; } const auto& objectArray = m_objects[index]; uint32_t objectCount = objectArray.size(); if (objectCount <= indexStart) { NVBLAST_LOG_WARNING("TkFrameworkImpl::getObjects: indexStart beyond end of object list."); return 0; } objectCount -= indexStart; if (objectCount > bufferSize) { objectCount = bufferSize; } memcpy(buffer, objectArray.begin() + indexStart, objectCount * sizeof(TkObject*)); return objectCount; } bool TkFrameworkImpl::reorderAssetDescChunks(NvBlastChunkDesc* chunkDescs, uint32_t chunkCount, NvBlastBondDesc* bondDescs, uint32_t bondCount, uint32_t* chunkReorderMap /*= nullptr*/, bool keepBondNormalChunkOrder /*= false*/) const { uint32_t* map = chunkReorderMap != nullptr ? 
chunkReorderMap : static_cast<uint32_t*>(NVBLAST_ALLOC_NAMED(chunkCount * sizeof(uint32_t), "reorderAssetDescChunks:chunkReorderMap")); void* scratch = NVBLAST_ALLOC_NAMED(chunkCount * sizeof(NvBlastChunkDesc), "reorderAssetDescChunks:scratch"); const bool result = NvBlastReorderAssetDescChunks(chunkDescs, chunkCount, bondDescs, bondCount, map, keepBondNormalChunkOrder, scratch, logLL); NVBLAST_FREE(scratch); if (chunkReorderMap == nullptr) { NVBLAST_FREE(map); } return result; } bool TkFrameworkImpl::ensureAssetExactSupportCoverage(NvBlastChunkDesc* chunkDescs, uint32_t chunkCount) const { void* scratch = NVBLAST_ALLOC_NAMED(chunkCount, "ensureAssetExactSupportCoverage:scratch"); const bool result = NvBlastEnsureAssetExactSupportCoverage(chunkDescs, chunkCount, scratch, logLL); NVBLAST_FREE(scratch); return result; } TkAsset* TkFrameworkImpl::createAsset(const TkAssetDesc& desc) { TkAssetImpl* asset = TkAssetImpl::create(desc); if (asset == nullptr) { NVBLAST_LOG_ERROR("TkFrameworkImpl::createAsset: failed to create asset."); } return asset; } TkAsset* TkFrameworkImpl::createAsset(const NvBlastAsset* assetLL, Nv::Blast::TkAssetJointDesc* jointDescs, uint32_t jointDescCount, bool ownsAsset) { TkAssetImpl* asset = TkAssetImpl::create(assetLL, jointDescs, jointDescCount, ownsAsset); if (asset == nullptr) { NVBLAST_LOG_ERROR("TkFrameworkImpl::createAsset: failed to create asset."); } return asset; } TkGroup* TkFrameworkImpl::createGroup(const TkGroupDesc& desc) { TkGroupImpl* group = TkGroupImpl::create(desc); if (group == nullptr) { NVBLAST_LOG_ERROR("TkFrameworkImpl::createGroup: failed to create group."); } return group; } TkActor* TkFrameworkImpl::createActor(const TkActorDesc& desc) { TkActor* actor = TkActorImpl::create(desc); if (actor == nullptr) { NVBLAST_LOG_ERROR("TkFrameworkImpl::createActor: failed to create actor."); } return actor; } TkJoint* TkFrameworkImpl::createJoint(const TkJointDesc& desc) { TkJointImpl** handle0 = nullptr; TkJointImpl** handle1 = nullptr; TkFamilyImpl* family0 = static_cast<TkFamilyImpl*>(desc.families[0]); TkFamilyImpl* family1 = static_cast<TkFamilyImpl*>(desc.families[1]); NVBLAST_CHECK_ERROR(family0 != nullptr || family1 != nullptr, "TkFrameworkImpl::createJoint: at least one family in the TkJointDesc must be valid.", return nullptr); NVBLAST_CHECK_ERROR(family0 == nullptr || desc.chunkIndices[0] < family0->getAssetImpl()->getChunkCount(), "TkFrameworkImpl::createJoint: desc.chunkIndices[0] is invalid.", return nullptr); NVBLAST_CHECK_ERROR(family1 == nullptr || desc.chunkIndices[1] < family1->getAssetImpl()->getChunkCount(), "TkFrameworkImpl::createJoint: desc.chunkIndices[1] is invalid.", return nullptr); const bool actorsAreTheSame = family0 == family1 && family0->getActorByChunk(desc.chunkIndices[0]) == family1->getActorByChunk(desc.chunkIndices[1]); NVBLAST_CHECK_ERROR(!actorsAreTheSame, "TkFrameworkImpl::createJoint: the chunks listed in the TkJointDesc must be in different actors.", return nullptr); if (family0 != nullptr) { const bool isSupportChunk = !isInvalidIndex(NvBlastAssetGetChunkToGraphNodeMap(family0->getAssetImpl()->getAssetLLInternal(), logLL)[desc.chunkIndices[0]]); NVBLAST_CHECK_ERROR(isSupportChunk, "TkFrameworkImpl::createJoint: desc.chunkIndices[0] is not a support chunk in the asset for desc.families[0]. 
Joint not created.", return nullptr); handle0 = family0->createExternalJointHandle(getFamilyID(family1), desc.chunkIndices[0], desc.chunkIndices[1]); NVBLAST_CHECK_ERROR(handle0 != nullptr, "TkFrameworkImpl::createJoint: could not create joint handle in family[0]. Joint not created.", return nullptr); } if (family1 != nullptr) { const bool isSupportChunk = !isInvalidIndex(NvBlastAssetGetChunkToGraphNodeMap(family1->getAssetImpl()->getAssetLLInternal(), logLL)[desc.chunkIndices[1]]); NVBLAST_CHECK_ERROR(isSupportChunk, "TkFrameworkImpl::createJoint: desc.chunkIndices[1] is not a support chunk in the asset for desc.families[1]. Joint not created.", return nullptr); if (family1 != family0) { handle1 = family1->createExternalJointHandle(getFamilyID(family0), desc.chunkIndices[1], desc.chunkIndices[0]); NVBLAST_CHECK_ERROR(handle1 != nullptr, "TkFrameworkImpl::createJoint: could not create joint handle in family[1]. Joint not created.", return nullptr); } } TkJointImpl* joint = NVBLAST_NEW(TkJointImpl)(desc, nullptr); NVBLAST_CHECK_ERROR(joint != nullptr, "TkFrameworkImpl::createJoint: failed to create joint.", return nullptr); const TkJointData& jointData = joint->getDataInternal(); if (handle0 != nullptr) { *handle0 = joint; static_cast<TkActorImpl*>(jointData.actors[0])->addJoint(joint->m_links[0]); } if (handle1 != nullptr) { *handle1 = joint; if (jointData.actors[0] != jointData.actors[1]) { static_cast<TkActorImpl*>(jointData.actors[1])->addJoint(joint->m_links[1]); } } return joint; } void TkFrameworkImpl::onCreate(TkIdentifiable& object) { const TkTypeImpl& type = static_cast<const TkTypeImpl&>(object.getType()); const uint32_t index = type.getIndex(); if (index >= m_objects.size()) { if (!isInvalidIndex(index)) { NVBLAST_LOG_ERROR("TkFrameworkImpl::addObject: object type unrecognized."); } return; } auto& objectArray = m_objects[index]; NVBLAST_ASSERT(objectArray.find(&object) == objectArray.end()); objectArray.pushBack(&object); } void TkFrameworkImpl::onDestroy(TkIdentifiable& object) { // remove from id map if present const auto id = object.getID(); if (!TkGUIDIsZero(&id)) { m_IDToObject.erase(id); } // remove from object list const TkTypeImpl& type = static_cast<const TkTypeImpl&>(object.getType()); const uint32_t index = type.getIndex(); if (index >= m_objects.size()) { if (!isInvalidIndex(index)) { NVBLAST_LOG_ERROR("TkFrameworkImpl::removeObject: object type unrecognized."); } return; } auto& objectArray = m_objects[index]; objectArray.findAndReplaceWithLast(&object); } void TkFrameworkImpl::onCreate(TkJointImpl& joint) { NVBLAST_CHECK_ERROR(m_joints.insert(&joint), "TkFrameworkImpl::onCreate: Joint already tracked.", return); } void TkFrameworkImpl::onDestroy(TkJointImpl& joint) { NVBLAST_CHECK_ERROR(m_joints.erase(&joint), "TkFrameworkImpl::onDestroy: Joint not tracked.", return); } void TkFrameworkImpl::onIDChange(TkIdentifiable& object, const NvBlastID& IDPrev, const NvBlastID& IDCurr) { if (!TkGUIDIsZero(&IDPrev)) { if (!m_IDToObject.erase(IDPrev)) { NVBLAST_LOG_ERROR("TkFrameworkImpl::reportIDChanged: object with previous ID doesn't exist."); } } if (!TkGUIDIsZero(&IDCurr)) { auto& value = m_IDToObject[IDCurr]; if (value != nullptr) { NVBLAST_LOG_ERROR("TkFrameworkImpl::reportIDChanged: object with new ID already exists."); return; } value = &object; } } } // namespace Blast } // namespace Nv //////// Global API implementation //////// Nv::Blast::TkFramework* NvBlastTkFrameworkCreate() { if (Nv::Blast::TkFrameworkImpl::get() != nullptr) { 
NVBLAST_LOG_ERROR("TkFramework::create: framework already created. Use TkFramework::get() to access."); return nullptr; } Nv::Blast::TkFrameworkImpl* framework = NVBLAST_NEW(Nv::Blast::TkFrameworkImpl) (); Nv::Blast::TkFrameworkImpl::set(framework); return Nv::Blast::TkFrameworkImpl::get(); } Nv::Blast::TkFramework* NvBlastTkFrameworkGet() { return Nv::Blast::TkFrameworkImpl::get(); }
15,109
C++
31.634989
233
0.657886
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkTask.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTTKTASK_H #define NVBLASTTKTASK_H #include "NvBlastTkGroupTaskManager.h" #include "NvTask.h" #include "NvBlastTkGroup.h" #include <atomic> #include <mutex> #include <condition_variable> namespace Nv { namespace Blast { /** Counting synchronization object for waiting on TkWorkers to finish. */ class TkTaskSync { public: /** Initializes with an expected number of notifications. */ TkTaskSync(uint32_t count) : m_count(count) {} /** Blocks until the expected number of notifications happened. */ void wait() { std::unique_lock<std::mutex> lk(m_mutex); m_cv.wait(lk, [&] { return m_count == 0; }); } /** Decrement the wait() count by one. */ void notify() { //PERF_SCOPE_H("TaskSync::notify"); std::unique_lock<std::mutex> lk(m_mutex); if (m_count > 0) { m_count--; } if (m_count == 0) { lk.unlock(); m_cv.notify_one(); } } /** Peek if notifications are pending. */ bool isDone() { std::unique_lock<std::mutex> lk(m_mutex); return m_count == 0; } /** Sets the expected number of notifications for wait() to unblock. */ void setCount(uint32_t count) { m_count = count; } private: std::mutex m_mutex; std::condition_variable m_cv; uint32_t m_count; }; /** Common job counter for all tasks. */ class TkAtomicCounter { public: TkAtomicCounter() : m_current(0), m_maxCount(0) {} bool isValid(uint32_t val) { return val < m_maxCount; } uint32_t next() { return m_current.fetch_add(1); } void reset(uint32_t maxCount) { m_maxCount = maxCount; m_current = 0; } private: std::atomic<uint32_t> m_current; uint32_t m_maxCount; }; /** A task running one group job after the other until done. Synchronizes atomically with its siblings. 
*/ class TkGroupWorkerTask : public nvidia::task::NvLightCpuTask { public: TkGroupWorkerTask() : NvLightCpuTask(), m_group(nullptr), m_counter(nullptr), m_sync(nullptr) { } void setup(TkGroup* group, TkAtomicCounter* counter, TkTaskSync* sync) { m_group = group; m_counter = counter; m_sync = sync; } virtual void run() override { Nv::Blast::TkGroupWorker* worker = m_group->acquireWorker(); uint32_t jobID = m_counter->next(); while (m_counter->isValid(jobID)) { worker->process(jobID); jobID = m_counter->next(); } m_group->returnWorker(worker); } virtual void release() override { NvLightCpuTask::release(); // release the sync last m_sync->notify(); } virtual const char* getName() const override { return "BlastGroupWorkerTask"; } private: TkGroup* m_group; TkAtomicCounter* m_counter; TkTaskSync* m_sync; }; /** Implements TkGroupTaskManager */ class TkGroupTaskManagerImpl : public TkGroupTaskManager { public: TkGroupTaskManagerImpl(nvidia::task::NvTaskManager& taskManager, TkGroup* group) : m_taskManager(taskManager), m_sync(0), m_group(group) {} // TkGroupTaskManager API virtual void setGroup(TkGroup*) override; virtual uint32_t process(uint32_t) override; virtual void release() override; virtual bool wait(bool block) override; private: static const uint32_t TASKS_MAX_COUNT = 16; nvidia::task::NvTaskManager& m_taskManager; TkAtomicCounter m_counter; TkGroupWorkerTask m_tasks[TASKS_MAX_COUNT]; TkTaskSync m_sync; TkGroup* m_group; }; } // namespace Blast } // namespace Nv #endif // NVBLASTTKTASK_H
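// --- Illustrative usage sketch (appended; not part of the original header) ---
// Demonstrates how TkAtomicCounter hands out job indices to competing workers and how
// TkTaskSync is used to wait for their completion, using plain std::thread instead of
// the NvTask scheduler used by TkGroupWorkerTask. The job body is a stand-in; in the
// real task it would be TkGroupWorker::process(jobID).
#include <thread>
#include <vector>

static void counterAndSyncSketch()
{
    using namespace Nv::Blast;

    const uint32_t jobCount = 32;
    const uint32_t workerCount = 4;

    TkAtomicCounter counter;
    counter.reset(jobCount);        // jobs 0 .. jobCount-1 are valid

    TkTaskSync sync(workerCount);   // expect one notify() per worker

    auto workerBody = [&]()
    {
        // Same pull loop as TkGroupWorkerTask::run(): take indices until the counter runs out.
        for (uint32_t jobID = counter.next(); counter.isValid(jobID); jobID = counter.next())
        {
            (void)jobID;            // ... process job 'jobID' here ...
        }
        sync.notify();
    };

    std::vector<std::thread> workers;
    for (uint32_t i = 0; i < workerCount; ++i)
    {
        workers.emplace_back(workerBody);
    }

    sync.wait();                    // blocks until every worker has notified
    for (std::thread& t : workers)
    {
        t.join();
    }
}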
5,444
C
25.052631
99
0.641073
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkJointImpl.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastTkFrameworkImpl.h" #include "NvBlastTkJointImpl.h" #include "NvBlastTkActorImpl.h" #include "NvBlastTkAssetImpl.h" #include "NvBlastTkFamilyImpl.h" namespace Nv { namespace Blast { //////// Member functions //////// TkJointImpl::TkJointImpl(const TkJointDesc& desc, TkFamilyImpl* owner) : m_owner(owner) { userData = nullptr; // Do not fire off a creation event. Creation events will only be fired when a family-internal joint is created. NVBLAST_ASSERT(desc.families[0] != nullptr || desc.families[1] != nullptr); NVBLAST_ASSERT(desc.families[0] == nullptr || desc.chunkIndices[0] < static_cast<TkFamilyImpl*>(desc.families[0])->getAssetImpl()->getChunkCount()); NVBLAST_ASSERT(desc.attachPositions[0].isFinite()); NVBLAST_ASSERT(desc.families[1] == nullptr || desc.chunkIndices[1] < static_cast<TkFamilyImpl*>(desc.families[1])->getAssetImpl()->getChunkCount()); NVBLAST_ASSERT(desc.attachPositions[1].isFinite()); for (int i = 0; i < 2; ++i) { m_data.actors[i] = desc.families[i] != nullptr ? 
static_cast<TkFamilyImpl*>(desc.families[i])->getActorByChunk(desc.chunkIndices[i]) : nullptr; m_data.chunkIndices[i] = desc.chunkIndices[i]; m_data.attachPositions[i] = desc.attachPositions[i]; m_links[i].m_joint = this; } if (owner == nullptr) { TkFrameworkImpl::get()->onCreate(*this); } } void TkJointImpl::release() { removeReferencesInActors(); if (m_owner != nullptr) { // Internal joint m_owner->releaseJoint(*this); } else { // External joint removeReferencesInFamilies(); TkFrameworkImpl::get()->onDestroy(*this); NVBLAST_DELETE(this, TkJointImpl); } } void TkJointImpl::setActors(TkActorImpl* actor0, TkActorImpl* actor1, TkEventQueue* alternateQueue) { NVBLAST_ASSERT(m_data.actors[0] != nullptr || m_data.actors[1] != nullptr); const bool unreferenced = (actor0 == nullptr && m_data.actors[0] != nullptr) || (actor1 == nullptr && m_data.actors[1] != nullptr); removeReferencesInActors(); if (!unreferenced) { if (actor0 != nullptr) { actor0->addJoint(m_links[0]); } if (actor1 != nullptr && actor1 != actor0) // If the actors are the same, we only need one joint reference { actor1->addJoint(m_links[1]); } } // We do _not_ return if m_data.m_actors[0] == actor0 && m_data.m_actors[1] == actor1 since // this leads to a bug. This function will only be called when an actor is split. It is // possible that the two TkActors in a joint are the same as before, but in this case one // of the actors will be the split actor. Since will be represented by a different // physical actor, this case still needs to be reported in an event. Returning when neither // TkActor has changed will prevent that, and lead to unwanted joint disconnection. const uint32_t familyToUse = m_data.actors[0] != actor0 ? 0 : 1; TkEventQueue* q = alternateQueue == nullptr ? &static_cast<TkActorImpl*>(m_data.actors[familyToUse])->getFamilyImpl().getQueue() : alternateQueue; const bool jointWasInternal = m_data.actors[0] == m_data.actors[1]; if (unreferenced) { removeReferencesInFamilies(); actor0 = actor1 = nullptr; // Make both new actors NULL } if (!jointWasInternal || actor0 != actor1) { // The original actors were different, or they are now, signal a joint update TkJointUpdateEvent* e = q->allocData<TkJointUpdateEvent>(); e->joint = this; e->subtype = unreferenced ? TkJointUpdateEvent::Unreferenced : (jointWasInternal ? TkJointUpdateEvent::External : TkJointUpdateEvent::Changed); m_data.actors[0] = actor0; m_data.actors[1] = actor1; q->addEvent(e); } else if (jointWasInternal) { // The joint was originally created within the same actor and now it remains within the same actor. m_data.actors[0] = m_data.actors[1] = actor0; } } const TkJointData TkJointImpl::getData() const { return getDataInternal(); } void TkJointImpl::removeReferencesInActors() { TkActorImpl* actor0 = static_cast<TkActorImpl*>(m_data.actors[0]); TkActorImpl* actor1 = static_cast<TkActorImpl*>(m_data.actors[1]); if (actor0 != nullptr) { actor0->removeJoint(m_links[0]); } if (actor1 != nullptr && actor1 != actor0) // If the actors are the same, we only had one joint reference { actor1->removeJoint(m_links[1]); } } void TkJointImpl::removeReferencesInFamilies() { if (m_owner != nullptr) { return; // Only concerned with external joints } NVBLAST_ASSERT(m_data.actors[0] != m_data.actors[1] || m_data.actors[0] == nullptr); // This is enforced by the initial assumption in TkFrameworkImpl::createJoint. 
for (int i = 0; i < 2; ++i) { if (m_data.actors[i] != nullptr) { TkFamilyImpl& family = static_cast<TkActorImpl*>(m_data.actors[i])->getFamilyImpl(); TkJointImpl* joint = nullptr; const bool found = family.deleteExternalJointHandle(joint, getFamilyID(m_data.actors[i ^ 1]), m_data.chunkIndices[i], m_data.chunkIndices[i ^ 1]); NVBLAST_ASSERT((!found && m_data.actors[i ^ 1] == nullptr) || joint == this); // Might not be found if the actors in a family are in the process of being deleted NV_UNUSED(found); } } } } // namespace Blast } // namespace Nv
7,185
C++
35.851282
175
0.667223
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkAssetImpl.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTTKASSETIMPL_H #define NVBLASTTKASSETIMPL_H #include "NvBlastTkCommon.h" #include "NvBlastTkJoint.h" #include "NvBlastTkAsset.h" #include "NvBlastTkTypeImpl.h" #include "NvBlastArray.h" // Forward declarations struct NvBlastAsset; namespace Nv { namespace Blast { /** Implementation of TkAsset */ NVBLASTTK_IMPL_DECLARE(Asset) { public: TkAssetImpl(); TkAssetImpl(const NvBlastID& id); ~TkAssetImpl(); NVBLASTTK_IMPL_DEFINE_IDENTIFIABLE('A', 'S', 'S', 'T'); // Public methods /** Factory create method. This method creates a low-level asset and stores a reference to it. \param[in] desc Asset descriptor set by the user. \return a pointer to a new TkAssetImpl object if successful, NULL otherwise. */ static TkAssetImpl* create(const TkAssetDesc& desc); /** Static method to create an asset from an existing low-level asset. \param[in] assetLL A valid low-level asset passed in by the user. \param[in] jointDescs Optional joint descriptors to add to the new asset. \param[in] jointDescCount The number of joint descriptors in the jointDescs array. If non-zero, jointDescs cannot be NULL. \param[in] ownsAsset Whether or not to let this TkAssetImpl object release the low-level NvBlastAsset memory upon its own release. \return a pointer to a new TkAssetImpl object if successful, NULL otherwise. */ static TkAssetImpl* create(const NvBlastAsset* assetLL, Nv::Blast::TkAssetJointDesc* jointDescs = nullptr, uint32_t jointDescCount = 0, bool ownsAsset = false); /** \return a pointer to the underlying low-level NvBlastAsset associated with this asset. */ const NvBlastAsset* getAssetLLInternal() const; /** \return the number of internal joint descriptors stored with this asset. */ uint32_t getJointDescCountInternal() const; /** \return the array of internal joint descriptors stored with this asset, with size given by getJointDescCountInternal(). 
*/ const TkAssetJointDesc* getJointDescsInternal() const; // Begin TkAsset virtual const NvBlastAsset* getAssetLL() const override; virtual uint32_t getChunkCount() const override; virtual uint32_t getLeafChunkCount() const override; virtual uint32_t getBondCount() const override; virtual const NvBlastChunk* getChunks() const override; virtual const NvBlastBond* getBonds() const override; virtual const NvBlastSupportGraph getGraph() const override; virtual uint32_t getDataSize() const override; virtual uint32_t getJointDescCount() const override; virtual const TkAssetJointDesc* getJointDescs() const override; // End TkAsset private: /** Utility to add a joint descriptor between the indexed chunks. The two chunks must be support chunks, and there must exist a bond between them. The joint's attachment positions will be the bond centroid. \param[in] chunkIndex0 The first chunk index. \param[in] chunkIndex1 The second chunk index. \return true iff successful. */ bool addJointDesc(uint32_t chunkIndex0, uint32_t chunkIndex1); NvBlastAsset* m_assetLL; //!< The underlying low-level asset. Array<TkAssetJointDesc>::type m_jointDescs; //!< The array of internal joint descriptors. bool m_ownsAsset; //!< Whether or not this asset should release its low-level asset upon its own release. }; //////// TkAssetImpl inline methods //////// NV_INLINE const NvBlastAsset* TkAssetImpl::getAssetLLInternal() const { return m_assetLL; } NV_INLINE uint32_t TkAssetImpl::getJointDescCountInternal() const { return m_jointDescs.size(); } NV_INLINE const TkAssetJointDesc* TkAssetImpl::getJointDescsInternal() const { return m_jointDescs.begin(); } } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTTKASSETIMPL_H
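// --- Illustrative usage sketch (appended; not part of the original header) ---
// Wraps an existing low-level NvBlastAsset with TkAssetImpl::create(). 'assetLL' is
// assumed to have been built elsewhere with the low-level NvBlast API; no joint
// descriptors are added in this sketch.
static Nv::Blast::TkAssetImpl* wrapLowLevelAssetSketch(const NvBlastAsset* assetLL)
{
    // ownsAsset = true: the TkAssetImpl releases the low-level asset memory when it is released.
    Nv::Blast::TkAssetImpl* asset =
        Nv::Blast::TkAssetImpl::create(assetLL, /*jointDescs*/ nullptr, /*jointDescCount*/ 0, /*ownsAsset*/ true);

    if (asset != nullptr)
    {
        const uint32_t chunkCount = asset->getChunkCount();    // forwarded to the low-level asset
        (void)chunkCount;
    }
    return asset;
}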
5,764
C
34.368098
180
0.695524
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkEventQueue.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTTKEVENTQUEUE_H #define NVBLASTTKEVENTQUEUE_H #include <algorithm> #include <vector> #include <mutex> #include <atomic> #include "NvBlastTkFrameworkImpl.h" #include "NvBlastAssert.h" namespace Nv { namespace Blast { /** A dispatcher queue providing preallocation and thread-safe insertions therein. Typical usage: - preallocate space for events and payload: - reserveEvents, reserveData - enable asserts to detect undersized storage (allocations are not thread safe): - protect(true) - get pointers to payload data and events to fill in, thread safe for preallocated memory: - allocData, addEvent - back on main thread, ensure consistency: - protect(false) - continue adding events and payload on main thread if necessary like above (allocations are safe here) eventually dispatch, or reset if dispatched by proxy */ class TkEventQueue { public: TkEventQueue() : m_currentEvent(0), m_poolCapacity(0), m_pool(nullptr), m_allowAllocs(true) {} /** Peek events queue for dispatch. Do not use in protected state. */ operator const Array<TkEvent>::type&() { NVBLAST_ASSERT(m_allowAllocs); NVBLAST_ASSERT(m_currentEvent == m_events.size()); return m_events; } /** Debug help to catch (unwanted) allocations during task work. Note that this will not actually avoid allocations, but assert in debug builds. Set true before using in distributed environment. Set false to return to single-thread mode. */ void protect(bool enable) { // During parallel use, m_events.size() and m_currentEvent are allowed to diverge. // This is fine because resizeUninitialized does not alter the stored data. NVBLAST_ASSERT(m_currentEvent <= m_events.capacity()); m_events.resizeUninitialized(m_currentEvent); m_allowAllocs = !enable; } /** Restores initial state. Data memory is currently not being reused. To be improved. 
*/ void reset() { m_events.clear(); m_currentEvent = 0; for (void* mem : m_memory) { NVBLAST_FREE(mem); } m_memory.clear(); m_currentData = 0; m_allowAllocs = true; m_poolCapacity = 0; m_pool = nullptr; } /** Queue an event with a payload. */ template<class T> void addEvent(T* payload) { uint32_t index = m_currentEvent.fetch_add(1); // Should not allocate in protected state. NVBLAST_ASSERT(m_allowAllocs || m_currentEvent <= m_events.capacity()); m_events.resizeUninitialized(m_currentEvent); // During parallel use, m_events.size() and m_currentEvent are allowed to diverge. // Consistency is restored in protect(). NVBLAST_ASSERT(!m_allowAllocs || m_currentEvent == m_events.size()); TkEvent& evt = m_events[index]; evt.type = TkEvent::Type(T::EVENT_TYPE); evt.payload = payload; } /** Request storage for payload. */ template<typename T> T* allocData() { uint32_t index = m_currentData.fetch_add(sizeof(T)); if (m_currentData <= m_poolCapacity) { return reinterpret_cast<T*>(&m_pool[index]); } else { // Could do larger block allocation here. reserveData(sizeof(T)); // Account for the requested size. m_currentData = sizeof(T); return reinterpret_cast<T*>(&m_pool[0]); } } /** Preallocate a memory block of size Bytes for payload data. Note that this will inevitably allocate a new memory block. Subsequent calls to allocData will use this memory piecewise. */ void reserveData(size_t size) { NVBLAST_ASSERT(m_allowAllocs); m_pool = reinterpret_cast<uint8_t*>(allocDataBySize(size)); m_poolCapacity = size; m_currentData = 0; } /** Preallocate space for events. */ void reserveEvents(uint32_t n) { NVBLAST_ASSERT(m_allowAllocs); m_events.reserve(m_events.size() + n); } /** Add a listener to dispatch to. */ void addListener(TkEventListener& l) { m_listeners.pushBack(&l); } /** Remove a listener from dispatch list. */ void removeListener(TkEventListener& l) { m_listeners.findAndReplaceWithLast(&l); } /** Dispatch the stored events to the registered listeners. After dispatch, all data is invalidated. */ void dispatch() { dispatch(*this); reset(); } /** Proxy function to dispatch events to this queue's listeners. */ void dispatch(const Array<TkEvent>::type& events) const { if (events.size()) { for (TkEventListener* l : m_listeners) { BLAST_PROFILE_SCOPE_M("TkEventQueue::dispatch"); l->receive(events.begin(), events.size()); } } } private: /** Allocates and stores a block of size Bytes of payload data. */ void* allocDataBySize(size_t size) { void* memory = nullptr; if (size > 0) { memory = NVBLAST_ALLOC_NAMED(size, "TkEventQueue Data"); m_memory.pushBack(memory); } return memory; } Array<TkEvent>::type m_events; //!< holds events Array<void*>::type m_memory; //!< holds allocated data memory blocks std::atomic<uint32_t> m_currentEvent; //!< reference index for event insertion std::atomic<uint32_t> m_currentData; //!< reference index for data insertion size_t m_poolCapacity; //!< size of the currently active memory block (m_pool) uint8_t* m_pool; //!< the current memory block allocData() uses bool m_allowAllocs; //!< assert guard InlineArray<TkEventListener*,4>::type m_listeners; //!< objects to dispatch to }; } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTTKEVENTQUEUE_H
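// --- Illustrative usage sketch (appended; not part of the original header) ---
// Walks through the queue protocol described in the class comment above:
// preallocate, protect(true), fill from worker tasks, protect(false), dispatch.
// TkJointUpdateEvent is used as a concrete payload; its declaration (and TkJoint)
// is assumed to be available through the toolkit headers included above.
static void eventQueueProtocolSketch(Nv::Blast::TkEventListener& listener, Nv::Blast::TkJoint* someJoint)
{
    using namespace Nv::Blast;

    TkEventQueue queue;
    queue.addListener(listener);

    // Preallocate before going wide; allocations are not thread safe.
    const uint32_t expectedEvents = 8;
    queue.reserveEvents(expectedEvents);
    queue.reserveData(expectedEvents * sizeof(TkJointUpdateEvent));
    queue.protect(true);

    // This part may run from worker tasks (thread safe within the preallocated storage):
    TkJointUpdateEvent* e = queue.allocData<TkJointUpdateEvent>();
    e->joint = someJoint;
    e->subtype = TkJointUpdateEvent::Changed;
    queue.addEvent(e);

    // Back on the main thread: restore consistency, then deliver and reset.
    queue.protect(false);
    queue.dispatch();
}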
7,933
C
30.991935
136
0.621329
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkJointImpl.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTTKJOINTIMPL_H #define NVBLASTTKJOINTIMPL_H #include "NvBlastTkJoint.h" #include "NvBlastTkCommon.h" #include "NvBlastIndexFns.h" #include "NvBlastAssert.h" #include "NvBlastDLink.h" #include <atomic> namespace Nv { namespace Blast { // Forward declarations class TkActorImpl; class TkJointImpl; class TkFamilyImpl; class TkEventQueue; /** Double-sided link (DLink) which holds a reference back to a joint which contains it. */ struct TkJointLink : public DLink { TkJointImpl* m_joint; //!< The joint containing this link. }; /** Implementation of TkJoint. */ class TkJointImpl : public TkJoint { public: /** Blank constructor only creates valid TkJointLinks (point back to this object) */ TkJointImpl(); /** This constructor sets all internal data. If the joint is defined in an asset, the family instanced from that asset will own this joint, and the 'owner' parameter is that family. Otherwise, in the case where a joint is created from TkFramwork::createJoint, the joint is not owned by a family and 'owner' will be NULL. */ TkJointImpl(const TkJointDesc& desc, TkFamilyImpl* owner); // Begin TkObject virtual void release() override; // End TkObject // Begin TkJoint virtual const TkJointData getData() const override; // End TkJoint // Public API /** Internal method to access a const reference to the joint data. \return a const reference to the joint data. */ const TkJointData& getDataInternal() const; /** Internal method to access a non-const reference to the joint data. \return a non-const reference to the joint data. */ TkJointData& getDataWritable(); /** Set the actors that this joint attaches to. When the actors are different from the joint's current actors, an event will be generated on one of the actors' families event queues to signal the change. Alternatively, if alternateQueue is not NULL then it will be used to hold the event. 
If a non-NULL attached actor becomes NULL, then this joint will detach its references to both actors (if they exist) and send an event of subtype Unreferenced. This signals the user that the joint may be deleted. \param[in] actor0 The new TkActor to replace the first attached actor. \param[in] actor1 The new TkActor to replace the second attached actor. \param[in] alternateQueue If not NULL, this queue will be used to hold events generated by this function. */ void setActors(TkActorImpl* actor0, TkActorImpl* actor1, TkEventQueue* alternateQueue = nullptr); /** Ensures that any attached actors no longer refer to this joint. */ void removeReferencesInActors(); /** Ensures that any attached actors' families no longer refer to this joint. External joints (created using TkFramework::createJoint) are referenced by the attached actors' families. */ void removeReferencesInFamilies(); private: TkJointData m_data; //!< The data given to the user: attached actors, chunk indices, and actor-local attachment positions. TkJointLink m_links[2]; //!< One link for each actor in m_data.m_actors. If m_data.m_actors[0] == m_data.m_actors[1], then only m_links[0] is used. TkFamilyImpl* m_owner; //!< The owning family if this is an internal joint created during TkFramework::createActor() from a TkAssetDesc with joint flags. friend class TkFrameworkImpl; friend class TkFamilyImpl; friend class TkActorImpl; }; //////// TkJointImpl inline methods //////// NV_INLINE TkJointImpl::TkJointImpl() { m_links[0].m_joint = m_links[1].m_joint = this; } NV_INLINE const TkJointData& TkJointImpl::getDataInternal() const { return m_data; } NV_INLINE TkJointData& TkJointImpl::getDataWritable() { return m_data; } } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTTKJOINTIMPL_H
5,637
C
33.378049
162
0.710484
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkFamilyImpl.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTTKFAMILYIMPL_H #define NVBLASTTKFAMILYIMPL_H #include "NvBlastTkCommon.h" #include "NvBlastTkFamily.h" #include "NvBlastTkTypeImpl.h" #include "NvBlastTkActorImpl.h" #include "NvBlastTkEventQueue.h" #include "NvBlastHashSet.h" #include "NvBlastHashMap.h" #include "NvBlast.h" #include "NvBlastAssert.h" #include "NvBlastDLink.h" // Forward declarations struct NvBlastFamily; namespace Nv { namespace Blast { // Forward declarations class TkGroupImpl; class TkAssetImpl; NVBLASTTK_IMPL_DECLARE(Family) { public: TkFamilyImpl(); TkFamilyImpl(const NvBlastID& id); ~TkFamilyImpl(); NVBLASTTK_IMPL_DEFINE_IDENTIFIABLE('A', 'C', 'T', 'F'); // Begin TkFamily virtual const NvBlastFamily* getFamilyLL() const override; virtual uint32_t getActorCount() const override; virtual uint32_t getActors(TkActor** buffer, uint32_t bufferSize, uint32_t indexStart = 0) const override; virtual void addListener(TkEventListener& l) override { m_queue.addListener(l); } virtual void removeListener(TkEventListener& l) override { m_queue.removeListener(l); } virtual void applyFracture(const NvBlastFractureBuffers* commands) override { applyFractureInternal(commands); } virtual const TkAsset* getAsset() const override; virtual void reinitialize(const NvBlastFamily* newFamily, TkGroup* group) override; // End TkFamily // Public methods static TkFamilyImpl* create(const TkAssetImpl* asset); const TkAssetImpl* getAssetImpl() const; NvBlastFamily* getFamilyLLInternal() const; uint32_t getActorCountInternal() const; TkActorImpl* addActor(NvBlastActor* actorLL); void applyFractureInternal(const NvBlastFractureBuffers* commands); void removeActor(TkActorImpl* actorLL); TkEventQueue& getQueue() { return m_queue; } TkActorImpl* getActorByActorLL(const NvBlastActor* actorLL); void updateJoints(TkActorImpl* actor, TkEventQueue* alternateQueue = nullptr); Array<TkActorImpl>::type& getActorsInternal(); uint32_t getInternalJointCount() const; TkJointImpl* getInternalJoints() const; TkJointImpl** 
createExternalJointHandle(const NvBlastID& otherFamilyID, uint32_t chunkIndex0, uint32_t chunkIndex1); bool deleteExternalJointHandle(TkJointImpl*& joint, const NvBlastID& otherFamilyID, uint32_t chunkIndex0, uint32_t chunkIndex1); void releaseJoint(TkJointImpl& joint); TkActorImpl* getActorByChunk(uint32_t chunkIndex); typedef nvidia::shdfnd::Pair<uint32_t, uint32_t> ExternalJointKey; //!< The chunk indices within the TkFamily objects joined by the joint. These chunks will be support chunks. TkJointImpl* findExternalJoint(const TkFamilyImpl* otherFamily, ExternalJointKey key) const; private: TkActorImpl* getActorByIndex(uint32_t index); struct JointSet { NvBlastID m_familyID; HashMap<ExternalJointKey, TkJointImpl*>::type m_joints; }; typedef HashMap<NvBlastID, uint32_t>::type FamilyIDMap; NvBlastFamily* m_familyLL; Array<TkActorImpl>::type m_actors; uint32_t m_internalJointCount; Array<uint8_t>::type m_internalJointBuffer; Array<JointSet*>::type m_jointSets; FamilyIDMap m_familyIDMap; const TkAssetImpl* m_asset; TkEventQueue m_queue; }; //////// TkFamilyImpl inline methods //////// NV_INLINE const TkAssetImpl* TkFamilyImpl::getAssetImpl() const { return m_asset; } NV_INLINE NvBlastFamily* TkFamilyImpl::getFamilyLLInternal() const { return m_familyLL; } NV_INLINE uint32_t TkFamilyImpl::getActorCountInternal() const { NVBLAST_ASSERT(m_familyLL != nullptr); return NvBlastFamilyGetActorCount(m_familyLL, logLL); } NV_INLINE TkActorImpl* TkFamilyImpl::getActorByIndex(uint32_t index) { NVBLAST_ASSERT(index < m_actors.size()); return &m_actors[index]; } NV_INLINE TkActorImpl* TkFamilyImpl::getActorByActorLL(const NvBlastActor* actorLL) { uint32_t index = NvBlastActorGetIndex(actorLL, logLL); return getActorByIndex(index); } NV_INLINE Array<TkActorImpl>::type& TkFamilyImpl::getActorsInternal() { return m_actors; } NV_INLINE uint32_t TkFamilyImpl::getInternalJointCount() const { return m_internalJointCount; } NV_INLINE TkJointImpl* TkFamilyImpl::getInternalJoints() const { return const_cast<TkJointImpl*>(reinterpret_cast<const TkJointImpl*>(m_internalJointBuffer.begin())); } NV_INLINE void TkFamilyImpl::releaseJoint(TkJointImpl& joint) { NVBLAST_ASSERT(joint.m_owner == this); NVBLAST_ASSERT(&joint >= getInternalJoints() && &joint < getInternalJoints() + getInternalJointCount() * sizeof(TkJointImpl)); joint.~TkJointImpl(); joint.m_owner = nullptr; } //////// Inline global functions //////// NV_INLINE const NvBlastID& getFamilyID(const TkActor* actor) { return actor != nullptr ? static_cast<const TkActorImpl*>(actor)->getFamilyImpl().getIDInternal() : *reinterpret_cast<const NvBlastID*>("\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"); } NV_INLINE const NvBlastID& getFamilyID(const TkFamilyImpl* family) { return family != nullptr ? family->getIDInternal() : *reinterpret_cast<const NvBlastID*>("\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"); } } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTTKFAMILYIMPL_H
7,463
C
31.593886
182
0.679619
NVIDIA-Omniverse/PhysX/blast/source/sdk/globals/NvBlastInternalProfiler.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTINTERNALPROFILER_H #define NVBLASTINTERNALPROFILER_H #include "NvPreprocessor.h" #if NV_NVTX #include "nvToolsExt.h" NV_INLINE void platformZoneStart(const char* name) { nvtxRangePushA(name); } NV_INLINE void platformZoneEnd() { nvtxRangePop(); } #else NV_INLINE void platformZoneStart(const char*) { } NV_INLINE void platformZoneEnd() { } #endif namespace Nv { namespace Blast { /** Profiler detail to be reported. The higher setting is used, the more details are reported. 
*/ struct InternalProfilerDetail { enum Level { LOW, MEDIUM, HIGH }; }; NV_C_API void NvBlastInternalProfilerSetPlatformEnabled(bool platformEnabled); NV_C_API void NvBlastInternalProfilerSetDetail(Nv::Blast::InternalProfilerDetail::Level); NV_C_API Nv::Blast::InternalProfilerDetail::Level NvBlastInternalProfilerGetDetail(); #if NV_PROFILE NV_C_API void NvBlastProfilerBegin(const char* name, Nv::Blast::InternalProfilerDetail::Level); NV_C_API void NvBlastProfilerEnd(const void* name, Nv::Blast::InternalProfilerDetail::Level); class ProfileScope { public: ProfileScope(const char* name, InternalProfilerDetail::Level level) :m_name(name), m_level(level) { NvBlastProfilerBegin(m_name, m_level); } ~ProfileScope() { NvBlastProfilerEnd(m_name, m_level); } private: const char* m_name; InternalProfilerDetail::Level m_level; }; #define BLAST_PROFILE_PREFIX "Blast: " #define BLAST_PROFILE_ZONE_BEGIN(name) Nv::Blast::NvBlastProfilerBegin(BLAST_PROFILE_PREFIX name, Nv::Blast::InternalProfilerDetail::HIGH) #define BLAST_PROFILE_ZONE_END(name) Nv::Blast::NvBlastProfilerEnd(BLAST_PROFILE_PREFIX name, Nv::Blast::InternalProfilerDetail::HIGH) #define BLAST_PROFILE_SCOPE(name, detail) Nv::Blast::ProfileScope NV_CONCAT(_scope,__LINE__) (BLAST_PROFILE_PREFIX name, detail) #define BLAST_PROFILE_SCOPE_L(name) BLAST_PROFILE_SCOPE(name, Nv::Blast::InternalProfilerDetail::LOW) #define BLAST_PROFILE_SCOPE_M(name) BLAST_PROFILE_SCOPE(name, Nv::Blast::InternalProfilerDetail::MEDIUM) #define BLAST_PROFILE_SCOPE_H(name) BLAST_PROFILE_SCOPE(name, Nv::Blast::InternalProfilerDetail::HIGH) #else #define BLAST_PROFILE_ZONE_BEGIN(name) #define BLAST_PROFILE_ZONE_END(name) #define BLAST_PROFILE_SCOPE_L(name) #define BLAST_PROFILE_SCOPE_M(name) #define BLAST_PROFILE_SCOPE_H(name) #endif } // namespace Blast } // namespace Nv #endif
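// --- Illustrative usage sketch (appended; not part of the original header) ---
// Shows how the profiling macros and the global detail/platform switches above are
// meant to be used. Zone names are arbitrary strings chosen for this example; all of
// the macros compile to nothing when NV_PROFILE is not defined.
static void profiledWorkSketch()
{
    BLAST_PROFILE_SCOPE_L("Example: outer work");            // LOW detail: reported at any detail setting
    for (int i = 0; i < 4; ++i)
    {
        BLAST_PROFILE_SCOPE_H("Example: inner iteration");   // HIGH detail: reported only when detail is HIGH
        // ... work ...
    }
}

static void enableDetailedProfilingSketch()
{
    Nv::Blast::NvBlastInternalProfilerSetDetail(Nv::Blast::InternalProfilerDetail::HIGH);
    Nv::Blast::NvBlastInternalProfilerSetPlatformEnabled(true);   // also emit platform (NVTX) ranges when built with NV_NVTX
}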
4,037
C
35.709091
143
0.747089
NVIDIA-Omniverse/PhysX/blast/source/sdk/globals/NvBlastGlobals.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastGlobals.h" #include "NvBlastAssert.h" #include "NvAllocatorCallback.h" #include "NvErrorCallback.h" #include "NsGlobals.h" #include <cstdlib> #include <sstream> #include <iostream> #if NV_WINDOWS_FAMILY #include <windows.h> #endif #if NV_WINDOWS_FAMILY || NV_LINUX_FAMILY #include <malloc.h> #endif namespace Nv { namespace Blast { #if NV_WINDOWS_FAMILY // on win32 we only have 8-byte alignment guaranteed, but the CRT provides special aligned allocation fns NV_FORCE_INLINE void* platformAlignedAlloc(size_t size) { return _aligned_malloc(size, 16); } NV_FORCE_INLINE void platformAlignedFree(void* ptr) { _aligned_free(ptr); } #elif NV_LINUX_FAMILY NV_FORCE_INLINE void* platformAlignedAlloc(size_t size) { return ::memalign(16, size); } NV_FORCE_INLINE void platformAlignedFree(void* ptr) { ::free(ptr); } #else NV_FORCE_INLINE void* platformAlignedAlloc(size_t size) { const int A = 16; unsigned char* mem = (unsigned char*)malloc(size + A); const unsigned char offset = (unsigned char)((uintptr_t)A - (uintptr_t)mem % A - 1); mem += offset; *mem++ = offset; return mem; } NV_FORCE_INLINE void platformAlignedFree(void* ptr) { if (ptr != nullptr) { unsigned char* mem = (unsigned char*)ptr; const unsigned char offset = *--mem; ::free(mem - offset); } } #endif class DefaultAllocatorCallback : public nvidia::NvAllocatorCallback { public: virtual void* allocate(size_t size, const char* typeName, const char* filename, int line) override { NV_UNUSED(typeName); NV_UNUSED(filename); NV_UNUSED(line); return platformAlignedAlloc(size); } virtual void deallocate(void* ptr) override { platformAlignedFree(ptr); } }; DefaultAllocatorCallback s_defaultAllocatorCallback; class DefaultErrorCallback : public nvidia::NvErrorCallback { virtual void reportError(nvidia::NvErrorCode::Enum code, const char* msg, const char* file, int line) override { #if 1 || NV_DEBUG || NV_CHECKED std::stringstream str; str << "NvBlast "; bool critical = false; switch (code) { case 
nvidia::NvErrorCode::eNO_ERROR: str << "[Info]"; critical = false; break; case nvidia::NvErrorCode::eDEBUG_INFO: str << "[Debug Info]"; critical = false; break; case nvidia::NvErrorCode::eDEBUG_WARNING: str << "[Debug Warning]"; critical = false; break; case nvidia::NvErrorCode::eINVALID_PARAMETER: str << "[Invalid Parameter]"; critical = true; break; case nvidia::NvErrorCode::eINVALID_OPERATION: str << "[Invalid Operation]"; critical = true; break; case nvidia::NvErrorCode::eOUT_OF_MEMORY: str << "[Out of] Memory"; critical = true; break; case nvidia::NvErrorCode::eINTERNAL_ERROR: str << "[Internal Error]"; critical = true; break; case nvidia::NvErrorCode::eABORT: str << "[Abort]"; critical = true; break; case nvidia::NvErrorCode::ePERF_WARNING: str << "[Perf Warning]"; critical = false; break; default: NVBLAST_ASSERT(false); } str << file << "(" << line << "): " << msg << "\n"; std::string message = str.str(); std::cout << message; #if NV_WINDOWS_FAMILY OutputDebugStringA(message.c_str()); #endif NVBLAST_ASSERT_WITH_MESSAGE(!critical, message.c_str()); #else NV_UNUSED(code); NV_UNUSED(msg); NV_UNUSED(file); NV_UNUSED(line); #endif } }; static DefaultErrorCallback s_defaultErrorCallback; static nvidia::NvAllocatorCallback* s_allocatorCallback = &s_defaultAllocatorCallback; static nvidia::NvErrorCallback* s_errorCallback = &s_defaultErrorCallback; nvidia::NvProfilerCallback *g_profilerCallback = nullptr; } // namespace Blast } // namespace Nv //////// Global API implementation //////// nvidia::NvAllocatorCallback* NvBlastGlobalGetAllocatorCallback() { return Nv::Blast::s_allocatorCallback; } void NvBlastGlobalSetAllocatorCallback(nvidia::NvAllocatorCallback* allocator) { Nv::Blast::s_allocatorCallback = allocator ? allocator : &Nv::Blast::s_defaultAllocatorCallback; } nvidia::NvErrorCallback* NvBlastGlobalGetErrorCallback() { return Nv::Blast::s_errorCallback; } void NvBlastGlobalSetErrorCallback(nvidia::NvErrorCallback* errorCallback) { Nv::Blast::s_errorCallback = errorCallback ? errorCallback : &Nv::Blast::s_defaultErrorCallback; } nvidia::NvProfilerCallback* NvBlastGlobalGetProfilerCallback() { return Nv::Blast::g_profilerCallback; } void NvBlastGlobalSetProfilerCallback(nvidia::NvProfilerCallback* profilerCallback) { Nv::Blast::g_profilerCallback = profilerCallback; }
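// --- Illustrative usage sketch (appended; not part of the original source) ---
// Installs a custom error callback through the global API defined above. The
// fprintf-to-stderr sink is just an example; any nvidia::NvErrorCallback works.
#include <cstdio>

class PrintfErrorCallback : public nvidia::NvErrorCallback
{
public:
    virtual void reportError(nvidia::NvErrorCode::Enum code, const char* msg, const char* file, int line) override
    {
        std::fprintf(stderr, "[Blast error %d] %s (%s:%d)\n", static_cast<int>(code), msg, file, line);
    }
};

static PrintfErrorCallback s_printfErrorCallback;

static void installCustomErrorCallbackSketch()
{
    NvBlastGlobalSetErrorCallback(&s_printfErrorCallback);

    // Passing nullptr restores the default callback defined in this file:
    // NvBlastGlobalSetErrorCallback(nullptr);
}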
6,403
C++
32.181347
114
0.689989
NVIDIA-Omniverse/PhysX/blast/source/sdk/globals/NvBlastInternalProfiler.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "stdint.h" #include "NvProfiler.h" #include "NvBlastGlobals.h" #include "NvBlastInternalProfiler.h" #include "NvBlastAssert.h" namespace Nv { namespace Blast { #define SUPPORTS_THREAD_LOCAL (!NV_VC || NV_VC > 12) struct InternalProfilerData { const char* name; void* data; }; #if SUPPORTS_THREAD_LOCAL static const int32_t PROFILER_MAX_NESTED_DEPTH = 64; static thread_local InternalProfilerData th_ProfileData[PROFILER_MAX_NESTED_DEPTH]; static thread_local int32_t th_depth = 0; #endif extern nvidia::NvProfilerCallback *g_profilerCallback; /** Wraps the nvidia::NvProfilerCallback set in NvBlastGlobalSetProfilerCallback. */ class InternalProfiler { public: /** Construct a InternalProfiler with platform specific profiler signals disabled. */ InternalProfiler() : m_platformEnabled(false) {} void zoneStart(const char* name) { #if SUPPORTS_THREAD_LOCAL if (g_profilerCallback) { void* data = g_profilerCallback->zoneStart(name, false, 0xb1a57); if (th_depth < PROFILER_MAX_NESTED_DEPTH && th_depth >= 0) { th_ProfileData[th_depth].name = name; th_ProfileData[th_depth].data = data; th_depth++; } else { NVBLAST_ASSERT(th_depth < PROFILER_MAX_NESTED_DEPTH && th_depth >= 0); } } #endif if (m_platformEnabled) { platformZoneStart(name); } } void zoneEnd() { #if SUPPORTS_THREAD_LOCAL if (g_profilerCallback) { th_depth--; if (th_depth >= 0) { InternalProfilerData& pd = th_ProfileData[th_depth]; g_profilerCallback->zoneEnd(pd.data, pd.name, false, 0xb1a57); } else { NVBLAST_ASSERT(th_depth >= 0); } } #endif if (m_platformEnabled) { platformZoneEnd(); } } ////// local interface ////// /** Enable or disable platform specific profiler signals. Disabled by default. \param[in] enabled true enables, false disables platform profiler calls. 
*/ void setPlatformEnabled(bool enabled) { m_platformEnabled = enabled; } private: bool m_platformEnabled; }; static InternalProfiler g_InternalProfiler; static InternalProfilerDetail::Level g_ProfilerDetail = InternalProfilerDetail::LOW; void NvBlastInternalProfilerSetPlatformEnabled(bool platformEnabled) { return g_InternalProfiler.setPlatformEnabled(platformEnabled); } void NvBlastInternalProfilerSetDetail(InternalProfilerDetail::Level level) { g_ProfilerDetail = level; } InternalProfilerDetail::Level NvBlastProfilerGetDetail() { return g_ProfilerDetail; } void NvBlastProfilerBegin(const char* name, InternalProfilerDetail::Level level) { if (level <= NvBlastProfilerGetDetail()) { g_InternalProfiler.zoneStart(name); } } void NvBlastProfilerEnd(const void* /*name*/, InternalProfilerDetail::Level level) { if (level <= NvBlastProfilerGetDetail()) { g_InternalProfiler.zoneEnd(); } } } // namespace Blast } // namespace Nv
4,824
C++
27.052325
86
0.67558
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastFixedPriorityQueue.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTFIXEDPRIORITYQUEUE_H #define NVBLASTFIXEDPRIORITYQUEUE_H #include "NvBlastAssert.h" #include "NvBlastMemory.h" namespace Nv { namespace Blast { /*! FixedPriorityQueue is a priority queue container which is intended to be used with placement new on chunk of memory. It'll use following memory for data layout. As follows: // some memory char ​*buf = new char[64 *​ 1024]; // placement new on this memory FixedPriorityQueue<SomeClass>* arr = new (buf) FixedPriorityQueue<SomeClass>(); // you can get max requiredMemorySize by an array of 'capacity' elements count to use memory left buf = buf + FixedPriorityQueue<SomeClass>::requiredMemorySize(capacity); buf: +------------------------------------------------------------+ | uint32_t | T[0] | T[1] | T[2] | ... | +------------------------------------------------------------+ */ template <typename A> struct Less { bool operator()(const A& a, const A& b) const { return a < b; } }; template<class Element, class Comparator = Less<Element> > class FixedPriorityQueue : protected Comparator // inherit so that stateless comparators take no space { public: FixedPriorityQueue(const Comparator& less = Comparator()) : Comparator(less), mHeapSize(0) { } ~FixedPriorityQueue() { } static size_t requiredMemorySize(uint32_t capacity) { return align16(sizeof(FixedPriorityQueue<Element, Comparator>)) + align16(capacity * sizeof(Element)); } //! Get the element with the highest priority const Element top() const { return data()[0]; } //! Get the element with the highest priority Element top() { return data()[0]; } //! Check to whether the priority queue is empty bool empty() const { return (mHeapSize == 0); } //! Empty the priority queue void clear() { mHeapSize = 0; } //! Insert a new element into the priority queue. 
Only valid when size() is less than Capacity void push(const Element& value) { uint32_t newIndex; uint32_t parentIndex = parent(mHeapSize); for (newIndex = mHeapSize; newIndex > 0 && compare(value, data()[parentIndex]); newIndex = parentIndex, parentIndex= parent(newIndex)) { data()[ newIndex ] = data()[parentIndex]; } data()[newIndex] = value; mHeapSize++; NVBLAST_ASSERT(valid()); } //! Delete the highest priority element. Only valid when non-empty. Element pop() { NVBLAST_ASSERT(mHeapSize > 0); uint32_t i, child; //try to avoid LHS uint32_t tempHs = mHeapSize-1; mHeapSize = tempHs; Element min = data()[0]; Element last = data()[tempHs]; for (i = 0; (child = left(i)) < tempHs; i = child) { /* Find highest priority child */ const uint32_t rightChild = child + 1; child += ((rightChild < tempHs) & compare((data()[rightChild]), (data()[child]))) ? 1 : 0; if(compare(last, data()[child])) break; data()[i] = data()[child]; } data()[ i ] = last; NVBLAST_ASSERT(valid()); return min; } //! Make sure the priority queue sort all elements correctly bool valid() const { const Element& min = data()[0]; for(uint32_t i=1; i<mHeapSize; ++i) { if(compare(data()[i], min)) return false; } return true; } //! Return number of elements in the priority queue uint32_t size() const { return mHeapSize; } private: uint32_t mHeapSize; NV_FORCE_INLINE Element* data() { return (Element*)((char*)this + sizeof(FixedPriorityQueue<Element, Comparator>)); } NV_FORCE_INLINE Element* data() const { return (Element*)((char*)this + sizeof(FixedPriorityQueue<Element, Comparator>)); } bool compare(const Element& a, const Element& b) const { return Comparator::operator()(a,b); } static uint32_t left(uint32_t nodeIndex) { return (nodeIndex << 1) + 1; } static uint32_t parent(uint32_t nodeIndex) { return (nodeIndex - 1) >> 1; } FixedPriorityQueue<Element, Comparator>& operator = (const FixedPriorityQueue<Element, Comparator>); }; } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTFIXEDPRIORITYQUEUE_H
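// --- Illustrative usage sketch (appended; not part of the original header) ---
// The placement-new pattern described in the class comment above, shown with int
// elements and the default Less<int> comparator (smallest value = highest priority).
// Buffer sizes and the use of new[]/delete[] are example choices for this sketch.
#include <new>

static void fixedPriorityQueueSketch()
{
    using namespace Nv::Blast;

    const uint32_t capacity = 128;
    char* buffer = new char[FixedPriorityQueue<int>::requiredMemorySize(capacity)];

    // Construct the queue header in place; elements are stored directly after it.
    FixedPriorityQueue<int>* queue = new (buffer) FixedPriorityQueue<int>();

    queue->push(5);
    queue->push(1);
    queue->push(3);

    while (!queue->empty())
    {
        const int smallest = queue->pop();  // pops 1, then 3, then 5
        (void)smallest;
    }

    queue->~FixedPriorityQueue();           // no dynamic members, but run the destructor for form
    delete[] buffer;
}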
6,160
C
28.338095
143
0.621753
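The FixedPriorityQueue above is intended to be constructed with placement new on a caller-provided memory chunk, as its doc comment describes. Below is a minimal usage sketch under that pattern; the buffer size, element type, and values are illustrative only, and with the default Less comparator pop() returns elements in ascending order.

#include "NvBlastFixedPriorityQueue.h"
#include <cstdint>
#include <cstdio>
#include <new>
#include <vector>

int main()
{
    using Nv::Blast::FixedPriorityQueue;

    const uint32_t capacity = 8;
    // One contiguous chunk: the queue header first, then room for 'capacity' elements.
    std::vector<char> buf(FixedPriorityQueue<int>::requiredMemorySize(capacity));

    // Placement-new the queue at the start of the chunk.
    FixedPriorityQueue<int>* q = new (buf.data()) FixedPriorityQueue<int>();

    q->push(5);
    q->push(1);
    q->push(3);

    // Default Less<int> comparator: the smallest element has the highest priority.
    while (!q->empty())
        printf("%d\n", q->pop());   // prints 1, then 3, then 5

    return 0;
}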
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastTime.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTTIME_H #define NVBLASTTIME_H #include "NvBlastTypes.h" namespace Nv { namespace Blast { class Time { public: Time() : m_lastTickCount(getTimeTicks()) {} int64_t getElapsedTicks() { const int64_t lastTickCount = m_lastTickCount; m_lastTickCount = getTimeTicks(); return m_lastTickCount - lastTickCount; } int64_t peekElapsedTicks() const { return getTimeTicks() - m_lastTickCount; } int64_t getLastTickCount() const { return m_lastTickCount; } static double seconds(int64_t ticks) { return s_secondsPerTick * ticks; } private: int64_t getTimeTicks() const; static double getTickDuration(); int64_t m_lastTickCount; static const double s_secondsPerTick; }; } // namespace Blast } // namespace Nv //////// Time inline functions for various platforms //////// #if NV_MICROSOFT_FAMILY #include "NvBlastIncludeWindows.h" NV_INLINE int64_t Nv::Blast::Time::getTimeTicks() const { LARGE_INTEGER a; QueryPerformanceCounter(&a); return a.QuadPart; } NV_INLINE double Nv::Blast::Time::getTickDuration() { LARGE_INTEGER a; QueryPerformanceFrequency(&a); return 1.0 / (double)a.QuadPart; } #elif NV_UNIX_FAMILY #include <time.h> NV_INLINE int64_t Nv::Blast::Time::getTimeTicks() const { struct timespec mCurrTimeInt; clock_gettime(CLOCK_REALTIME, &mCurrTimeInt); return (static_cast<int64_t>(mCurrTimeInt.tv_sec) * 1000000000) + (static_cast<int64_t>(mCurrTimeInt.tv_nsec)); } NV_INLINE double Nv::Blast::Time::getTickDuration() { return 1.e-9; } #endif #endif // #ifndef NVBLASTTIME_H
3,279
C
27.034188
115
0.706618
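A small sketch of the Time helper above, assuming the Blast SDK library (which defines s_secondsPerTick) is linked; the workload being timed is a placeholder loop.

#include "NvBlastTime.h"
#include <cstdint>
#include <cstdio>

int main()
{
    Nv::Blast::Time timer;                 // records the current tick count

    volatile double acc = 0.0;             // placeholder workload
    for (int i = 0; i < 1000000; ++i)
        acc += i * 0.5;

    const int64_t ticks = timer.getElapsedTicks();               // elapsed ticks, also resets the reference point
    printf("elapsed: %f s\n", Nv::Blast::Time::seconds(ticks));  // convert ticks to seconds
    return 0;
}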
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastFixedBoolArray.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTFIXEDBOOLARRAY_H #define NVBLASTFIXEDBOOLARRAY_H #include "NvBlastAssert.h" #include "NvBlastMemory.h" #include <cstring> namespace Nv { namespace Blast { /*! FixedBoolArray is an array of bools of fixed size, it's intended to be used with placement new on chunk of memory. It'll use following memory for data layout. As follows: // some memory char ​*buf = new char[64 *​ 1024]; const uint32_t size = 100; // placement new on this memory FixedBoolArray* arr = new (buf) FixedBoolArray(size); // you can get max requiredMemorySize by an bitMap to use memory left buf = buf + FixedBoolArray<SomeClass>::requiredMemorySize(size); buf: +------------------------------------------------------------+ | uint32_t | bool0 | bool1 | bool2 | ... | +------------------------------------------------------------+ */ class FixedBoolArray { public: explicit FixedBoolArray(uint32_t size) { m_size = size; } static size_t requiredMemorySize(uint32_t size) { return align16(sizeof(FixedBoolArray)) + align16(size); } void clear() { memset(data(), 0, m_size); } void fill() { memset(data(), 1, m_size); } int test(uint32_t index) const { NVBLAST_ASSERT(index < m_size); return data()[index]; } void set(uint32_t index) { NVBLAST_ASSERT(index < m_size); data()[index] = 1; } void setData(const char* newData, uint32_t newSize) { m_size = newSize; memcpy(data(), newData, m_size); } const char* getData() const { return data(); } uint32_t getSize() const { return m_size; } void reset(uint32_t index) { NVBLAST_ASSERT(index < m_size); data()[index] = 0; } private: uint32_t m_size; NV_FORCE_INLINE char* data() { return ((char*)this + sizeof(FixedBoolArray)); } NV_FORCE_INLINE const char* data() const { return ((char*)this + sizeof(FixedBoolArray)); } private: FixedBoolArray(const FixedBoolArray& that); }; } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTFIXEDBOOLARRAY_H
3,777
C
25.985714
114
0.65237
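FixedBoolArray is likewise placement-new'd into a caller-provided chunk. A minimal sketch follows; note that the constructor only stores the size, so clear() is called before any flag is read. Sizes and indices are illustrative.

#include "NvBlastFixedBoolArray.h"
#include <cstdint>
#include <cstdio>
#include <new>
#include <vector>

int main()
{
    using Nv::Blast::FixedBoolArray;

    const uint32_t count = 100;
    std::vector<char> buf(FixedBoolArray::requiredMemorySize(count));

    FixedBoolArray* flags = new (buf.data()) FixedBoolArray(count);
    flags->clear();                                                 // all flags false
    flags->set(42);
    printf("%d %d\n", flags->test(42) != 0, flags->test(7) != 0);   // 1 0
    flags->reset(42);
    return 0;
}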
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastMath.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTMATH_H #define NVBLASTMATH_H #include <math.h> namespace Nv { namespace Blast { namespace VecMath { NV_INLINE void div(float a[3], float divisor) { for (int i = 0; i < 3; i++) a[i] /= divisor; } NV_INLINE void mul(float a[3], float multiplier) { for (int i = 0; i < 3; i++) a[i] *= multiplier; } NV_INLINE void add(const float a[3], float b[3]) { for (int i = 0; i < 3; i++) b[i] = a[i] + b[i]; } NV_INLINE void add(const float a[3], const float b[3], float r[3]) { for (int i = 0; i < 3; i++) r[i] = a[i] + b[i]; } NV_INLINE void sub(const float a[3], const float b[3], float r[3]) { for (int i = 0; i < 3; i++) r[i] = a[i] - b[i]; } NV_INLINE float dot(const float a[3], const float b[3]) { float r = 0; for (int i = 0; i < 3; i++) r += a[i] * b[i]; return r; } NV_INLINE float length(const float a[3]) { return sqrtf(dot(a, a)); } NV_INLINE float dist(const float a[3], const float b[3]) { float v[3]; sub(a, b, v); return length(v); } NV_INLINE float normal(const float a[3], float r[3]) { float d = length(a); for (int i = 0; i < 3; i++) r[i] = a[i] / d; return d; } } // namespace VecMath } // namespace Blast } // namespace Nv #endif // #ifndef NVBLASTMATH_H
2,884
C
25.46789
74
0.660194
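A short sketch exercising the VecMath helpers above on plain float[3] vectors; the input values are illustrative. normal() writes the unit vector into its second argument and returns the original length.

#include "NvBlastMath.h"
#include <cstdio>

int main()
{
    using namespace Nv::Blast::VecMath;

    const float a[3] = { 3.0f, 0.0f, 4.0f };
    const float b[3] = { 0.0f, 0.0f, 0.0f };

    float n[3];
    const float len = normal(a, n);   // writes a/|a| into n, returns |a| = 5
    printf("length=%f dist=%f dot(n,n)=%f\n", len, dist(a, b), dot(n, n));
    return 0;
}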
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastFixedQueue.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTFIXEDQUEUE_H #define NVBLASTFIXEDQUEUE_H #include "NvBlastAssert.h" #include "NvBlastMemory.h" namespace Nv { namespace Blast { /*! FixedQueue is a queue container which is intended to be used with placement new on chunk of memory. It'll use following memory for data layout. As follows: // some memory char ​*buf = new char[64 *​ 1024]; // placement new on this memory FixedQueue<SomeClass>* arr = new (buf) FixedQueue<SomeClass>(); // you can get max requiredMemorySize by an array of 'capacity' elements count to use memory left buf = buf + FixedQueue<SomeClass>::requiredMemorySize(capacity); */ template <class T> class FixedQueue { public: explicit FixedQueue(uint32_t maxEntries) : m_num(0), m_head(0), m_tail(0), m_maxEntries(maxEntries) { } static size_t requiredMemorySize(uint32_t capacity) { return align16(sizeof(FixedQueue<T>)) + align16(capacity * sizeof(T)); } T popFront() { NVBLAST_ASSERT(m_num>0); m_num--; T& element = data()[m_tail]; m_tail = (m_tail+1) % (m_maxEntries); return element; } T front() { NVBLAST_ASSERT(m_num>0); return data()[m_tail]; } T popBack() { NVBLAST_ASSERT(m_num>0); m_num--; m_head = (m_head-1) % (m_maxEntries); return data()[m_head]; } T back() { NVBLAST_ASSERT(m_num>0); uint32_t headAccess = (m_head-1) % (m_maxEntries); return data()[headAccess]; } bool pushBack(const T& element) { if (m_num == m_maxEntries) return false; data()[m_head] = element; m_num++; m_head = (m_head+1) % (m_maxEntries); return true; } bool empty() const { return m_num == 0; } uint32_t size() const { return m_num; } private: uint32_t m_num; uint32_t m_head; uint32_t m_tail; uint32_t m_maxEntries; T* data() { return (T*)((char*)this + sizeof(FixedQueue<T>)); } private: FixedQueue(const FixedQueue& that); }; } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTFIXEDQUEUE_H
3,777
C
25.794326
103
0.657665
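FixedQueue follows the same placement-new pattern; the capacity and values below are illustrative. pushBack() returns false once maxEntries elements are held, and popFront() yields elements in FIFO order.

#include "NvBlastFixedQueue.h"
#include <cstdint>
#include <cstdio>
#include <new>
#include <vector>

int main()
{
    using Nv::Blast::FixedQueue;

    const uint32_t capacity = 4;
    std::vector<char> buf(FixedQueue<int>::requiredMemorySize(capacity));

    FixedQueue<int>* q = new (buf.data()) FixedQueue<int>(capacity);
    q->pushBack(10);
    q->pushBack(20);
    q->pushBack(30);

    const int first  = q->popFront();                      // 10
    const int second = q->popFront();                      // 20
    printf("%d %d size=%u\n", first, second, q->size());   // 10 20 size=1
    return 0;
}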
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastFixedArray.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTFIXEDARRAY_H #define NVBLASTFIXEDARRAY_H #include "NvBlastAssert.h" #include "NvBlastMemory.h" namespace Nv { namespace Blast { /*! FixedArray is a sequential container which is intended to be used with placement new on chunk of memory. It'll use following memory for data layout. As follows: // some memory char ​*buf = new char[64 *​ 1024]; // placement new on this memory FixedArray<SomeClass>* arr = new (buf) FixedArray<SomeClass>(); // you can get max requiredMemorySize by an array of 'capacity' elements count to use memory left buf = buf + FixedArray<SomeClass>::requiredMemorySize(capacity); buf: +------------------------------------------------------------+ | uint32_t | T[0] | T[1] | T[2] | ... 
| +------------------------------------------------------------+ !!!TODO: - check ctor/dtor of elements calls */ template <class T> class FixedArray { public: explicit FixedArray() : m_size(0) { } static size_t requiredMemorySize(uint32_t capacity) { return align16(sizeof(FixedArray<T>)) + align16(capacity * sizeof(T)); } NV_FORCE_INLINE T& pushBack(T& t) { new (data() + m_size) T(t); return data()[m_size++]; } T popBack() { NVBLAST_ASSERT(m_size); T t = data()[m_size - 1]; data()[--m_size].~T(); return t; } void clear() { for(T* first = data(); first < data() + m_size; ++first) first->~T(); m_size = 0; } NV_FORCE_INLINE void forceSize_Unsafe(uint32_t s) { m_size = s; } NV_FORCE_INLINE T& operator[](uint32_t idx) { NVBLAST_ASSERT(idx < m_size); return data()[idx]; } NV_FORCE_INLINE const T& operator[](uint32_t idx) const { NVBLAST_ASSERT(idx < m_size); return data()[idx]; } NV_FORCE_INLINE T& at(uint32_t idx) { NVBLAST_ASSERT(idx < m_size); return data()[idx]; } NV_FORCE_INLINE const T& at(uint32_t idx) const { NVBLAST_ASSERT(idx < m_size); return data()[idx]; } NV_FORCE_INLINE uint32_t size() const { return m_size; } private: uint32_t m_size; NV_FORCE_INLINE T* data() { return (T*)((char*)this + sizeof(FixedArray<T>)); } private: FixedArray(const FixedArray& that); }; } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTFIXEDARRAY_H
4,026
C
26.582192
104
0.635122
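A minimal placement-new sketch for FixedArray; note that pushBack() takes a non-const reference, so an lvalue is passed. The capacity and values are illustrative.

#include "NvBlastFixedArray.h"
#include <cstdint>
#include <cstdio>
#include <new>
#include <vector>

int main()
{
    using Nv::Blast::FixedArray;

    const uint32_t capacity = 16;
    std::vector<char> buf(FixedArray<float>::requiredMemorySize(capacity));

    FixedArray<float>* arr = new (buf.data()) FixedArray<float>();

    for (uint32_t i = 0; i < 4; ++i)
    {
        float v = 0.5f * i;     // lvalue required by pushBack(T&)
        arr->pushBack(v);
    }

    printf("size=%u arr[2]=%f\n", arr->size(), (*arr)[2]);   // size=4 arr[2]=1.0
    arr->clear();               // runs element destructors, size back to 0
    return 0;
}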
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastIteratorBase.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTITERATORBASE_H #define NVBLASTITERATORBASE_H #include "NvBlastIndexFns.h" namespace Nv { namespace Blast { /** Common functionality and implementation for iterators over an index, using invalidIndex<T>() to indicate termination. Derived class needs to implement increment operators. */ template<typename T> class IteratorBase { public: /** Constructor sets m_curr value */ IteratorBase(T curr); /** Validity of current value. */ operator bool() const; /** Current value. */ operator T() const; protected: T m_curr; }; //////// IteratorBase<T> inline methods //////// template<typename T> NV_INLINE IteratorBase<T>::IteratorBase(T curr) : m_curr(curr) { } template<typename T> NV_INLINE IteratorBase<T>::operator bool() const { return !isInvalidIndex<T>(m_curr); } template<typename T> NV_INLINE IteratorBase<T>::operator T() const { return m_curr; } /** Common functionality and implementation for an indexed linked list iterator */ template<typename IndexType> class LListIt : public IteratorBase<IndexType> { public: LListIt(IndexType curr, IndexType* links); /** Pre-increment. Only use if valid() == true. */ uint32_t operator ++ (); protected: IndexType* m_links; }; //////// LListIt<IndexType> inline methods //////// template<typename IndexType> NV_INLINE LListIt<IndexType>::LListIt(IndexType curr, IndexType* links) : IteratorBase<IndexType>(curr), m_links(links) { } template<typename IndexType> NV_INLINE uint32_t LListIt<IndexType>::operator ++ () { NVBLAST_ASSERT((bool)(*this)); return (this->m_curr = m_links[this->m_curr]); } /** Common functionality and implementation for an IndexDList<IndexType> iterator */ template<typename IndexType> class DListIt : public IteratorBase<IndexType> { public: DListIt(IndexType curr, IndexDLink<IndexType>* links); /** Pre-increment. Only use if valid() == true. 
*/ uint32_t operator ++ (); protected: IndexDLink<IndexType>* m_links; }; //////// DListIt<IndexType> inline methods //////// template<typename IndexType> NV_INLINE DListIt<IndexType>::DListIt(IndexType curr, IndexDLink<IndexType>* links) : IteratorBase<IndexType>(curr), m_links(links) { } template<typename IndexType> NV_INLINE uint32_t DListIt<IndexType>::operator ++ () { NVBLAST_ASSERT((bool)(*this)); return (this->m_curr = m_links[this->m_curr].m_adj[1]); } } // end namespace Blast } // end namespace Nv #endif // #ifndef NVBLASTITERATORBASE_H
4,084
C
25.699346
131
0.721107
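A small sketch of LListIt from the header above, iterating an index-linked list terminated by invalidIndex<uint32_t>(); the link table is hand-built for illustration.

#include "NvBlastIteratorBase.h"
#include "NvBlastIndexFns.h"
#include <cstdint>
#include <cstdio>

int main()
{
    using namespace Nv::Blast;

    // links[i] holds the index that follows node i; the list walked here is 2 -> 0 -> 3.
    uint32_t links[4];
    links[2] = 0;
    links[0] = 3;
    links[3] = invalidIndex<uint32_t>();   // end of list
    links[1] = invalidIndex<uint32_t>();   // not part of this list

    for (LListIt<uint32_t> it(2, links); (bool)it; ++it)
        printf("%u ", (uint32_t)it);       // prints 2 0 3
    printf("\n");
    return 0;
}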
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastMemory.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTMEMORY_H #define NVBLASTMEMORY_H #include <math.h> namespace Nv { namespace Blast { /** Utility function to align the given value to the next 16-byte boundary. Returns the aligned value. */ template<typename T> NV_INLINE T align16(T value) { return (value + 0xF)&~(T)0xF; } /** Offset void* pointer by 'offset' bytes helper-functions */ template <typename T> NV_INLINE T pointerOffset(void* p, ptrdiff_t offset) { return reinterpret_cast<T>(reinterpret_cast<char*>(p)+offset); } template <typename T> NV_INLINE T pointerOffset(const void* p, ptrdiff_t offset) { return reinterpret_cast<T>(reinterpret_cast<const char*>(p)+offset); } NV_INLINE const void* pointerOffset(const void* p, ptrdiff_t offset) { return pointerOffset<const void*>(p, offset); } NV_INLINE void* pointerOffset(void* p, ptrdiff_t offset) { return pointerOffset<void*>(p, offset); } } // namespace Blast } // namespace Nv /** Block data offset and accessor macro. */ #define NvBlastBlockData(_dataType, _name, _accessor) \ _dataType* _accessor() const \ { \ return (_dataType*)((uintptr_t)this + _name); \ } \ uint32_t _name /** Block data offset and accessor macro for an array (includes an _accessor##ArraySize() function which returns the last expression). */ #define NvBlastBlockArrayData(_dataType, _name, _accessor, _sizeExpr) \ _dataType* _accessor() const \ { \ return (_dataType*)((uintptr_t)this + _name); \ } \ uint32_t _accessor##ArraySize() const \ { \ return _sizeExpr; \ } \ uint32_t _name /** Block data offset generation macros. */ /** Start offset generation with this. */ #define NvBlastCreateOffsetStart(_baseOffset) \ size_t _lastOffset = _baseOffset; \ size_t _lastSize = 0 /** Create the next offset generation with this. The value will be aligned to a 16-byte boundary. */ #define NvBlastCreateOffsetAlign16(_name, _size) \ _name = align16(_lastOffset + _lastSize); \ _lastOffset = _name; \ _lastSize = _size /** End offset generation with this. 
It evaluates to the (16-byte aligned) total size of the data block. */ #define NvBlastCreateOffsetEndAlign16() \ align16(_lastOffset + _lastSize) /** Stack allocation */ #if NV_WINDOWS_FAMILY #include <malloc.h> #define NvBlastAlloca(x) _alloca(x) #elif NV_LINUX || NV_ANDROID #include <alloca.h> #define NvBlastAlloca(x) alloca(x) #elif NV_APPLE_FAMILY #include <alloca.h> #define NvBlastAlloca(x) alloca(x) #endif #endif // #ifndef NVBLASTMEMORY_H
4,027
C
29.515151
137
0.73032
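A sketch of the offset macros above: a header struct declares a trailing array via NvBlastBlockArrayData, the NvBlastCreateOffset* macros compute its 16-byte-aligned offset plus the total block size, and one allocation then holds both. The struct name MyBlock and its fields are illustrative only.

#include "NvBlastMemory.h"
#include <cstdint>
#include <cstdio>
#include <cstdlib>

struct MyBlock
{
    uint32_t nodeCount;
    // Declares float* getHealths() const, uint32_t getHealthsArraySize() const,
    // and the uint32_t offset member m_healthsOffset.
    NvBlastBlockArrayData(float, m_healthsOffset, getHealths, nodeCount);
};

int main()
{
    using namespace Nv::Blast;

    const uint32_t nodeCount = 10;

    // Compute the array offset and the total size: header first, then a 16-byte aligned float array.
    MyBlock header;
    header.nodeCount = nodeCount;
    NvBlastCreateOffsetStart(sizeof(MyBlock));
    NvBlastCreateOffsetAlign16(header.m_healthsOffset, nodeCount * sizeof(float));
    const size_t totalSize = NvBlastCreateOffsetEndAlign16();

    // One allocation holds the header and the trailing array.
    void* mem = malloc(totalSize);
    MyBlock* block = static_cast<MyBlock*>(mem);
    *block = header;

    block->getHealths()[0] = 1.0f;   // accessor resolves (uintptr_t)this + m_healthsOffset
    printf("block bytes = %zu, healths offset = %u\n", totalSize, block->m_healthsOffset);

    free(mem);
    return 0;
}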
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastAssert.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastAssert.h" #include <stdio.h> #include <stdlib.h> #if NV_WINDOWS_FAMILY #include <crtdbg.h> #endif extern "C" { void NvBlastAssertHandler(const char* expr, const char* file, int line, bool& ignore) { NV_UNUSED(ignore); // is used only in debug windows config char buffer[1024]; #if NV_WINDOWS_FAMILY sprintf_s(buffer, 1024, "%s(%d) : Assertion failed: %s\n", file, line, expr); #else sprintf(buffer, "%s(%d) : Assertion failed: %s\n", file, line, expr); #endif puts(buffer); #if NV_WINDOWS_FAMILY && NV_DEBUG // _CrtDbgReport returns -1 on error, 1 on 'retry', 0 otherwise including 'ignore'. // Hitting 'abort' will terminate the process immediately. int result = _CrtDbgReport(_CRT_ASSERT, file, line, NULL, "%s", buffer); int mode = _CrtSetReportMode(_CRT_ASSERT, _CRTDBG_REPORT_MODE); ignore = _CRTDBG_MODE_WNDW == mode && result == 0; if (ignore) return; __debugbreak(); #elif (NV_WINDOWS_FAMILY && NV_CHECKED) __debugbreak(); #else abort(); #endif } } // extern "C"
2,621
C++
38.134328
87
0.720336
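A tiny usage sketch: NVBLAST_ASSERT (declared in NvBlastAssert.h and used throughout these headers) routes failed checks into the NvBlastAssertHandler defined above in debug/checked builds; the helper function here is illustrative.

#include "NvBlastAssert.h"

// Illustrative helper: the assertion documents the caller contract and, on failure in
// debug/checked builds, is reported through NvBlastAssertHandler.
static float firstElement(const float* values, unsigned count)
{
    NVBLAST_ASSERT(values != nullptr && count > 0);
    return values[0];
}

int main()
{
    const float data[3] = { 0.5f, 1.5f, 2.5f };
    return firstElement(data, 3) > 0.0f ? 0 : 1;
}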
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastVolumeIntegrals.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTVOLUMEINTEGRALS_H #define NVBLASTVOLUMEINTEGRALS_H #include "NvBlastNvSharedHelpers.h" #include "NvCMath.h" #include "NvBlastAssert.h" namespace Nv { namespace Blast{ /** Calculate the volume and centroid of a closed mesh with outward-pointing normals. \param[out] centroid the calculated centroid of the given mesh \param[in] mesh a class of templated type MeshQuery MeshQuery must support the following functions: size_t faceCount() size_t vertexCount(size_t faceIndex) NvcVec3 vertex(size_t faceIndex, size_t vertexIndex) \return the volume of the given mesh */ template<class MeshQuery> NV_INLINE float calculateMeshVolumeAndCentroid(NvcVec3& centroid, const MeshQuery& mesh) { centroid = { 0.0f, 0.0f, 0.0f }; // First find an approximate centroid for a more accurate calculation size_t N = 0; NvcVec3 disp = { 0.0f, 0.0f, 0.0f }; for (size_t i = 0; i < mesh.faceCount(); ++i) { const size_t faceVertexCount = mesh.vertexCount(i); for (size_t j = 0; j < faceVertexCount; ++j) { disp = disp + mesh.vertex(i, j); } N += faceVertexCount; } if (N == 0) { return 0.0f; } disp = disp / (float)N; float sixV = 0.0f; for (size_t i = 0; i < mesh.faceCount(); ++i) { const size_t faceVertexCount = mesh.vertexCount(i); if (faceVertexCount < 3) { continue; } const NvcVec3 a = mesh.vertex(i, 0) - disp; NvcVec3 b = mesh.vertex(i, 1) - disp; for (size_t j = 2; j < faceVertexCount; ++j) { const NvcVec3 c = mesh.vertex(i, j) - disp; const float sixTetV = a.x * b.y * c.z - a.x * b.z * c.y - a.y * b.x * c.z + a.y * b.z * c.x + a.z * b.x * c.y - a.z * b.y * c.x; sixV += sixTetV; centroid = centroid + sixTetV*(a + b + c); b = c; } } // Extra factor of four to average tet vertices centroid = centroid / (4.0f * sixV) + disp; return std::abs(sixV) / 6.0f; } } // namespace Blast } // namespace Nv #endif // NVBLASTVOLUMEINTEGRALS_H
3,744
C
31.284482
88
0.65812
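A sketch of a MeshQuery adapter for calculateMeshVolumeAndCentroid, using a hard-coded unit cube with outward-wound quad faces; the expected result is volume 1 and centroid (0.5, 0.5, 0.5). The CubeQuery type is illustrative.

#include "NvBlastVolumeIntegrals.h"
#include <cstdint>
#include <cstdio>

struct CubeQuery
{
    // 8 cube corners and 6 outward-wound quad faces (indices into the corner table).
    const NvcVec3  v[8]    = { {0,0,0},{1,0,0},{0,1,0},{1,1,0},{0,0,1},{1,0,1},{0,1,1},{1,1,1} };
    const uint32_t f[6][4] = { {0,2,3,1},{4,5,7,6},{0,1,5,4},{2,6,7,3},{0,4,6,2},{1,3,7,5} };

    size_t  faceCount() const               { return 6; }
    size_t  vertexCount(size_t) const       { return 4; }
    NvcVec3 vertex(size_t face, size_t i) const { return v[f[face][i]]; }
};

int main()
{
    NvcVec3 centroid;
    const float volume = Nv::Blast::calculateMeshVolumeAndCentroid(centroid, CubeQuery());
    printf("volume=%f centroid=(%f, %f, %f)\n", volume, centroid.x, centroid.y, centroid.z);
    return 0;
}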
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastGeometry.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTGEOMETRY_H #define NVBLASTGEOMETRY_H #include "NvBlastTypes.h" #include "NvBlastMath.h" #include "NvBlastAssert.h" #include <limits> namespace Nv { namespace Blast{ /** Find the closest node to point in the graph. Uses primarily distance to chunk centroids. Bond normals are expected to be directed from the lower to higher node index. Cannot be used for graph actors with only the external chunk in the graph. \param[in] point the point to test against \param[in] firstGraphNodeIndex the entry point for familyGraphNodeIndexLinks \param[in] familyGraphNodeIndexLinks the list index links of the actor's graph \param[in] adjacencyPartition the actor's SupportGraph adjacency partition \param[in] adjacentNodeIndices the actor's SupportGraph adjacent node indices \param[in] adjacentBondIndices the actor's SupportGraph adjacent bond indices \param[in] assetBonds the actor's asset bonds \param[in] bondHealths the actor's bond healths \param[in] assetChunks the actor's asset chunks \param[in] supportChunkHealths the actor's graph chunks healths \param[in] chunkIndices maps node index to chunk index in SupportGraph \return the index of the node closest to point */ NV_FORCE_INLINE uint32_t findClosestNode(const float point[4], const uint32_t firstGraphNodeIndex, const uint32_t* familyGraphNodeIndexLinks, const uint32_t* adjacencyPartition, const uint32_t* adjacentNodeIndices, const uint32_t* adjacentBondIndices, const NvBlastBond* assetBonds, const float* bondHealths, const NvBlastChunk* assetChunks, const float* supportChunkHealths, const uint32_t* chunkIndices) { // firstGraphNodeIndex could still be the external chunk, however // there should be no way a single-node actor that is just the external chunk exists. uint32_t nodeIndex = firstGraphNodeIndex; // Since there should always be a regular chunk in the graph, it is possible to initialize closestNode // as external chunk index but it would always evaluate to some meaningful node index eventually. 
uint32_t closestNode = nodeIndex; float minDist = std::numeric_limits<float>().max(); // find the closest healthy chunk in the graph by its centroid to point distance while (!Nv::Blast::isInvalidIndex(nodeIndex)) { if (supportChunkHealths[nodeIndex] > 0.0f) { uint32_t chunkIndex = chunkIndices[nodeIndex]; if (!isInvalidIndex(chunkIndex)) // Invalid if this is the external chunk { const NvBlastChunk& chunk = assetChunks[chunkIndex]; const float* centroid = chunk.centroid; float d[3]; VecMath::sub(point, centroid, d); float dist = VecMath::dot(d, d); if (dist < minDist) { minDist = dist; closestNode = nodeIndex; } } } nodeIndex = familyGraphNodeIndexLinks[nodeIndex]; } // as long as the external chunk is not input as a single-node graph actor NVBLAST_ASSERT(!isInvalidIndex(chunkIndices[closestNode])); bool iterateOnBonds = true; if (iterateOnBonds) { // improve geometric accuracy by looking on which side of the closest bond the point lies // expects bond normals to point from the smaller to the larger node index nodeIndex = closestNode; minDist = std::numeric_limits<float>().max(); const uint32_t startIndex = adjacencyPartition[nodeIndex]; const uint32_t stopIndex = adjacencyPartition[nodeIndex + 1]; for (uint32_t adjacentIndex = startIndex; adjacentIndex < stopIndex; adjacentIndex++) { const uint32_t neighbourIndex = adjacentNodeIndices[adjacentIndex]; const uint32_t neighbourChunk = chunkIndices[neighbourIndex]; if (!isInvalidIndex(neighbourChunk)) // Invalid if neighbor is the external chunk { const uint32_t bondIndex = adjacentBondIndices[adjacentIndex]; // do not follow broken bonds, since it means that neighbor is not actually connected in the graph if (bondHealths[bondIndex] > 0.0f && supportChunkHealths[neighbourIndex] > 0.0f) { const NvBlastBond& bond = assetBonds[bondIndex]; const float* centroid = bond.centroid; float d[3]; VecMath::sub(point, centroid, d); float dist = VecMath::dot(d, d); if (dist < minDist) { minDist = dist; float s = VecMath::dot(d, bond.normal); if (nodeIndex < neighbourIndex) { closestNode = s < 0.0f ? nodeIndex : neighbourIndex; } else { closestNode = s < 0.0f ? neighbourIndex : nodeIndex; } } } } } } return closestNode; } /** Find the closest node to point in the graph. Uses primarily distance to bond centroids. Slower compared to chunk based lookup but may yield better accuracy in some cases. Bond normals are expected to be directed from the lower to higher node index. Cannot be used for graph actors with only the external chunk in the graph. 
\param[in] point the point to test against \param[in] firstGraphNodeIndex the entry point for familyGraphNodeIndexLinks \param[in] familyGraphNodeIndexLinks the list index links of the actor's graph \param[in] adjacencyPartition the actor's SupportGraph adjacency partition \param[in] adjacentNodeIndices the actor's SupportGraph adjacent node indices \param[in] adjacentBondIndices the actor's SupportGraph adjacent bond indices \param[in] assetBonds the actor's asset bonds \param[in] bondHealths the actor's bond healths \param[in] chunkIndices maps node index to chunk index in SupportGraph \return the index of the node closest to point */ NV_FORCE_INLINE uint32_t findClosestNode(const float point[4], const uint32_t firstGraphNodeIndex, const uint32_t* familyGraphNodeIndexLinks, const uint32_t* adjacencyPartition, const uint32_t* adjacentNodeIndices, const uint32_t* adjacentBondIndices, const NvBlastBond* bonds, const float* bondHealths, const uint32_t* chunkIndices) { // firstGraphNodeIndex could still be the external chunk, however // there should be no way a single-node actor that is just the external chunk exists. uint32_t nodeIndex = firstGraphNodeIndex; // Since there should always be a regular chunk in the graph, it is possible to initialize closestNode // as external chunk index but it would always evaluate to some meaningful node index eventually. uint32_t closestNode = nodeIndex; float minDist = std::numeric_limits<float>().max(); while (!Nv::Blast::isInvalidIndex(nodeIndex)) { const uint32_t startIndex = adjacencyPartition[nodeIndex]; const uint32_t stopIndex = adjacencyPartition[nodeIndex + 1]; for (uint32_t adjacentIndex = startIndex; adjacentIndex < stopIndex; adjacentIndex++) { const uint32_t neighbourIndex = adjacentNodeIndices[adjacentIndex]; if (nodeIndex < neighbourIndex) { const uint32_t bondIndex = adjacentBondIndices[adjacentIndex]; if (bondHealths[bondIndex] > 0.0f) { const NvBlastBond& bond = bonds[bondIndex]; const float* centroid = bond.centroid; float d[3]; VecMath::sub(point, centroid, d); float dist = VecMath::dot(d, d); if (dist < minDist) { minDist = dist; // if any of the nodes is the external chunk, use the valid one instead if (isInvalidIndex(chunkIndices[neighbourIndex])) { closestNode = nodeIndex; } else if (isInvalidIndex(chunkIndices[nodeIndex])) { closestNode = neighbourIndex; } else { float s = VecMath::dot(d, bond.normal); closestNode = s < 0 ? nodeIndex : neighbourIndex; } } } } } nodeIndex = familyGraphNodeIndexLinks[nodeIndex]; } // as long as the external chunk is not input as a single-node graph actor NVBLAST_ASSERT(!isInvalidIndex(chunkIndices[closestNode])); return closestNode; } } // namespace Blast } // namespace Nv #endif // NVBLASTGEOMETRY_H
10,765
C
44.42616
114
0.63948
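A sketch of the bond-based findClosestNode overload on a hand-built two-node graph. All of the graph arrays below are made up for illustration: nodes 0 and 1 map to chunks 0 and 1 and share a single intact bond at the origin whose normal points along +x (from the lower to the higher node index), so a query point on the +x side should resolve to node 1.

#include "NvBlastGeometry.h"
#include "NvBlastIndexFns.h"
#include <cstdint>
#include <cstdio>

int main()
{
    using namespace Nv::Blast;

    const uint32_t invalid = invalidIndex<uint32_t>();

    const uint32_t nodeLinks[2]    = { 1, invalid };   // node list: 0 -> 1 -> end
    const uint32_t adjPartition[3] = { 0, 1, 2 };      // node i's neighbors live in [adjPartition[i], adjPartition[i+1])
    const uint32_t adjNodes[2]     = { 1, 0 };         // node 0 <-> node 1
    const uint32_t adjBonds[2]     = { 0, 0 };         // both directions reference bond 0
    const uint32_t chunkIndices[2] = { 0, 1 };         // neither node is the external chunk
    const float    bondHealths[1]  = { 1.0f };         // bond is intact

    NvBlastBond bond = {};
    bond.normal[0] = 1.0f;                              // +x, from lower to higher node index
    // bond.centroid is left at the origin

    const float point[4] = { 0.25f, 0.0f, 0.0f, 0.0f }; // slightly on the +x side of the bond
    const uint32_t node = findClosestNode(point, 0, nodeLinks, adjPartition,
                                          adjNodes, adjBonds, &bond, bondHealths, chunkIndices);
    printf("closest node: %u\n", node);                 // expected: 1
    return 0;
}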
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastPreprocessorInternal.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTPREPROCESSORINTERNAL_H #define NVBLASTPREPROCESSORINTERNAL_H #include "NvPreprocessor.h" /** Macros for more convenient logging */ #define NVBLASTLL_LOG_ERROR(_logFn, _msg) if (_logFn != nullptr) { _logFn(NvBlastMessage::Error, _msg, __FILE__, __LINE__); } ((void)0) #define NVBLASTLL_LOG_WARNING(_logFn, _msg) if (_logFn != nullptr) { _logFn(NvBlastMessage::Warning, _msg, __FILE__, __LINE__); } ((void)0) #define NVBLASTLL_LOG_INFO(_logFn, _msg) if (_logFn != nullptr) { _logFn(NvBlastMessage::Info, _msg, __FILE__, __LINE__); } ((void)0) #define NVBLASTLL_LOG_DEBUG(_logFn, _msg) if (_logFn != nullptr) { _logFn(NvBlastMessage::Debug, _msg, __FILE__, __LINE__); } ((void)0) /** Blast will check function parameters for debug and checked builds. */ #define NVBLASTLL_CHECK_PARAMS (NV_DEBUG || NV_CHECKED) #if NVBLASTLL_CHECK_PARAMS #define NVBLASTLL_CHECK(_expr, _logFn, _msg, _onFail) \ { \ if(!(_expr)) \ { \ if (_logFn) { _logFn(NvBlastMessage::Error, _msg, __FILE__, __LINE__); } \ { _onFail; }; \ } \ } #else #define NVBLASTLL_CHECK(_expr, _logFn, _msg, _onFail) NV_UNUSED(_logFn) #endif /** Convenience macro to replace deprecated UINT32_MAX */ #ifndef UINT32_MAX #include <limits> #define UINT32_MAX (std::numeric_limits<uint32_t>::max()) #endif #endif // ifndef NVBLASTPREPROCESSORINTERNAL_H
3,721
C
50.694444
143
0.562483
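A sketch of the logging and check macros above with a trivial console callback; the callback's parameter list (message type, text, file, line) mirrors what the macro bodies pass, and NvBlastTypes.h is included for the NvBlastMessage enumeration. The function and message names are illustrative.

#include "NvBlastPreprocessorInternal.h"
#include "NvBlastTypes.h"
#include <cstdio>

static void consoleLog(int type, const char* msg, const char* file, int line)
{
    printf("[%d] %s (%s:%d)\n", type, msg, file, line);
}

static bool createThing(void* mem)
{
    // Illustrative log callback pointer; the macros accept any callable with this parameter list.
    void (*logFn)(int, const char*, const char*, int) = consoleLog;

    // In debug/checked builds this logs and bails out when mem is null; otherwise it compiles away.
    NVBLASTLL_CHECK(mem != nullptr, logFn, "createThing: mem must not be NULL", return false);

    NVBLASTLL_LOG_INFO(logFn, "createThing: ok");
    return true;
}

int main()
{
    createThing(nullptr);
    return 0;
}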
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastDLink.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTDLINK_H #define NVBLASTDLINK_H #include "NvBlastAssert.h" #include "NvBlastIndexFns.h" namespace Nv { namespace Blast { template<typename IndexType> struct IndexDLink { IndexType m_adj[2]; }; template<typename IndexType> class IndexDList { public: void initLinksSolitary(IndexDLink<IndexType>* links, IndexType linkCount) { for (IndexType i = 0; i < linkCount; ++i) { links[i].m_adj[0] = invalidIndex<IndexType>(); links[i].m_adj[1] = invalidIndex<IndexType>(); } } void initLinksChain(IndexDLink<IndexType>* links, IndexType linkCount) { if (linkCount > 0) { links[0].m_adj[0] = invalidIndex<IndexType>(); for (IndexType i = 1; i < linkCount; ++i) { links[i - 1].m_adj[1] = i; links[i].m_adj[0] = i - 1; } links[linkCount - 1].m_adj[1] = invalidIndex<IndexType>(); } } IndexType getAdj(IndexDLink<IndexType>* links, IndexType linkIndex, int which) { return links[linkIndex].m_adj[which & 1]; } void remove(IndexDLink<IndexType>* links, IndexType linkIndex) { IndexDLink<IndexType>& link = links[linkIndex]; const IndexType adj0 = link.m_adj[0]; const IndexType adj1 = link.m_adj[1]; if (!isInvalidIndex(adj1)) { links[adj1].m_adj[0] = adj0; link.m_adj[1] = invalidIndex<IndexType>(); } if (!isInvalidIndex(adj0)) { links[adj0].m_adj[1] = adj1; link.m_adj[0] = invalidIndex<IndexType>(); } } bool isSolitary(IndexDLink<IndexType>* links, IndexType linkIndex) { const IndexDLink<IndexType>& link = links[linkIndex]; return isInvalidIndex(link.m_adj[0]) && isInvalidIndex(link.m_adj[1]); } void insertListHead(IndexType& listHead, IndexDLink<IndexType>* links, IndexType linkIndex) { NVBLAST_ASSERT(!isInvalidIndex(linkIndex)); if (!isInvalidIndex(listHead)) { links[listHead].m_adj[0] = linkIndex; } links[linkIndex].m_adj[1] = listHead; listHead = linkIndex; } IndexType removeListHead(IndexType& listHead, IndexDLink<IndexType>* links) { const IndexType linkIndex = listHead; if (!isInvalidIndex(linkIndex)) { listHead = links[linkIndex].m_adj[1]; if (!isInvalidIndex(listHead)) { 
links[listHead].m_adj[0] = invalidIndex<IndexType>(); } links[linkIndex].m_adj[1] = invalidIndex<IndexType>(); } return linkIndex; } void removeFromList(IndexType& listHead, IndexDLink<IndexType>* links, IndexType linkIndex) { NVBLAST_ASSERT(!isInvalidIndex(linkIndex)); if (listHead == linkIndex) { listHead = links[linkIndex].m_adj[1]; } remove(links, linkIndex); } }; struct DLink { DLink() : m_prev(nullptr), m_next(nullptr) {} DLink* getPrev() const { return m_prev; } DLink* getNext() const { return m_next; } private: DLink* m_prev; DLink* m_next; friend class DList; }; class DList { public: DList() : m_head(nullptr), m_tail(nullptr) {} bool isEmpty() const { NVBLAST_ASSERT((m_head == nullptr) == (m_tail == nullptr)); return m_head == nullptr; } bool isSolitary(const DLink& link) const { return link.m_prev == nullptr && link.m_next == nullptr && m_head != &link; } DLink* getHead() const { return m_head; } DLink* getTail() const { return m_tail; } bool insertHead(DLink& link) { NVBLAST_ASSERT(isSolitary(link)); if (!isSolitary(link)) { return false; } link.m_next = m_head; if (m_head != nullptr) { m_head->m_prev = &link; } m_head = &link; if (m_tail == nullptr) { m_tail = &link; } return true; } bool insertTail(DLink& link) { NVBLAST_ASSERT(isSolitary(link)); if (!isSolitary(link)) { return false; } link.m_prev = m_tail; if (m_tail != nullptr) { m_tail->m_next = &link; } m_tail = &link; if (m_head == nullptr) { m_head = &link; } return true; } void remove(DLink& link) { if (link.m_prev != nullptr) { link.m_prev->m_next = link.m_next; } else if (m_head == &link) { m_head = link.m_next; } if (link.m_next != nullptr) { link.m_next->m_prev = link.m_prev; } else if (m_tail == &link) { m_tail = link.m_prev; } link.m_next = link.m_prev = nullptr; } class It { public: enum Direction { Reverse, Forward }; It(const DList& list, Direction dir = Forward) : m_curr(dir == Forward ? list.getHead() : list.getTail()) {} /** Validity of current value. */ operator bool() const { return m_curr != nullptr; } /** Current value. */ operator const DLink*() const { return m_curr; } /** Pre-increment. */ const DLink* operator ++ () { return m_curr = m_curr->getNext(); } /** Pre-deccrement. */ const DLink* operator -- () { return m_curr = m_curr->getPrev(); } private: const DLink* m_curr; }; private: DLink* m_head; DLink* m_tail; }; } // end namespace Blast } // end namespace Nv #endif // #ifndef NVBLASTDLINK_H
7,611
C
24.122112
116
0.559322
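A minimal sketch of the intrusive DList above: each object embeds a DLink and the list threads through those links, so recovering the owner is a cast from the link address (valid here because the link is the first member). The Item type and values are illustrative.

#include "NvBlastDLink.h"
#include <cstdio>

struct Item
{
    Nv::Blast::DLink link;   // intrusive link node (kept as the first member)
    int value;
};

int main()
{
    using namespace Nv::Blast;

    Item a{ {}, 1 }, b{ {}, 2 }, c{ {}, 3 };

    DList list;
    list.insertTail(a.link);
    list.insertTail(b.link);
    list.insertHead(c.link);   // list is now c, a, b

    for (DList::It it(list); (bool)it; ++it)
    {
        // Recover the owning Item from its embedded link.
        const Item* item = reinterpret_cast<const Item*>(static_cast<const DLink*>(it));
        printf("%d ", item->value);   // prints 3 1 2
    }
    printf("\n");

    list.remove(a.link);
    return 0;
}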
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastIndexFns.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTINDEXFNS_H #define NVBLASTINDEXFNS_H #include "NvBlastTypes.h" #include <cstring> namespace Nv { namespace Blast { /** Set to invalid index. */ template<typename T> NV_INLINE T invalidIndex() { return ~(T)0; } /** Test for invalid index (max representable integer). */ template<typename T> NV_INLINE bool isInvalidIndex(T index) { return index == invalidIndex<T>(); } /** Create a lookup table for data sorted by a templated index type. Note: when using this function with unsigned integer index types invalidIndex<T>() is treated as a value less than zero. On input: The indices must lie in the interval [indexBase, indexBase+indexRange]. indexSource must point to the first index in the data. indexCount must be set to the number of indices in the data. indexByteStride must be set to the distance, in bytes, between subequent indices. lookup must point to a T array of size indexRange+2. On return: lookup will be filled such that: lookup[i] = the position of first data element with index (i + indexBase) lookup[indexRange+1] = indexCount The last (indexRange+1) element is used so that one may always determine the number of data elements with the given index using: count = lookup[i+1] - lookup[i] Note, if an index (i + indexBase) is not present in the data then, lookup[i+1] = lookup[i], so the count (above) will correctly be zero. In this case, the actual value of lookup[i] is irrelevant. 
*/ template<typename T> void createIndexStartLookup(T* lookup, T indexBase, T indexRange, T* indexSource, T indexCount, T indexByteStride) { ++indexBase; // Ordering invalidIndex<T>() as lowest value T indexPos = 0; for (T i = 0; i <= indexRange; ++i) { for (; indexPos < indexCount; ++indexPos, indexSource = (T*)((uintptr_t)indexSource + indexByteStride)) { if (*indexSource + 1 >= i + indexBase) // +1 to order invalidIndex<T>() as lowest value { lookup[i] = indexPos; break; } } if (indexPos == indexCount) { lookup[i] = indexPos; } } lookup[indexRange + 1] = indexCount; } /** Creates the inverse of a map, such that inverseMap[map[i]] = i. Unmapped indices are set to invalidIndex<T>. \param[out] inverseMap inverse map space of given size \param[in] map original map of given size, unmapped entries must contain invalidIndex<T> \param[in] size size of the involved maps */ template<typename T> void invertMap(T* inverseMap, const T* map, const T size) { memset(inverseMap, invalidIndex<T>(), size*sizeof(T)); for (T i = 0; i < size; i++) { if (!isInvalidIndex(map[i])) { inverseMap[map[i]] = i; } } } } // end namespace Blast } // end namespace Nv #endif // #ifndef NVBLASTINDEXFNS_H
4,587
C
30.641379
144
0.677349
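A small sketch of invertMap from the header above; unmapped output entries end up holding invalidIndex<uint32_t>(). The map contents are illustrative.

#include "NvBlastIndexFns.h"
#include <cstdint>
#include <cstdio>

int main()
{
    using namespace Nv::Blast;

    // map[i] = j means element i maps to slot j; entry 2 is unmapped.
    const uint32_t size = 4;
    uint32_t map[size]     = { 2, 0, invalidIndex<uint32_t>(), 1 };
    uint32_t inverse[size] = {};

    invertMap(inverse, map, size);   // inverse[map[i]] = i for every mapped i

    for (uint32_t j = 0; j < size; ++j)
        printf("inverse[%u] = %u\n", j, inverse[j]);
    // expected: inverse = { 1, 3, 0, invalidIndex } -> 1, 3, 0, 4294967295
    return 0;
}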
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastFixedBitmap.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTFIXEDBITMAP_H #define NVBLASTFIXEDBITMAP_H #include "NvBlastAssert.h" #include "NvBlastMemory.h" #include <cstring> namespace Nv { namespace Blast { /*! FixedBitmap is a bitset (bitmap) of fixed size, it's intended to be used with placement new on chunk of memory. It'll use following memory for data layout. As follows: // some memory char ​*buf = new char[64 *​ 1024]; const uint32_t bitsCount = 100; // placement new on this memory FixedBitmap* arr = new (buf) FixedBitmap(bitsCount); // you can get max requiredMemorySize by an bitMap to use memory left buf = buf + FixedBitmap::requiredMemorySize(bitsCount); buf: +------------------------------------------------------------+ | uint32_t | word0 | word1 | word2 | ... 
| +------------------------------------------------------------+ */ class FixedBitmap { public: explicit FixedBitmap(uint32_t bitsCount) { m_bitsCount = bitsCount; } static uint32_t getWordsCount(uint32_t bitsCount) { return (bitsCount + 31) >> 5; } static size_t requiredMemorySize(uint32_t bitsCount) { return align16(sizeof(FixedBitmap)) + align16(getWordsCount(bitsCount) * sizeof(uint32_t)); } void clear() { memset(data(), 0, getWordsCount(m_bitsCount) * sizeof(uint32_t)); } void fill() { const uint32_t wordCount = getWordsCount(m_bitsCount); uint32_t* mem = data(); memset(mem, 0xFF, wordCount * sizeof(uint32_t)); const uint32_t bitsRemainder = m_bitsCount & 31; if (bitsRemainder > 0) { mem[wordCount - 1] &= ~(0xFFFFFFFF << bitsRemainder); } } int test(uint32_t index) const { NVBLAST_ASSERT(index < m_bitsCount); return data()[index >> 5] & (1 << (index & 31)); } void set(uint32_t index) { NVBLAST_ASSERT(index < m_bitsCount); data()[index >> 5] |= 1 << (index & 31); } void reset(uint32_t index) { NVBLAST_ASSERT(index < m_bitsCount); data()[index >> 5] &= ~(1 << (index & 31)); } private: uint32_t m_bitsCount; NV_FORCE_INLINE uint32_t* data() { return (uint32_t*)((char*)this + sizeof(FixedBitmap)); } NV_FORCE_INLINE const uint32_t* data() const { return (uint32_t*)((char*)this + sizeof(FixedBitmap)); } private: FixedBitmap(const FixedBitmap& that); }; } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTFIXEDBITMAP_H
4,088
C
29.066176
111
0.651663
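FixedBitmap uses the same placement-new pattern as the other fixed containers; the sketch below clears the words first (the constructor only stores the bit count), then sets and tests a few bits. Sizes and indices are illustrative.

#include "NvBlastFixedBitmap.h"
#include <cstdint>
#include <cstdio>
#include <new>
#include <vector>

int main()
{
    using Nv::Blast::FixedBitmap;

    const uint32_t bitsCount = 100;
    std::vector<char> buf(FixedBitmap::requiredMemorySize(bitsCount));

    FixedBitmap* bits = new (buf.data()) FixedBitmap(bitsCount);
    bits->clear();                  // constructor does not zero the words, so clear first
    bits->set(3);
    bits->set(64);
    printf("%d %d %d\n", bits->test(3) != 0, bits->test(4) != 0, bits->test(64) != 0);   // 1 0 1
    bits->reset(64);
    return 0;
}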
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastNvSharedHelpers.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTNVSHAREDSHELPERS_H #define NVBLASTNVSHAREDSHELPERS_H #include "NvCTypes.h" #include "NvVec2.h" #include "NvVec3.h" #include "NvVec4.h" #include "NvTransform.h" #include "NvPlane.h" #include "NvMat33.h" #include "NvMat44.h" #include "NvBounds3.h" using namespace nvidia; #define WCast(type, name) reinterpret_cast<type>(name) #define RCast(type, name) reinterpret_cast<const type>(name) #define CONVERT(BlastType, NvSharedType) \ static inline NvSharedType& toNvShared(BlastType& v) \ { \ return WCast(NvSharedType&, v); \ } \ static inline const NvSharedType& toNvShared(const BlastType& v) \ { \ return RCast(NvSharedType&, v); \ } \ static inline const BlastType& fromNvShared(const NvSharedType& v) \ { \ return RCast(BlastType&, v); \ } \ static inline BlastType& fromNvShared(NvSharedType& v) \ { \ return WCast(BlastType&, v); \ } \ static inline NvSharedType* toNvShared(BlastType* v) \ { \ return WCast(NvSharedType*, v); \ } \ static inline const NvSharedType* toNvShared(const BlastType* v) \ { \ return RCast(NvSharedType*, v); \ } \ static inline const BlastType* fromNvShared(const NvSharedType* v) \ { \ return RCast(BlastType*, v); \ } \ static inline BlastType* fromNvShared(NvSharedType* v) \ { \ return WCast(BlastType*, v); \ } CONVERT(NvcVec2, nvidia::NvVec2) CONVERT(NvcVec3, nvidia::NvVec3) CONVERT(NvcVec4, nvidia::NvVec4) CONVERT(NvcQuat, nvidia::NvQuat) CONVERT(NvcTransform, nvidia::NvTransform) CONVERT(NvcPlane, nvidia::NvPlane) CONVERT(NvcMat33, nvidia::NvMat33) CONVERT(NvcMat44, nvidia::NvMat44) CONVERT(NvcBounds3, nvidia::NvBounds3) NV_COMPILE_TIME_ASSERT(sizeof(NvcVec2) == sizeof(nvidia::NvVec2)); NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcVec2, x) == NV_OFFSET_OF(nvidia::NvVec2, x)); NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcVec2, y) == NV_OFFSET_OF(nvidia::NvVec2, y)); NV_COMPILE_TIME_ASSERT(sizeof(NvcVec3) == sizeof(nvidia::NvVec3)); NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcVec3, x) == NV_OFFSET_OF(nvidia::NvVec3, x)); 
NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcVec3, y) == NV_OFFSET_OF(nvidia::NvVec3, y)); NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcVec3, z) == NV_OFFSET_OF(nvidia::NvVec3, z)); NV_COMPILE_TIME_ASSERT(sizeof(NvcVec4) == sizeof(nvidia::NvVec4)); NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcVec4, x) == NV_OFFSET_OF(nvidia::NvVec4, x)); NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcVec4, y) == NV_OFFSET_OF(nvidia::NvVec4, y)); NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcVec4, z) == NV_OFFSET_OF(nvidia::NvVec4, z)); NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcVec4, w) == NV_OFFSET_OF(nvidia::NvVec4, w)); NV_COMPILE_TIME_ASSERT(sizeof(NvcQuat) == sizeof(nvidia::NvQuat)); NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcQuat, x) == NV_OFFSET_OF(nvidia::NvQuat, x)); NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcQuat, y) == NV_OFFSET_OF(nvidia::NvQuat, y)); NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcQuat, z) == NV_OFFSET_OF(nvidia::NvQuat, z)); NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcQuat, w) == NV_OFFSET_OF(nvidia::NvQuat, w)); NV_COMPILE_TIME_ASSERT(sizeof(NvcTransform) == sizeof(nvidia::NvTransform)); NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcTransform, p) == NV_OFFSET_OF(nvidia::NvTransform, p)); NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcTransform, q) == NV_OFFSET_OF(nvidia::NvTransform, q)); NV_COMPILE_TIME_ASSERT(sizeof(NvcPlane) == sizeof(nvidia::NvPlane)); NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcPlane, n) == NV_OFFSET_OF(nvidia::NvPlane, n)); NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcPlane, d) == NV_OFFSET_OF(nvidia::NvPlane, d)); NV_COMPILE_TIME_ASSERT(sizeof(NvcMat33) == sizeof(nvidia::NvMat33)); NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcMat33, column0) == NV_OFFSET_OF(nvidia::NvMat33, column0)); NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcMat33, column1) == NV_OFFSET_OF(nvidia::NvMat33, column1)); NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcMat33, column2) == NV_OFFSET_OF(nvidia::NvMat33, column2)); NV_COMPILE_TIME_ASSERT(sizeof(NvcBounds3) == sizeof(nvidia::NvBounds3)); NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcBounds3, minimum) == NV_OFFSET_OF(nvidia::NvBounds3, minimum)); NV_COMPILE_TIME_ASSERT(NV_OFFSET_OF(NvcBounds3, maximum) == NV_OFFSET_OF(nvidia::NvBounds3, maximum)); #endif // #ifndef NVBLASTNVSHAREDSHELPERS_H
8,814
C
66.290076
126
0.482982
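A small sketch of what the CONVERT-generated bridges above are for: the same storage can be viewed either as the C-layer Nvc* type or as the nvidia:: math type, without copying. Only the x/y/z members covered by the compile-time asserts are touched; the values are arbitrary.

#include "NvBlastNvSharedHelpers.h"     // assumed include path for this sketch
static void conversionSketch()
{
    NvcVec3 c = { 1.0f, 2.0f, 3.0f };
    nvidia::NvVec3& v = toNvShared(c);      // reinterprets the same memory as the nvidia math type, no copy
    v.z += 1.0f;                            // writes through to c.z as well
    NvcVec3& roundTrip = fromNvShared(v);   // &roundTrip == &c; both views alias the same storage
    (void)roundTrip;
}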
NVIDIA-Omniverse/PhysX/blast/source/sdk/lowlevel/NvBlastFamily.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastTypes.h" #include "NvBlastFamily.h" #include "NvBlastFamilyGraph.h" #include "NvBlastIndexFns.h" #include "NvBlastTime.h" #include <new> namespace Nv { namespace Blast { //////// Global functions //////// struct FamilyDataOffsets { size_t m_actors; size_t m_visibleChunkIndexLinks; size_t m_chunkActorIndices; size_t m_graphNodeIndexLinks; size_t m_lowerSupportChunkHealths; size_t m_graphBondHealths; size_t m_graphCachedBondHealths; size_t m_familyGraph; }; static size_t createFamilyDataOffsets(FamilyDataOffsets& offsets, const NvBlastAssetMemSizeData& sizeData) { NvBlastCreateOffsetStart(sizeof(FamilyHeader)); NvBlastCreateOffsetAlign16(offsets.m_actors, sizeData.lowerSupportChunkCount * sizeof(Actor)); NvBlastCreateOffsetAlign16(offsets.m_visibleChunkIndexLinks, sizeData.chunkCount * sizeof(IndexDLink<uint32_t>)); NvBlastCreateOffsetAlign16(offsets.m_chunkActorIndices, sizeData.upperSupportChunkCount * sizeof(uint32_t)); NvBlastCreateOffsetAlign16(offsets.m_graphNodeIndexLinks, sizeData.nodeCount * sizeof(uint32_t)); NvBlastCreateOffsetAlign16(offsets.m_lowerSupportChunkHealths, sizeData.lowerSupportChunkCount * sizeof(float)); NvBlastCreateOffsetAlign16(offsets.m_graphBondHealths, sizeData.bondCount * sizeof(float)); NvBlastCreateOffsetAlign16(offsets.m_graphCachedBondHealths, sizeData.bondCount * sizeof(float)); NvBlastCreateOffsetAlign16(offsets.m_familyGraph, static_cast<size_t>(FamilyGraph::requiredMemorySize(sizeData.nodeCount, sizeData.bondCount))); return NvBlastCreateOffsetEndAlign16(); } size_t getFamilyMemorySize(const Asset* asset) { #if NVBLASTLL_CHECK_PARAMS if (asset == nullptr) { NVBLAST_ALWAYS_ASSERT(); return 0; } #endif const NvBlastAssetMemSizeData sizeData = NvBlastAssetMemSizeDataFromAsset(asset); return getFamilyMemorySize(sizeData); } size_t getFamilyMemorySize(const NvBlastAssetMemSizeData& sizeData) { FamilyDataOffsets offsets; return createFamilyDataOffsets(offsets, sizeData); } // this path is used by the serialization code // 
buffers are set up, but some parts (like asset ID) are left to the serialization code to fill in static NvBlastFamily* createFamily(void* mem, const NvBlastAssetMemSizeData& sizeData, NvBlastLog logFn) { NVBLASTLL_CHECK(mem != nullptr, logFn, "createFamily: NULL mem pointer input.", return nullptr); NVBLASTLL_CHECK((reinterpret_cast<uintptr_t>(mem) & 0xF) == 0, logFn, "createFamily: mem pointer not 16-byte aligned.", return nullptr); if (sizeData.chunkCount == 0) { NVBLASTLL_LOG_ERROR(logFn, "createFamily: Asset has no chunks. Family not created.\n"); return nullptr; } const uint32_t bondCount = sizeData.bondCount; // We need to keep this many actor representations around for our island indexing scheme. const uint32_t lowerSupportChunkCount = sizeData.lowerSupportChunkCount; // We need this many chunk actor indices. const uint32_t upperSupportChunkCount = sizeData.upperSupportChunkCount; // Family offsets FamilyDataOffsets offsets; const size_t dataSize = createFamilyDataOffsets(offsets, sizeData); // Restricting our data size to < 4GB so that we may use uint32_t offsets if (dataSize > (size_t)UINT32_MAX) { NVBLASTLL_LOG_ERROR(logFn, "Nv::Blast::Actor::instanceAllocate: Instance data block size will exceed 4GB. Instance not created.\n"); return nullptr; } // Allocate family NvBlastFamily* family = reinterpret_cast<NvBlastFamily*>(memset(mem, 0, dataSize)); // Fill in family header FamilyHeader* header = (FamilyHeader*)family; header->dataType = NvBlastDataBlock::FamilyDataBlock; header->formatVersion = 0; // Not currently using this field header->size = (uint32_t)dataSize; header->m_actorCount = 0; header->m_actorsOffset = (uint32_t)offsets.m_actors; header->m_visibleChunkIndexLinksOffset = (uint32_t)offsets.m_visibleChunkIndexLinks; header->m_chunkActorIndicesOffset = (uint32_t)offsets.m_chunkActorIndices; header->m_graphNodeIndexLinksOffset = (uint32_t)offsets.m_graphNodeIndexLinks; header->m_lowerSupportChunkHealthsOffset = (uint32_t)offsets.m_lowerSupportChunkHealths; header->m_graphBondHealthsOffset = (uint32_t)offsets.m_graphBondHealths; header->m_graphCachedBondHealthsOffset = (uint32_t)offsets.m_graphCachedBondHealths; header->m_familyGraphOffset = (uint32_t)offsets.m_familyGraph; // Initialize family header data: // Actors - initialize to defaults, with zero offset value (indicating inactive state) Actor* actors = header->getActors(); // This will get the subsupport actors too for (uint32_t i = 0; i < lowerSupportChunkCount; ++i) { new (actors + i) Actor(); } // Visible chunk index links - initialize to solitary links (0xFFFFFFFF fields) memset(header->getVisibleChunkIndexLinks(), 0xFF, sizeData.chunkCount*sizeof(IndexDLink<uint32_t>)); // Chunk actor IDs - initialize to invalid (0xFFFFFFFF) memset(header->getChunkActorIndices(), 0xFF, upperSupportChunkCount*sizeof(uint32_t)); // Graph node index links - initialize to solitary links memset(header->getGraphNodeIndexLinks(), 0xFF, sizeData.nodeCount*sizeof(uint32_t)); // Healths are initialized to 0 - the entire memory block is already set to 0 above // memset(header->getLowerSupportChunkHealths(), 0, lowerSupportChunkCount*sizeof(float)); // memset(header->getBondHealths(), 0, bondCount*sizeof(float)); // FamilyGraph ctor new (header->getFamilyGraph()) FamilyGraph(sizeData.nodeCount, sizeData.bondCount); return family; } // this path is taken when an asset already exists and a family is to be created from it directly static NvBlastFamily* createFamily(void* mem, const NvBlastAsset* asset, NvBlastLog logFn) { NVBLASTLL_CHECK(asset 
!= nullptr, logFn, "createFamily: NULL asset pointer input.", return nullptr); const Asset* solverAsset = static_cast<const Asset*>(asset); // pull count info from the asset and use that to initialize the family buffers NvBlastAssetMemSizeData sizeData = NvBlastAssetMemSizeDataFromAsset(solverAsset); NvBlastFamily* family = createFamily(mem, sizeData, logFn); if (family != nullptr) { // set the asset ID and pointer since we have them available FamilyHeader* header = reinterpret_cast<FamilyHeader*>(family); header->m_assetID = solverAsset->m_ID; header->m_asset = solverAsset; } return family; } //////// Family member methods //////// void FamilyHeader::fractureSubSupportNoEvents(uint32_t chunkIndex, uint32_t suboffset, float healthDamage, float* chunkHealths, const NvBlastChunk* chunks) { const NvBlastChunk& chunk = chunks[chunkIndex]; uint32_t numChildren = chunk.childIndexStop - chunk.firstChildIndex; if (numChildren > 0) { healthDamage /= numChildren; for (uint32_t childIndex = chunk.firstChildIndex; childIndex < chunk.childIndexStop; childIndex++) { float& health = chunkHealths[childIndex - suboffset]; if (canTakeDamage(health)) { float remainingDamage = healthDamage - health; health -= healthDamage; NVBLAST_ASSERT(chunks[childIndex].parentChunkIndex == chunkIndex); if (health <= 0.0f && remainingDamage > 0.0f) { fractureSubSupportNoEvents(childIndex, suboffset, remainingDamage, chunkHealths, chunks); } } } } } void FamilyHeader::fractureSubSupport(uint32_t chunkIndex, uint32_t suboffset, float healthDamage, float* chunkHealths, const NvBlastChunk* chunks, NvBlastChunkFractureData* outBuffer, uint32_t* currentIndex, const uint32_t maxCount) { const NvBlastChunk& chunk = chunks[chunkIndex]; uint32_t numChildren = chunk.childIndexStop - chunk.firstChildIndex; if (numChildren > 0) { healthDamage /= numChildren; for (uint32_t childIndex = chunk.firstChildIndex; childIndex < chunk.childIndexStop; childIndex++) { float& health = chunkHealths[childIndex - suboffset]; if (canTakeDamage(health)) { float remainingDamage = healthDamage - health; health -= healthDamage; NVBLAST_ASSERT(chunks[childIndex].parentChunkIndex == chunkIndex); if (*currentIndex < maxCount) { NvBlastChunkFractureData& event = outBuffer[*currentIndex]; event.userdata = chunks[childIndex].userData; event.chunkIndex = childIndex; event.health = health; } (*currentIndex)++; if (health <= 0.0f && remainingDamage > 0.0f) { fractureSubSupport(childIndex, suboffset, remainingDamage, chunkHealths, chunks, outBuffer, currentIndex, maxCount); } } } } } void FamilyHeader::fractureNoEvents(uint32_t chunkFractureCount, const NvBlastChunkFractureData* chunkFractures, Actor* filterActor, NvBlastLog logFn) { const SupportGraph& graph = m_asset->m_graph; const uint32_t* graphAdjacencyPartition = graph.getAdjacencyPartition(); const uint32_t* adjacentBondIndices = graph.getAdjacentBondIndices(); float* bondHealths = getBondHealths(); float* chunkHealths = getLowerSupportChunkHealths(); float* subChunkHealths = getSubsupportChunkHealths(); const NvBlastChunk* chunks = m_asset->getChunks(); for (uint32_t i = 0; i < chunkFractureCount; ++i) { const NvBlastChunkFractureData& command = chunkFractures[i]; const uint32_t chunkIndex = command.chunkIndex; const uint32_t chunkHealthIndex = m_asset->getContiguousLowerSupportIndex(chunkIndex); NVBLAST_ASSERT(!isInvalidIndex(chunkHealthIndex)); if (isInvalidIndex(chunkHealthIndex)) { continue; } float& health = chunkHealths[chunkHealthIndex]; if (canTakeDamage(health) && command.health > 0.0f) { Actor* 
actor = getChunkActor(chunkIndex); if (filterActor && filterActor != actor) { NVBLASTLL_LOG_WARNING(logFn, "NvBlastActorApplyFracture: chunk fracture command corresponds to other actor, command is ignored."); } else if (actor) { const uint32_t nodeIndex = m_asset->getChunkToGraphNodeMap()[chunkIndex]; if (actor->getGraphNodeCount() > 1 && !isInvalidIndex(nodeIndex)) { for (uint32_t adjacentIndex = graphAdjacencyPartition[nodeIndex]; adjacentIndex < graphAdjacencyPartition[nodeIndex + 1]; adjacentIndex++) { const uint32_t bondIndex = adjacentBondIndices[adjacentIndex]; NVBLAST_ASSERT(!isInvalidIndex(bondIndex)); if (bondHealths[bondIndex] > 0.0f) { bondHealths[bondIndex] = 0.0f; } } getFamilyGraph()->notifyNodeRemoved(actor->getIndex(), nodeIndex, &graph); } health -= command.health; const float remainingDamage = -health; if (remainingDamage > 0.0f) // node chunk has been damaged beyond its health { fractureSubSupportNoEvents(chunkIndex, m_asset->m_firstSubsupportChunkIndex, remainingDamage, subChunkHealths, chunks); } } } } } void FamilyHeader::fractureWithEvents(uint32_t chunkFractureCount, const NvBlastChunkFractureData* commands, NvBlastChunkFractureData* events, uint32_t eventsSize, uint32_t* count, Actor* filterActor, NvBlastLog logFn) { const SupportGraph& graph = m_asset->m_graph; const uint32_t* graphAdjacencyPartition = graph.getAdjacencyPartition(); const uint32_t* adjacentBondIndices = graph.getAdjacentBondIndices(); float* bondHealths = getBondHealths(); float* chunkHealths = getLowerSupportChunkHealths(); float* subChunkHealths = getSubsupportChunkHealths(); const NvBlastChunk* chunks = m_asset->getChunks(); for (uint32_t i = 0; i < chunkFractureCount; ++i) { const NvBlastChunkFractureData& command = commands[i]; const uint32_t chunkIndex = command.chunkIndex; const uint32_t chunkHealthIndex = m_asset->getContiguousLowerSupportIndex(chunkIndex); NVBLAST_ASSERT(!isInvalidIndex(chunkHealthIndex)); if (isInvalidIndex(chunkHealthIndex)) { continue; } float& health = chunkHealths[chunkHealthIndex]; if (canTakeDamage(health) && command.health > 0.0f) { Actor* actor = getChunkActor(chunkIndex); if (filterActor && filterActor != actor) { NVBLASTLL_LOG_WARNING(logFn, "NvBlastActorApplyFracture: chunk fracture command corresponds to other actor, command is ignored."); } else if (actor) { const uint32_t nodeIndex = m_asset->getChunkToGraphNodeMap()[chunkIndex]; if (actor->getGraphNodeCount() > 1 && !isInvalidIndex(nodeIndex)) { for (uint32_t adjacentIndex = graphAdjacencyPartition[nodeIndex]; adjacentIndex < graphAdjacencyPartition[nodeIndex + 1]; adjacentIndex++) { const uint32_t bondIndex = adjacentBondIndices[adjacentIndex]; NVBLAST_ASSERT(!isInvalidIndex(bondIndex)); if (bondHealths[bondIndex] > 0.0f) { bondHealths[bondIndex] = 0.0f; } } getFamilyGraph()->notifyNodeRemoved(actor->getIndex(), nodeIndex, &graph); } health -= command.health; if (*count < eventsSize) { NvBlastChunkFractureData& outEvent = events[*count]; outEvent.userdata = chunks[chunkIndex].userData; outEvent.chunkIndex = chunkIndex; outEvent.health = health; } (*count)++; const float remainingDamage = -health; if (remainingDamage > 0.0f) // node chunk has been damaged beyond its health { fractureSubSupport(chunkIndex, m_asset->m_firstSubsupportChunkIndex, remainingDamage, subChunkHealths, chunks, events, count, eventsSize); } } } } } void FamilyHeader::fractureInPlaceEvents(uint32_t chunkFractureCount, NvBlastChunkFractureData* inoutbuffer, uint32_t eventsSize, uint32_t* count, Actor* filterActor, NvBlastLog logFn) { 
const SupportGraph& graph = m_asset->m_graph; const uint32_t* graphAdjacencyPartition = graph.getAdjacencyPartition(); const uint32_t* adjacentBondIndices = graph.getAdjacentBondIndices(); float* bondHealths = getBondHealths(); float* chunkHealths = getLowerSupportChunkHealths(); float* subChunkHealths = getSubsupportChunkHealths(); const NvBlastChunk* chunks = m_asset->getChunks(); // // First level Chunk Fractures // for (uint32_t i = 0; i < chunkFractureCount; ++i) { const NvBlastChunkFractureData& command = inoutbuffer[i]; const uint32_t chunkIndex = command.chunkIndex; const uint32_t chunkHealthIndex = m_asset->getContiguousLowerSupportIndex(chunkIndex); NVBLAST_ASSERT(!isInvalidIndex(chunkHealthIndex)); if (isInvalidIndex(chunkHealthIndex)) { continue; } float& health = chunkHealths[chunkHealthIndex]; if (canTakeDamage(health) && command.health > 0.0f) { Actor* actor = getChunkActor(chunkIndex); if (filterActor && filterActor != actor) { NVBLASTLL_LOG_WARNING(logFn, "NvBlastActorApplyFracture: chunk fracture command corresponds to other actor, command is ignored."); } else if (actor) { const uint32_t nodeIndex = m_asset->getChunkToGraphNodeMap()[chunkIndex]; if (actor->getGraphNodeCount() > 1 && !isInvalidIndex(nodeIndex)) { for (uint32_t adjacentIndex = graphAdjacencyPartition[nodeIndex]; adjacentIndex < graphAdjacencyPartition[nodeIndex + 1]; adjacentIndex++) { const uint32_t bondIndex = adjacentBondIndices[adjacentIndex]; NVBLAST_ASSERT(!isInvalidIndex(bondIndex)); if (bondHealths[bondIndex] > 0.0f) { bondHealths[bondIndex] = 0.0f; } } getFamilyGraph()->notifyNodeRemoved(actor->getIndex(), nodeIndex, &graph); } health -= command.health; NvBlastChunkFractureData& outEvent = inoutbuffer[(*count)++]; outEvent.userdata = chunks[chunkIndex].userData; outEvent.chunkIndex = chunkIndex; outEvent.health = health; } } } // // Hierarchical Chunk Fractures // uint32_t commandedChunkFractures = *count; for (uint32_t i = 0; i < commandedChunkFractures; ++i) { NvBlastChunkFractureData& event = inoutbuffer[i]; const uint32_t chunkIndex = event.chunkIndex; const float remainingDamage = -event.health; if (remainingDamage > 0.0f) // node chunk has been damaged beyond its health { fractureSubSupport(chunkIndex, m_asset->m_firstSubsupportChunkIndex, remainingDamage, subChunkHealths, chunks, inoutbuffer, count, eventsSize); } } } void FamilyHeader::applyFracture(NvBlastFractureBuffers* eventBuffers, const NvBlastFractureBuffers* commands, Actor* filterActor, NvBlastLog logFn, NvBlastTimers* timers) { NVBLASTLL_CHECK(commands != nullptr, logFn, "NvBlastActorApplyFracture: NULL commands pointer input.", return); NVBLASTLL_CHECK(isValid(commands), logFn, "NvBlastActorApplyFracture: commands memory is NULL but size is > 0.", return); NVBLASTLL_CHECK(eventBuffers == nullptr || isValid(eventBuffers), logFn, "NvBlastActorApplyFracture: eventBuffers memory is NULL but size is > 0.", eventBuffers->bondFractureCount = 0; eventBuffers->chunkFractureCount = 0; return); #if NVBLASTLL_CHECK_PARAMS if (eventBuffers != nullptr && eventBuffers->bondFractureCount == 0 && eventBuffers->chunkFractureCount == 0) { NVBLASTLL_LOG_WARNING(logFn, "NvBlastActorApplyFracture: eventBuffers do not provide any space."); return; } #endif #if NV_PROFILE Time time; #else NV_UNUSED(timers); #endif // // Chunk Fracture // if (eventBuffers == nullptr || eventBuffers->chunkFractures == nullptr) { // immediate hierarchical fracture fractureNoEvents(commands->chunkFractureCount, commands->chunkFractures, filterActor, logFn); } else if 
(eventBuffers->chunkFractures != commands->chunkFractures) { // immediate hierarchical fracture uint32_t count = 0; fractureWithEvents(commands->chunkFractureCount, commands->chunkFractures, eventBuffers->chunkFractures, eventBuffers->chunkFractureCount, &count, filterActor, logFn); if (count > eventBuffers->chunkFractureCount) { NVBLASTLL_LOG_WARNING(logFn, "NvBlastActorApplyFracture: eventBuffers too small. Chunk events were lost."); } else { eventBuffers->chunkFractureCount = count; } } else if (eventBuffers->chunkFractures == commands->chunkFractures) { // compacting first uint32_t count = 0; fractureInPlaceEvents(commands->chunkFractureCount, commands->chunkFractures, eventBuffers->chunkFractureCount, &count, filterActor, logFn); if (count > eventBuffers->chunkFractureCount) { NVBLASTLL_LOG_WARNING(logFn, "NvBlastActorApplyFracture: eventBuffers too small. Chunk events were lost."); } else { eventBuffers->chunkFractureCount = count; } } // // Bond Fracture // uint32_t outCount = 0; const uint32_t eventBufferSize = eventBuffers ? eventBuffers->bondFractureCount : 0; NvBlastBond* bonds = m_asset->getBonds(); float* bondHealths = getBondHealths(); const uint32_t* graphChunkIndices = m_asset->m_graph.getChunkIndices(); for (uint32_t i = 0; i < commands->bondFractureCount; ++i) { const NvBlastBondFractureData& frac = commands->bondFractures[i]; NVBLAST_ASSERT(frac.nodeIndex0 < m_asset->m_graph.m_nodeCount); NVBLAST_ASSERT(frac.nodeIndex1 < m_asset->m_graph.m_nodeCount); uint32_t chunkIndex0 = graphChunkIndices[frac.nodeIndex0]; uint32_t chunkIndex1 = graphChunkIndices[frac.nodeIndex1]; NVBLAST_ASSERT(!isInvalidIndex(chunkIndex0) || !isInvalidIndex(chunkIndex1)); Actor* actor0 = !isInvalidIndex(chunkIndex0) ? getChunkActor(chunkIndex0) : nullptr; Actor* actor1 = !isInvalidIndex(chunkIndex1) ? getChunkActor(chunkIndex1) : nullptr; NVBLAST_ASSERT(actor0 != nullptr || actor1 != nullptr); // If actors are not nullptr and different then bond is already broken // One of actor can be nullptr which probably means it's 'world' node. if (actor0 == actor1 || actor0 == nullptr || actor1 == nullptr) { Actor* actor = actor0 ? actor0 : actor1; NVBLAST_ASSERT_WITH_MESSAGE(actor, "NvBlastActorApplyFracture: all actors in bond fracture command are nullptr, command will be safely ignored, but investigation is recommended."); if (filterActor && filterActor != actor) { NVBLASTLL_LOG_WARNING(logFn, "NvBlastActorApplyFracture: bond fracture command corresponds to other actor, command is ignored."); } else if (actor) { const uint32_t bondIndex = actor->damageBond(frac.nodeIndex0, frac.nodeIndex1, frac.health); if (!isInvalidIndex(bondIndex)) { if (eventBuffers && eventBuffers->bondFractures) { if (outCount < eventBufferSize) { NvBlastBondFractureData& outEvent = eventBuffers->bondFractures[outCount]; outEvent.userdata = bonds[bondIndex].userData; outEvent.nodeIndex0 = frac.nodeIndex0; outEvent.nodeIndex1 = frac.nodeIndex1; outEvent.health = bondHealths[bondIndex]; } } outCount++; } } } } if (eventBuffers && eventBuffers->bondFractures) { if (outCount > eventBufferSize) { NVBLASTLL_LOG_WARNING(logFn, "NvBlastActorApplyFracture: eventBuffers too small. 
Bond events were lost."); } else { eventBuffers->bondFractureCount = outCount; } } #if NV_PROFILE if (timers != nullptr) { timers->fracture += time.getElapsedTicks(); } #endif } } // namespace Blast } // namespace Nv // API implementation extern "C" { NvBlastAssetMemSizeData NvBlastAssetMemSizeDataFromAsset(const NvBlastAsset* asset) { const Nv::Blast::Asset* solverAsset = reinterpret_cast<const Nv::Blast::Asset*>(asset); NvBlastAssetMemSizeData sizeData; if (solverAsset) { sizeData.bondCount = solverAsset->getBondCount(); sizeData.chunkCount = solverAsset->m_chunkCount; sizeData.nodeCount = solverAsset->m_graph.m_nodeCount; sizeData.lowerSupportChunkCount = solverAsset->getLowerSupportChunkCount(); sizeData.upperSupportChunkCount = solverAsset->getUpperSupportChunkCount(); } else { memset(&sizeData, 0, sizeof(NvBlastAssetMemSizeData)); } return sizeData; } NvBlastFamily* NvBlastAssetCreateFamily(void* mem, const NvBlastAsset* asset, NvBlastLog logFn) { return Nv::Blast::createFamily(mem, asset, logFn); } NvBlastFamily* NvBlastAssetCreateFamilyFromSizeData(void* mem, const NvBlastAssetMemSizeData& sizeData, NvBlastLog logFn) { return Nv::Blast::createFamily(mem, sizeData, logFn); } uint32_t NvBlastFamilyGetFormatVersion(const NvBlastFamily* family, NvBlastLog logFn) { NVBLASTLL_CHECK(family != nullptr, logFn, "NvBlastFamilyGetFormatVersion: NULL family pointer input.", return UINT32_MAX); return reinterpret_cast<const Nv::Blast::FamilyHeader*>(family)->formatVersion; } const NvBlastAsset* NvBlastFamilyGetAsset(const NvBlastFamily* family, NvBlastLog logFn) { NVBLASTLL_CHECK(family != nullptr, logFn, "NvBlastFamilyGetAsset: NULL family pointer input.", return nullptr); return reinterpret_cast<const Nv::Blast::FamilyHeader*>(family)->m_asset; } void NvBlastFamilySetAsset(NvBlastFamily* family, const NvBlastAsset* asset, NvBlastLog logFn) { NVBLASTLL_CHECK(family != nullptr, logFn, "NvBlastFamilySetAsset: NULL family pointer input.", return); NVBLASTLL_CHECK(asset != nullptr, logFn, "NvBlastFamilySetAsset: NULL asset pointer input.", return); Nv::Blast::FamilyHeader* header = reinterpret_cast<Nv::Blast::FamilyHeader*>(family); const Nv::Blast::Asset* solverAsset = reinterpret_cast<const Nv::Blast::Asset*>(asset); if (memcmp(&header->m_assetID, &solverAsset->m_ID, sizeof(NvBlastID))) { NVBLASTLL_LOG_ERROR(logFn, "NvBlastFamilySetAsset: wrong asset. 
Passed asset ID doesn't match family asset ID."); return; } header->m_asset = solverAsset; } uint32_t NvBlastFamilyGetSize(const NvBlastFamily* family, NvBlastLog logFn) { NVBLASTLL_CHECK(family != nullptr, logFn, "NvBlastFamilyGetSize: NULL family pointer input.", return 0); return reinterpret_cast<const Nv::Blast::FamilyHeader*>(family)->size; } NvBlastID NvBlastFamilyGetAssetID(const NvBlastFamily* family, NvBlastLog logFn) { NVBLASTLL_CHECK(family != nullptr, logFn, "NvBlastFamilyGetAssetID: NULL family pointer input.", return NvBlastID()); return reinterpret_cast<const Nv::Blast::FamilyHeader*>(family)->m_assetID; } uint32_t NvBlastFamilyGetActorCount(const NvBlastFamily* family, NvBlastLog logFn) { NVBLASTLL_CHECK(family != nullptr, logFn, "NvBlastFamilyGetActorCount: NULL family pointer input.", return 0); const Nv::Blast::FamilyHeader* header = reinterpret_cast<const Nv::Blast::FamilyHeader*>(family); return header->m_actorCount; } uint32_t NvBlastFamilyGetActors(NvBlastActor** actors, uint32_t actorsSize, const NvBlastFamily* family, NvBlastLog logFn) { NVBLASTLL_CHECK(actors != nullptr, logFn, "NvBlastFamilyGetActors: NULL actors pointer input.", return 0); NVBLASTLL_CHECK(family != nullptr, logFn, "NvBlastFamilyGetActors: NULL family pointer input.", return 0); const Nv::Blast::FamilyHeader* header = reinterpret_cast<const Nv::Blast::FamilyHeader*>(family); // Iterate through active actors and write to supplied array const uint32_t familyActorCount = header->getActorsArraySize(); Nv::Blast::Actor* familyActor = header->getActors(); uint32_t actorCount = 0; for (uint32_t i = 0; actorCount < actorsSize && i < familyActorCount; ++i, ++familyActor) { if (familyActor->isActive()) { actors[actorCount++] = familyActor; } } return actorCount; } NvBlastActor* NvBlastFamilyGetActorByIndex(const NvBlastFamily* family, uint32_t actorIndex, NvBlastLog logFn) { NVBLASTLL_CHECK(family != nullptr, logFn, "NvBlastFamilyGetActorByIndex: NULL family pointer input.", return nullptr); const Nv::Blast::FamilyHeader* header = reinterpret_cast<const Nv::Blast::FamilyHeader*>(family); return header->getActorByIndex(actorIndex); } NvBlastActor* NvBlastFamilyGetChunkActor(const NvBlastFamily* family, uint32_t chunkIndex, NvBlastLog logFn) { NVBLASTLL_CHECK(family != nullptr, logFn, "NvBlastFamilyGetChunkActor: NULL family pointer input.", return nullptr); const Nv::Blast::FamilyHeader* header = reinterpret_cast<const Nv::Blast::FamilyHeader*>(family); NVBLASTLL_CHECK(header->m_asset != nullptr, logFn, "NvBlastFamilyGetChunkActor: NvBlastFamily has null asset set.", return nullptr); NVBLASTLL_CHECK(chunkIndex < header->m_asset->m_chunkCount, logFn, "NvBlastFamilyGetChunkActor: bad value of chunkIndex for the given family's asset.", return nullptr); return header->getChunkActor(chunkIndex); } uint32_t* NvBlastFamilyGetChunkActorIndices(const NvBlastFamily* family, NvBlastLog logFn) { NVBLASTLL_CHECK(family != nullptr, logFn, "NvBlastFamilyGetChunkActorIndices: NULL family pointer input.", return nullptr); const Nv::Blast::FamilyHeader* header = reinterpret_cast<const Nv::Blast::FamilyHeader*>(family); NVBLASTLL_CHECK(header->m_asset != nullptr, logFn, "NvBlastFamilyGetChunkActorIndices: NvBlastFamily has null asset set.", return nullptr); return header->getChunkActorIndices(); } uint32_t NvBlastFamilyGetMaxActorCount(const NvBlastFamily* family, NvBlastLog logFn) { NVBLASTLL_CHECK(family != nullptr, logFn, "NvBlastFamilyGetMaxActorCount: NULL family pointer input.", return 0); const Nv::Blast::FamilyHeader* 
header = reinterpret_cast<const Nv::Blast::FamilyHeader*>(family); return header->getActorsArraySize(); } } // extern "C"
31,654
C++
40.379085
233
0.6699
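A sketch of the allocation pattern the createFamily() paths above expect from callers: query the block size, provide 16-byte-aligned memory, and hand it to NvBlastAssetCreateFamily. NvBlastAssetGetFamilyMemorySize is assumed to be the public accessor declared in NvBlast.h; the aligned-allocation strategy is just an example.

#include <cstdlib>
static NvBlastFamily* createFamilyFor(const NvBlastAsset* asset, NvBlastLog logFn)
{
    const size_t familySize = NvBlastAssetGetFamilyMemorySize(asset, logFn);
    if (familySize == 0)
        return nullptr;
    // createFamily() rejects memory that is not 16-byte aligned, so allocate accordingly
    // (std::aligned_alloc requires the size to be a multiple of the alignment).
    void* mem = std::aligned_alloc(16, (familySize + 15) & ~size_t(15));
    if (mem == nullptr)
        return nullptr;
    NvBlastFamily* family = NvBlastAssetCreateFamily(mem, asset, logFn);
    if (family == nullptr)
        std::free(mem);   // creation failed (e.g. asset with no chunks); release the block
    return family;
}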
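A sketch of feeding the fracture path above from the public API. The field names of NvBlastFractureBuffers and NvBlastChunkFractureData match their use in applyFracture(); the exact declaration of NvBlastActorApplyFracture lives in NvBlast.h, and the argument order used here (events, actor, commands, log, timers) is an assumption to verify against that header.

static void fractureOneChunk(NvBlastActor* actor, uint32_t chunkIndex, float damage, NvBlastLog logFn)
{
    NvBlastChunkFractureData command = {};
    command.chunkIndex = chunkIndex;
    command.health     = damage;                     // health to subtract from the chunk

    NvBlastFractureBuffers commands = {};
    commands.chunkFractureCount = 1;
    commands.chunkFractures     = &command;

    // Separate event storage selects the fractureWithEvents() path above; size it generously
    // so hierarchical (subsupport) chunk events are not dropped.
    NvBlastChunkFractureData chunkEvents[64] = {};   // arbitrary capacity for the sketch
    NvBlastFractureBuffers events = {};
    events.chunkFractureCount = 64;
    events.chunkFractures     = chunkEvents;

    NvBlastActorApplyFracture(&events, actor, &commands, logFn, nullptr);
    // On return, events.chunkFractureCount holds the number of chunk events actually written.
}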
NVIDIA-Omniverse/PhysX/blast/source/sdk/lowlevel/NvBlastActorSerializationBlock.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastActor.h" #include "NvBlastActorSerializationBlock.h" #include "NvBlastFamilyGraph.h" #include <algorithm> namespace Nv { namespace Blast { //////// Actor static methods for serialization //////// Actor* Actor::deserialize(NvBlastFamily* family, const void* buffer, NvBlastLog logFn) { NVBLASTLL_CHECK(family != nullptr, logFn, "Actor::deserialize: NULL family pointer input.", return nullptr); const ActorSerializationHeader* serHeader = reinterpret_cast<const ActorSerializationHeader*>(buffer); if (serHeader->m_formatVersion != ActorSerializationFormat::Current) { NVBLASTLL_LOG_ERROR(logFn, "Actor::deserialize: wrong data format. Serialization data must be converted to current version."); return nullptr; } FamilyHeader* header = reinterpret_cast<FamilyHeader*>(family); const Asset* asset = header->m_asset; const SupportGraph& graph = asset->m_graph; const uint32_t* graphChunkIndices = graph.getChunkIndices(); const uint32_t* graphAdjacencyPartition = graph.getAdjacencyPartition(); const uint32_t* graphAdjacentNodeIndices = graph.getAdjacentNodeIndices(); const uint32_t* graphAdjacentBondIndices = graph.getAdjacentBondIndices(); Actor* actor = nullptr; const uint32_t actorIndex = serHeader->m_index; if (serHeader->m_index < header->getActorsArraySize()) { if (!header->getActors()[actorIndex].isActive()) { actor = header->borrowActor(serHeader->m_index); } } if (actor == nullptr) { NVBLASTLL_LOG_ERROR(logFn, "Actor::deserialize: invalid actor index in serialized data. 
Actor not created."); return nullptr; } // Commonly used data uint32_t* chunkActorIndices = header->getChunkActorIndices(); FamilyGraph* familyGraph = header->getFamilyGraph(); #if NVBLASTLL_CHECK_PARAMS { const uint32_t* serVisibleChunkIndices = serHeader->getVisibleChunkIndices(); for (uint32_t i = 0; i < serHeader->m_visibleChunkCount; ++i) { const uint32_t visibleChunkIndex = serVisibleChunkIndices[i]; if (!isInvalidIndex(chunkActorIndices[visibleChunkIndex])) { NVBLASTLL_LOG_ERROR(logFn, "Actor::deserialize: visible chunk already has an actor in family. Actor not created."); header->returnActor(*actor); return nullptr; } } } #endif // Visible chunk indices and chunk actor indices { // Add visible chunks, set chunk subtree ownership const uint32_t* serVisibleChunkIndices = serHeader->getVisibleChunkIndices(); IndexDLink<uint32_t>* visibleChunkIndexLinks = header->getVisibleChunkIndexLinks(); for (uint32_t i = serHeader->m_visibleChunkCount; i--;) // Reverse-order, so the resulting linked list is in the original order { const uint32_t visibleChunkIndex = serVisibleChunkIndices[i]; NVBLAST_ASSERT(isInvalidIndex(visibleChunkIndexLinks[visibleChunkIndex].m_adj[0]) && isInvalidIndex(visibleChunkIndexLinks[visibleChunkIndex].m_adj[1])); IndexDList<uint32_t>().insertListHead(actor->m_firstVisibleChunkIndex, visibleChunkIndexLinks, visibleChunkIndex); for (Asset::DepthFirstIt j(*asset, visibleChunkIndex, true); (bool)j; ++j) { NVBLAST_ASSERT(isInvalidIndex(chunkActorIndices[(uint32_t)j])); chunkActorIndices[(uint32_t)j] = actorIndex; } } actor->m_visibleChunkCount = serHeader->m_visibleChunkCount; } // Graph node indices, leaf chunk count, and and island IDs { // Add graph nodes const uint32_t* serGraphNodeIndices = serHeader->getGraphNodeIndices(); uint32_t* graphNodeIndexLinks = header->getGraphNodeIndexLinks(); uint32_t* islandIDs = familyGraph->getIslandIds(); for (uint32_t i = serHeader->m_graphNodeCount; i--;) // Reverse-order, so the resulting linked list is in the original order { const uint32_t graphNodeIndex = serGraphNodeIndices[i]; NVBLAST_ASSERT(isInvalidIndex(graphNodeIndexLinks[graphNodeIndex])); graphNodeIndexLinks[graphNodeIndex] = actor->m_firstGraphNodeIndex; actor->m_firstGraphNodeIndex = graphNodeIndex; islandIDs[graphNodeIndex] = actorIndex; } actor->m_graphNodeCount = serHeader->m_graphNodeCount; actor->m_leafChunkCount = serHeader->m_leafChunkCount; } // Using this function after the family graph data has been set up, so that it will work correctly const bool hasExternalBonds = actor->hasExternalBonds(); // Lower support chunk healths { const float* serLowerSupportChunkHealths = serHeader->getLowerSupportChunkHealths(); float* subsupportHealths = header->getSubsupportChunkHealths(); const uint32_t subsupportChunkCount = asset->getUpperSupportChunkCount(); if (actor->m_graphNodeCount > 0) { uint32_t serLowerSupportChunkCount = 0; float* graphNodeHealths = header->getLowerSupportChunkHealths(); for (Actor::GraphNodeIt i = *actor; (bool)i; ++i) { const uint32_t graphNodeIndex = (uint32_t)i; const uint32_t supportChunkIndex = graphChunkIndices[graphNodeIndex]; if (isInvalidIndex(supportChunkIndex)) { continue; } graphNodeHealths[graphNodeIndex] = serLowerSupportChunkHealths[serLowerSupportChunkCount++]; Asset::DepthFirstIt j(*asset, supportChunkIndex); NVBLAST_ASSERT((bool)j); ++j; // Skip first (support) chunk, it's already been handled for (; (bool)j; ++j) { subsupportHealths[(uint32_t)j] = serLowerSupportChunkHealths[serLowerSupportChunkCount++]; } } } else // Single 
subsupport chunk if (!isInvalidIndex(actor->m_firstVisibleChunkIndex)) { NVBLAST_ASSERT(actor->m_firstVisibleChunkIndex >= subsupportChunkCount); subsupportHealths[actor->m_firstVisibleChunkIndex - subsupportChunkCount] = *serLowerSupportChunkHealths; } } // Bond healths uint32_t serBondCount = 0; { const float* serBondHealths = serHeader->getBondHealths(); float* bondHealths = header->getBondHealths(); for (Actor::GraphNodeIt i = *actor; (bool)i; ++i) { const uint32_t graphNodeIndex = (uint32_t)i; for (uint32_t adjacentIndex = graphAdjacencyPartition[graphNodeIndex]; adjacentIndex < graphAdjacencyPartition[graphNodeIndex + 1]; ++adjacentIndex) { const uint32_t adjacentNodeIndex = graphAdjacentNodeIndices[adjacentIndex]; if (adjacentNodeIndex > graphNodeIndex) // So as not to double-count { // Only count if the adjacent node belongs to this actor const uint32_t adjacentChunkIndex = graphChunkIndices[adjacentNodeIndex]; if ((hasExternalBonds && isInvalidIndex(adjacentChunkIndex)) || (!isInvalidIndex(adjacentChunkIndex) && chunkActorIndices[adjacentChunkIndex] == actorIndex)) { const uint32_t adjacentBondIndex = graphAdjacentBondIndices[adjacentIndex]; bondHealths[adjacentBondIndex] = serBondHealths[serBondCount++]; } } } } } // Fast routes { const uint32_t* serFastRoute = serHeader->getFastRoute(); uint32_t* fastRoute = header->getFamilyGraph()->getFastRoute(); for (Actor::GraphNodeIt i = *actor; (bool)i; ++i) { fastRoute[(uint32_t)i] = *serFastRoute++; } } // Hop counts { const uint32_t* serHopCounts = serHeader->getHopCounts(); uint32_t* hopCounts = header->getFamilyGraph()->getHopCounts(); for (Actor::GraphNodeIt i = *actor; (bool)i; ++i) { hopCounts[(uint32_t)i] = *serHopCounts++; } } // Edge removed array if (serBondCount > 0) { uint32_t serBondIndex = 0; const FixedBoolArray* serEdgeRemovedArray = serHeader->getEdgeRemovedArray(); FixedBoolArray* edgeRemovedArray = familyGraph->getIsEdgeRemoved(); for (Actor::GraphNodeIt i = *actor; (bool)i; ++i) { const uint32_t graphNodeIndex = (uint32_t)i; for (uint32_t adjacentIndex = graphAdjacencyPartition[graphNodeIndex]; adjacentIndex < graphAdjacencyPartition[graphNodeIndex + 1]; ++adjacentIndex) { const uint32_t adjacentNodeIndex = graphAdjacentNodeIndices[adjacentIndex]; if (adjacentNodeIndex > graphNodeIndex) // So as not to double-count { // Only count if the adjacent node belongs to this actor const uint32_t adjacentChunkIndex = graphChunkIndices[adjacentNodeIndex]; if ((hasExternalBonds && isInvalidIndex(adjacentChunkIndex)) || (!isInvalidIndex(adjacentChunkIndex) && chunkActorIndices[adjacentChunkIndex] == actorIndex)) { if (!serEdgeRemovedArray->test(serBondIndex)) { const uint32_t adjacentBondIndex = graphAdjacentBondIndices[adjacentIndex]; edgeRemovedArray->reset(adjacentBondIndex); } ++serBondIndex; } } } } } return actor; } //////// Actor member methods for serialization //////// uint32_t Actor::serialize(void* buffer, uint32_t bufferSize, NvBlastLog logFn) const { // Set up pointers and such const Asset* asset = getAsset(); const SupportGraph& graph = asset->m_graph; const uint32_t* graphChunkIndices = graph.getChunkIndices(); const uint32_t* graphAdjacencyPartition = graph.getAdjacencyPartition(); const uint32_t* graphAdjacentNodeIndices = graph.getAdjacentNodeIndices(); const uint32_t* graphAdjacentBondIndices = graph.getAdjacentBondIndices(); const FamilyHeader* header = getFamilyHeader(); const uint32_t* chunkActorIndices = header->getChunkActorIndices(); const uint32_t thisActorIndex = getIndex(); const bool hasExternalBonds 
= this->hasExternalBonds(); // Make sure there are no dirty nodes if (m_graphNodeCount) { const uint32_t* firstDirtyNodeIndices = header->getFamilyGraph()->getFirstDirtyNodeIndices(); if (!isInvalidIndex(firstDirtyNodeIndices[thisActorIndex])) { NVBLASTLL_LOG_ERROR(logFn, "Nv::Blast::Actor::serialize: instance graph has dirty nodes. Call Nv::Blast::Actor::findIslands before serializing."); return 0; } } uint64_t offset = 0; // Header ActorSerializationHeader* serHeader = reinterpret_cast<ActorSerializationHeader*>(buffer); offset = align16(sizeof(ActorSerializationHeader)); if (offset > bufferSize) { return 0; // Buffer size insufficient } serHeader->m_formatVersion = ActorSerializationFormat::Current; serHeader->m_size = 0; // Will be updated below serHeader->m_index = thisActorIndex; serHeader->m_visibleChunkCount = m_visibleChunkCount; serHeader->m_graphNodeCount = m_graphNodeCount; serHeader->m_leafChunkCount = m_leafChunkCount; // Visible chunk indices { serHeader->m_visibleChunkIndicesOffset = (uint32_t)offset; offset = align16(offset + m_visibleChunkCount*sizeof(uint32_t)); if (offset > bufferSize) { NVBLASTLL_LOG_ERROR(logFn, "Nv::Blast::Actor::Actor::serialize: buffer size exceeded."); return 0; // Buffer size insufficient } uint32_t* serVisibleChunkIndices = serHeader->getVisibleChunkIndices(); uint32_t serVisibleChunkCount = 0; for (Actor::VisibleChunkIt i = *this; (bool)i; ++i) { NVBLAST_ASSERT(serVisibleChunkCount < m_visibleChunkCount); serVisibleChunkIndices[serVisibleChunkCount++] = (uint32_t)i; } NVBLAST_ASSERT(serVisibleChunkCount == m_visibleChunkCount); } // Graph node indices { serHeader->m_graphNodeIndicesOffset = (uint32_t)offset; offset = align16(offset + m_graphNodeCount*sizeof(uint32_t)); if (offset > bufferSize) { NVBLASTLL_LOG_ERROR(logFn, "Nv::Blast::Actor::serialize: buffer size exceeded."); return 0; // Buffer size insufficient } uint32_t* serGraphNodeIndices = serHeader->getGraphNodeIndices(); uint32_t serGraphNodeCount = 0; for (Actor::GraphNodeIt i = *this; (bool)i; ++i) { NVBLAST_ASSERT(serGraphNodeCount < m_graphNodeCount); serGraphNodeIndices[serGraphNodeCount++] = (uint32_t)i; } NVBLAST_ASSERT(serGraphNodeCount == m_graphNodeCount); } // Lower support chunk healths { serHeader->m_lowerSupportChunkHealthsOffset = (uint32_t)offset; float* serLowerSupportChunkHealths = serHeader->getLowerSupportChunkHealths(); const float* subsupportHealths = header->getSubsupportChunkHealths(); const uint32_t subsupportChunkCount = asset->getUpperSupportChunkCount(); if (m_graphNodeCount > 0) { uint32_t serLowerSupportChunkCount = 0; const float* graphNodeHealths = header->getLowerSupportChunkHealths(); for (Actor::GraphNodeIt i = *this; (bool)i; ++i) { const uint32_t graphNodeIndex = (uint32_t)i; const uint32_t supportChunkIndex = graphChunkIndices[graphNodeIndex]; if (isInvalidIndex(supportChunkIndex)) { continue; } serLowerSupportChunkHealths[serLowerSupportChunkCount++] = graphNodeHealths[graphNodeIndex]; offset += sizeof(float); Asset::DepthFirstIt j(*asset, supportChunkIndex); NVBLAST_ASSERT((bool)j); ++j; // Skip first (support) chunk, it's already been handled for (; (bool)j; ++j) { if (offset >= bufferSize) { NVBLASTLL_LOG_ERROR(logFn, "Nv::Blast::Actor::serialize: buffer size exceeded."); return 0; // Buffer size insufficient } serLowerSupportChunkHealths[serLowerSupportChunkCount++] = subsupportHealths[(uint32_t)j - subsupportChunkCount]; offset += sizeof(float); } } } else // Single subsupport chunk if (!isInvalidIndex(m_firstVisibleChunkIndex)) { 
NVBLAST_ASSERT(m_firstVisibleChunkIndex >= subsupportChunkCount); if (offset >= bufferSize) { NVBLASTLL_LOG_ERROR(logFn, "Nv::Blast::Actor::serialize: buffer size exceeded."); return 0; // Buffer size insufficient } *serLowerSupportChunkHealths = subsupportHealths[m_firstVisibleChunkIndex - subsupportChunkCount]; offset += sizeof(float); } } offset = align16(offset); // Bond healths uint32_t serBondCount = 0; { serHeader->m_bondHealthsOffset = (uint32_t)offset; float* serBondHealths = serHeader->getBondHealths(); const float* bondHealths = header->getBondHealths(); for (Actor::GraphNodeIt i = *this; (bool)i; ++i) { const uint32_t graphNodeIndex = (uint32_t)i; for (uint32_t adjacentIndex = graphAdjacencyPartition[graphNodeIndex]; adjacentIndex < graphAdjacencyPartition[graphNodeIndex + 1]; ++adjacentIndex) { const uint32_t adjacentNodeIndex = graphAdjacentNodeIndices[adjacentIndex]; if (adjacentNodeIndex > graphNodeIndex) // So as not to double-count { // Only count if the adjacent node belongs to this actor const uint32_t adjacentChunkIndex = graphChunkIndices[adjacentNodeIndex]; if ((hasExternalBonds && isInvalidIndex(adjacentChunkIndex)) || (!isInvalidIndex(adjacentChunkIndex) && chunkActorIndices[adjacentChunkIndex] == thisActorIndex)) { if (offset >= bufferSize) { NVBLASTLL_LOG_ERROR(logFn, "Nv::Blast::Actor::serialize: buffer size exceeded."); return 0; // Buffer size insufficient } const uint32_t adjacentBondIndex = graphAdjacentBondIndices[adjacentIndex]; serBondHealths[serBondCount++] = bondHealths[adjacentBondIndex]; offset += sizeof(float); } } } } } offset = align16(offset); // Fast routes { serHeader->m_fastRouteOffset = (uint32_t)offset; offset = align16(offset + m_graphNodeCount*sizeof(uint32_t)); if (offset > bufferSize) { NVBLASTLL_LOG_ERROR(logFn, "Nv::Blast::Actor::serialize: buffer size exceeded."); return 0; // Buffer size insufficient } uint32_t* serFastRoute = serHeader->getFastRoute(); const uint32_t* fastRoute = header->getFamilyGraph()->getFastRoute(); for (Actor::GraphNodeIt i = *this; (bool)i; ++i) { *serFastRoute++ = fastRoute[(uint32_t)i]; } } // Hop counts { serHeader->m_hopCountsOffset = (uint32_t)offset; offset = align16(offset + m_graphNodeCount*sizeof(uint32_t)); if (offset > bufferSize) { NVBLASTLL_LOG_ERROR(logFn, "Nv::Blast::Actor::serialize: buffer size exceeded."); return 0; // Buffer size insufficient } uint32_t* serHopCounts = serHeader->getHopCounts(); const uint32_t* hopCounts = header->getFamilyGraph()->getHopCounts(); for (Actor::GraphNodeIt i = *this; (bool)i; ++i) { *serHopCounts++ = hopCounts[(uint32_t)i]; } } // Edge removed array if (serBondCount > 0) { serHeader->m_edgeRemovedArrayOffset = (uint32_t)offset; offset = align16(offset + FixedBoolArray::requiredMemorySize(serBondCount)); if (offset > bufferSize) { NVBLASTLL_LOG_ERROR(logFn, "Nv::Blast::Actor::serialize: buffer size exceeded."); return 0; // Buffer size insufficient } uint32_t serBondIndex = 0; FixedBoolArray* serEdgeRemovedArray = serHeader->getEdgeRemovedArray(); new (serEdgeRemovedArray)FixedBoolArray(serBondCount); serEdgeRemovedArray->fill(); // Reset bits as we find bonds const FixedBoolArray* edgeRemovedArray = header->getFamilyGraph()->getIsEdgeRemoved(); for (Actor::GraphNodeIt i = *this; (bool)i; ++i) { const uint32_t graphNodeIndex = (uint32_t)i; for (uint32_t adjacentIndex = graphAdjacencyPartition[graphNodeIndex]; adjacentIndex < graphAdjacencyPartition[graphNodeIndex + 1]; ++adjacentIndex) { const uint32_t adjacentNodeIndex = 
graphAdjacentNodeIndices[adjacentIndex]; if (adjacentNodeIndex > graphNodeIndex) // So as not to double-count { // Only count if the adjacent node belongs to this actor const uint32_t adjacentChunkIndex = graphChunkIndices[adjacentNodeIndex]; if ((hasExternalBonds && isInvalidIndex(adjacentChunkIndex)) || (!isInvalidIndex(adjacentChunkIndex) && chunkActorIndices[adjacentChunkIndex] == thisActorIndex)) { const uint32_t adjacentBondIndex = graphAdjacentBondIndices[adjacentIndex]; if (!edgeRemovedArray->test(adjacentBondIndex)) { serEdgeRemovedArray->reset(serBondIndex); } ++serBondIndex; } } } } } // Finally record size serHeader->m_size = static_cast<uint32_t>(offset); return serHeader->m_size; } uint32_t Actor::serializationRequiredStorage(NvBlastLog logFn) const { const Asset* asset = getAsset(); const SupportGraph& graph = asset->m_graph; const uint32_t* graphChunkIndices = graph.getChunkIndices(); const uint32_t* graphAdjacencyPartition = graph.getAdjacencyPartition(); const uint32_t* graphAdjacentNodeIndices = graph.getAdjacentNodeIndices(); const uint32_t* graphNodeIndexLinks = getFamilyHeader()->getGraphNodeIndexLinks(); const uint32_t* chunkActorIndices = getFamilyHeader()->getChunkActorIndices(); const uint32_t thisActorIndex = getIndex(); const bool hasExternalBonds = this->hasExternalBonds(); // Lower-support chunk count and bond counts for this actor need to be calculated. Iterate over all support chunks to count these. uint32_t lowerSupportChunkCount = 0; uint32_t bondCount = 0; if (m_graphNodeCount > 0) { for (uint32_t graphNodeIndex = m_firstGraphNodeIndex; !isInvalidIndex(graphNodeIndex); graphNodeIndex = graphNodeIndexLinks[graphNodeIndex]) { // Update bond count for (uint32_t adjacentIndex = graphAdjacencyPartition[graphNodeIndex]; adjacentIndex < graphAdjacencyPartition[graphNodeIndex + 1]; ++adjacentIndex) { const uint32_t adjacentNodeIndex = graphAdjacentNodeIndices[adjacentIndex]; if (adjacentNodeIndex > graphNodeIndex) // So as not to double-count { // Only count if the adjacent node belongs to this actor or the world const uint32_t adjacentChunkIndex = graphChunkIndices[adjacentNodeIndex]; if ((hasExternalBonds && isInvalidIndex(adjacentChunkIndex)) || (!isInvalidIndex(adjacentChunkIndex) && chunkActorIndices[adjacentChunkIndex] == thisActorIndex)) { ++bondCount; } } } // Update lower-support chunk count const uint32_t supportChunkIndex = graphChunkIndices[graphNodeIndex]; if (isInvalidIndex(supportChunkIndex)) { continue; } for (Asset::DepthFirstIt i(*asset, supportChunkIndex); (bool)i; ++i) { ++lowerSupportChunkCount; } } } else // Subsupport chunk { ++lowerSupportChunkCount; } const uint64_t dataSize = getActorSerializationSize(m_visibleChunkCount, lowerSupportChunkCount, m_graphNodeCount, bondCount); if (dataSize > UINT32_MAX) { NVBLASTLL_LOG_WARNING(logFn, "Nv::Blast::Actor::serializationRequiredStorage: Serialization block size exceeds 4GB. 
Returning 0.\n"); return 0; } return static_cast<uint32_t>(dataSize); } } // namespace Blast } // namespace Nv // API implementation extern "C" { uint32_t NvBlastActorGetSerializationSize(const NvBlastActor* actor, NvBlastLog logFn) { NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorGetSerializationSize: NULL actor pointer input.", return 0); const Nv::Blast::Actor& a = *static_cast<const Nv::Blast::Actor*>(actor); if (!a.isActive()) { NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorGetSerializationSize: inactive actor pointer input."); return 0; } return a.serializationRequiredStorage(logFn); } uint32_t NvBlastActorSerialize(void* buffer, uint32_t bufferSize, const NvBlastActor* actor, NvBlastLog logFn) { NVBLASTLL_CHECK(buffer != nullptr, logFn, "NvBlastActorSerialize: NULL buffer pointer input.", return 0); NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorSerialize: NULL actor pointer input.", return 0); const Nv::Blast::Actor& a = *static_cast<const Nv::Blast::Actor*>(actor); if (!a.isActive()) { NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorSerialize: inactive actor pointer input."); return 0; } return a.serialize(buffer, bufferSize, logFn); } NvBlastActor* NvBlastFamilyDeserializeActor(NvBlastFamily* family, const void* buffer, NvBlastLog logFn) { NVBLASTLL_CHECK(family != nullptr, logFn, "NvBlastFamilyDeserializeActor: NULL family input. No actor deserialized.", return nullptr); NVBLASTLL_CHECK(buffer != nullptr, logFn, "NvBlastFamilyDeserializeActor: NULL buffer pointer input. No actor deserialized.", return nullptr); return Nv::Blast::Actor::deserialize(family, buffer, logFn); } } // extern "C"
26,762
C++
42.945813
181
0.631418
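A sketch of the single-actor round trip implemented above: query the required storage, serialize into a caller buffer, and later deserialize into a family bound to the same asset. Error handling is minimal and the buffer type is just an example.

#include <vector>
static std::vector<char> saveActor(const NvBlastActor* actor, NvBlastLog logFn)
{
    std::vector<char> buffer(NvBlastActorGetSerializationSize(actor, logFn));
    if (!buffer.empty())
    {
        const uint32_t written = NvBlastActorSerialize(buffer.data(), (uint32_t)buffer.size(), actor, logFn);
        buffer.resize(written);   // written == 0 means serialization failed
    }
    return buffer;
}

static NvBlastActor* restoreActor(NvBlastFamily* family, const std::vector<char>& buffer, NvBlastLog logFn)
{
    // The family must have been created from (or bound via NvBlastFamilySetAsset to) the same asset.
    return buffer.empty() ? nullptr : NvBlastFamilyDeserializeActor(family, buffer.data(), logFn);
}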
NVIDIA-Omniverse/PhysX/blast/source/sdk/lowlevel/NvBlastChunkHierarchy.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTCHUNKHIERARCHY_H #define NVBLASTCHUNKHIERARCHY_H #include "NvBlastIndexFns.h" #include "NvBlastDLink.h" #include "NvBlast.h" #include "NvBlastAssert.h" #include "NvBlastIteratorBase.h" namespace Nv { namespace Blast { /** Chunk hierarchy depth-first iterator. Traverses subtree with root given by startChunkIndex. Will not traverse chunks with index at or beyond chunkIndexLimit. */ class ChunkDepthFirstIt : public IteratorBase<uint32_t> { public: /** Constructed from a chunk array. */ ChunkDepthFirstIt(const NvBlastChunk* chunks, uint32_t startChunkIndex, uint32_t chunkIndexLimit) : IteratorBase<uint32_t>(startChunkIndex), m_chunks(chunks), m_stop(startChunkIndex), m_limit(chunkIndexLimit) { if (m_curr >= m_limit) { m_curr = invalidIndex<uint32_t>(); } } /** Pre-increment. Only use if valid() == true. */ uint32_t operator ++ () { NVBLAST_ASSERT(!isInvalidIndex(m_curr)); const NvBlastChunk* chunk = m_chunks + m_curr; if (chunk->childIndexStop > chunk->firstChildIndex && chunk->firstChildIndex < m_limit) { m_curr = chunk->firstChildIndex; } else { for (;;) { if (m_curr == m_stop) { m_curr = invalidIndex<uint32_t>(); break; } NVBLAST_ASSERT(!isInvalidIndex(chunk->parentChunkIndex)); // This should not be possible with this search const NvBlastChunk* parentChunk = m_chunks + chunk->parentChunkIndex; if (++m_curr < parentChunk->childIndexStop) { break; // Sibling chunk is valid, that's the next chunk } m_curr = chunk->parentChunkIndex; chunk = parentChunk; } } return m_curr; } private: const NvBlastChunk* m_chunks; uint32_t m_stop; uint32_t m_limit; }; /** Enumerates chunk indices in a subtree with root given by chunkIndex, in breadth-first order. Will not traverse chunks with index at or beyond chunkIndexLimit. 
Returns the number of indices written to the chunkIndex array */ NV_INLINE uint32_t enumerateChunkHierarchyBreadthFirst ( uint32_t* chunkIndices, uint32_t chunkIndicesSize, const NvBlastChunk* chunks, uint32_t chunkIndex, bool includeRoot = true, uint32_t chunkIndexLimit = invalidIndex<uint32_t>() ) { if (chunkIndicesSize == 0) { return 0; } uint32_t chunkIndexCount = 0; bool rootHandled = false; if (includeRoot) { chunkIndices[chunkIndexCount++] = chunkIndex; rootHandled = true; } for (uint32_t curr = 0; !rootHandled || curr < chunkIndexCount;) { const NvBlastChunk& chunk = chunks[rootHandled ? chunkIndices[curr] : chunkIndex]; if (chunk.firstChildIndex < chunkIndexLimit) { const uint32_t childIndexStop = chunk.childIndexStop < chunkIndexLimit ? chunk.childIndexStop : chunkIndexLimit; const uint32_t childIndexBufferStop = chunk.firstChildIndex + (chunkIndicesSize - chunkIndexCount); const uint32_t stop = childIndexStop < childIndexBufferStop ? childIndexStop : childIndexBufferStop; for (uint32_t childIndex = chunk.firstChildIndex; childIndex < stop; ++childIndex) { chunkIndices[chunkIndexCount++] = childIndex; } } if (rootHandled) { ++curr; } rootHandled = true; } return chunkIndexCount; } /** VisibilityRep must have m_firstVisibleChunkIndex and m_visibleChunkCount fields */ template<class VisibilityRep> void updateVisibleChunksFromSupportChunk ( VisibilityRep* actors, IndexDLink<uint32_t>* visibleChunkIndexLinks, uint32_t* chunkActorIndices, uint32_t actorIndex, uint32_t supportChunkIndex, const NvBlastChunk* chunks, uint32_t upperSupportChunkCount ) { uint32_t chunkIndex = supportChunkIndex; uint32_t chunkActorIndex = chunkActorIndices[supportChunkIndex]; uint32_t newChunkActorIndex = actorIndex; VisibilityRep& thisActor = actors[actorIndex]; do { if (chunkActorIndex == newChunkActorIndex) { break; // Nothing to do } const uint32_t parentChunkIndex = chunks[chunkIndex].parentChunkIndex; const uint32_t parentChunkActorIndex = parentChunkIndex != invalidIndex<uint32_t>() ? chunkActorIndices[parentChunkIndex] : invalidIndex<uint32_t>(); const bool chunkVisible = chunkActorIndex != parentChunkActorIndex; // If the chunk is visible, it needs to be removed from its old actor's visibility list if (chunkVisible && !isInvalidIndex(chunkActorIndex)) { VisibilityRep& chunkActor = actors[chunkActorIndex]; IndexDList<uint32_t>().removeFromList(chunkActor.m_firstVisibleChunkIndex, visibleChunkIndexLinks, chunkIndex); --chunkActor.m_visibleChunkCount; } // Now update the chunk's actor index const uint32_t oldChunkActorIndex = chunkActorIndices[chunkIndex]; chunkActorIndices[chunkIndex] = newChunkActorIndex; if (newChunkActorIndex != invalidIndex<uint32_t>() && parentChunkActorIndex != newChunkActorIndex) { // The chunk is now visible. 
Add it to this actor's visibility list IndexDList<uint32_t>().insertListHead(thisActor.m_firstVisibleChunkIndex, visibleChunkIndexLinks, chunkIndex); ++thisActor.m_visibleChunkCount; // Remove its children from this actor's visibility list if (actorIndex != oldChunkActorIndex) { const NvBlastChunk& chunk = chunks[chunkIndex]; if (chunk.firstChildIndex < upperSupportChunkCount) // Only need to deal with upper-support children { for (uint32_t childChunkIndex = chunk.firstChildIndex; childChunkIndex < chunk.childIndexStop; ++childChunkIndex) { if (chunkActorIndices[childChunkIndex] == actorIndex) { IndexDList<uint32_t>().removeFromList(thisActor.m_firstVisibleChunkIndex, visibleChunkIndexLinks, childChunkIndex); --thisActor.m_visibleChunkCount; } } } } } if (parentChunkIndex != invalidIndex<uint32_t>()) { // If all of its siblings have the same index, then the parent will too. Otherwise, the parent will have an invalid index and its children will be visible const NvBlastChunk& parentChunk = chunks[parentChunkIndex]; bool uniform = true; for (uint32_t childChunkIndex = parentChunk.firstChildIndex; uniform && childChunkIndex < parentChunk.childIndexStop; ++childChunkIndex) { uniform = (newChunkActorIndex == chunkActorIndices[childChunkIndex]); } if (!uniform) { newChunkActorIndex = invalidIndex<uint32_t>(); // no need to search if the parent index is invalid // the conditional in the loop could never be true in that case if (parentChunkActorIndex != invalidIndex<uint32_t>()) { for (uint32_t childChunkIndex = parentChunk.firstChildIndex; childChunkIndex < parentChunk.childIndexStop; ++childChunkIndex) { const uint32_t childChunkActorIndex = chunkActorIndices[childChunkIndex]; if (childChunkActorIndex != invalidIndex<uint32_t>() && childChunkActorIndex == parentChunkActorIndex) { // The child was invisible. Add it to its actor's visibility list VisibilityRep& childChunkActor = actors[childChunkActorIndex]; IndexDList<uint32_t>().insertListHead(childChunkActor.m_firstVisibleChunkIndex, visibleChunkIndexLinks, childChunkIndex); ++childChunkActor.m_visibleChunkCount; } } } } } // Climb the hierarchy chunkIndex = parentChunkIndex; chunkActorIndex = parentChunkActorIndex; } while (chunkIndex != invalidIndex<uint32_t>()); } } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTCHUNKHIERARCHY_H
10,213
C++
38.898437
167
0.643592
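NvBlastChunkHierarchy.h provides both a depth-first iterator (ChunkDepthFirstIt) and a breadth-first enumerator (enumerateChunkHierarchyBreadthFirst) over a chunk subtree. A hedged usage sketch follows, not code from the repository; it assumes `chunks` is the asset's NvBlastChunk array, `rootChunkIndex` is a valid subtree root, and the asset's total chunk count serves as the traversal limit.

#include <cstdint>
#include <vector>
#include "NvBlastChunkHierarchy.h"   // internal low-level header defining the utilities used below

// Depth-first: count every chunk in the subtree, including the root.
uint32_t countSubtreeChunks(const NvBlastChunk* chunks, uint32_t rootChunkIndex, uint32_t chunkCount)
{
    uint32_t count = 0;
    for (Nv::Blast::ChunkDepthFirstIt it(chunks, rootChunkIndex, chunkCount); (bool)it; ++it)
    {
        ++count;    // (uint32_t)it yields the current chunk index
    }
    return count;
}

// Breadth-first: gather the subtree's chunk indices (root included) into a vector.
std::vector<uint32_t> gatherSubtreeChunks(const NvBlastChunk* chunks, uint32_t rootChunkIndex, uint32_t chunkCount)
{
    std::vector<uint32_t> indices(chunkCount);
    const uint32_t written = Nv::Blast::enumerateChunkHierarchyBreadthFirst(
        indices.data(), static_cast<uint32_t>(indices.size()), chunks, rootChunkIndex, true, chunkCount);
    indices.resize(written);
    return indices;
}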
NVIDIA-Omniverse/PhysX/blast/source/sdk/lowlevel/NvBlastSupportGraph.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTSUPPORTGRAPH_H #define NVBLASTSUPPORTGRAPH_H #include "NvBlastIndexFns.h" #include "NvBlastMemory.h" namespace Nv { namespace Blast { /** Describes the connectivity between support chunks via bonds. Vertices in the support graph are termed "nodes," and represent particular chunks (NvBlastChunk) in an NvBlastAsset. The indexing for nodes is not the same as that for chunks. Only some chunks are represented by nodes in the graph, and these chunks are called "support chunks." Adjacent node indices and adjacent bond indices are stored for each node, and therefore each bond is represented twice in this graph, going from node[i] -> node[j] and from node[j] -> node[i]. Therefore the size of the getAdjacentNodeIndices() and getAdjacentBondIndices() arrays are twice the number of bonds stored in the corresponding NvBlastAsset. The graph is used as follows. Given a SupportGraph "graph" and node index i, (0 <= i < graph.nodeCount), one may find all adjacent bonds and nodes using: const uint32_t* adjacencyPartition = graph.getAdjacencyPartition(); const uint32_t* adjacentNodeIndices = graph.getAdjacentNodeIndices(); const uint32_t* adjacentBondIndices = graph.getAdjacentBondIndices(); // adj is the lookup value in adjacentNodeIndices and graph.getAdjacentBondIndices() for (uint32_t adj = adjacencyPartition[i]; adj < adjacencyPartition[i+1]; ++adj) { // An adjacent node: uint32_t adjacentNodeIndex = adjacentNodeIndices[adj]; // The corresponding bond (that connects node index i with node indexed adjacentNodeIndex: uint32_t adjacentBondIndex = adjacentBondIndices[adj]; } For a graph node with index i, the corresponding asset chunk index is found using graph.getChunkIndices()[i]. The reverse mapping (obtaining a graph node index from an asset chunk index) can be done using the NvBlastAssetGetChunkToGraphNodeMap(asset, logFn); function. See the documentation for its use. The returned "node index" for a non-support chunk is the invalid value 0xFFFFFFFF. 
*/ struct SupportGraph { /** Total number of nodes in the support graph. */ uint32_t m_nodeCount; /** Indices of chunks represented by the nodes. getChunkIndices returns an array of size m_nodeCount. */ NvBlastBlockArrayData(uint32_t, m_chunkIndicesOffset, getChunkIndices, m_nodeCount); /** Adjacency lookup table, of type uint32_t. Partitions both the getAdjacentNodeIndices() and the getAdjacentBondIndices() arrays into subsets corresponding to each node. The size of this array is nodeCount+1. For 0 <= i < nodeCount, getAdjacencyPartition()[i] is the index of the first element in getAdjacentNodeIndices() (or getAdjacentBondIndices()) for nodes adjacent to the node with index i. getAdjacencyPartition()[nodeCount] is the size of the getAdjacentNodeIndices() and getAdjacentBondIndices() arrays. This allows one to easily count the number of nodes adjacent to a node with index i, using getAdjacencyPartition()[i+1] - getAdjacencyPartition()[i]. getAdjacencyPartition returns an array of size m_nodeCount + 1. */ NvBlastBlockArrayData(uint32_t, m_adjacencyPartitionOffset, getAdjacencyPartition, m_nodeCount + 1); /** Array of uint32_t composed of subarrays holding the indices of nodes adjacent to a given node. The subarrays may be accessed through the getAdjacencyPartition() array. getAdjacentNodeIndices returns an array of size getAdjacencyPartition()[m_nodeCount]. */ NvBlastBlockArrayData(uint32_t, m_adjacentNodeIndicesOffset, getAdjacentNodeIndices, getAdjacencyPartition()[m_nodeCount]); /** Array of uint32_t composed of subarrays holding the indices of bonds (NvBlastBond) for a given node. The subarrays may be accessed through the getAdjacencyPartition() array. getAdjacentBondIndices returns an array of size getAdjacencyPartition()[m_nodeCount]. */ NvBlastBlockArrayData(uint32_t, m_adjacentBondIndicesOffset, getAdjacentBondIndices, getAdjacencyPartition()[m_nodeCount]); /** Finds the bond between two given graph nodes (if it exists) and returns the bond index. If no bond exists, returns invalidIndex<uint32_t>(). \return the index of the bond between the given nodes. */ uint32_t findBond(uint32_t nodeIndex0, uint32_t nodeIndex1) const; }; //////// SupportGraph inline member functions //////// NV_INLINE uint32_t SupportGraph::findBond(uint32_t nodeIndex0, uint32_t nodeIndex1) const { const uint32_t* adjacencyPartition = getAdjacencyPartition(); const uint32_t* adjacentNodeIndices = getAdjacentNodeIndices(); const uint32_t* adjacentBondIndices = getAdjacentBondIndices(); // Iterate through all neighbors of nodeIndex0 chunk for (uint32_t i = adjacencyPartition[nodeIndex0]; i < adjacencyPartition[nodeIndex0 + 1]; i++) { if (adjacentNodeIndices[i] == nodeIndex1) { return adjacentBondIndices[i]; } } return invalidIndex<uint32_t>(); } } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTSUPPORTGRAPH_H
6,716
C++

43.483443
191
0.749702
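The adjacency-partition layout described in NvBlastSupportGraph.h makes per-node neighbor counts and bond lookups one-liners. A hedged sketch follows, not code from the repository; it assumes `graph` is a valid Nv::Blast::SupportGraph and the node indices are in range.

#include <cstdint>
#include "NvBlastSupportGraph.h"   // internal low-level header defining SupportGraph
#include "NvBlastIndexFns.h"       // Nv::Blast::isInvalidIndex / invalidIndex

// Number of nodes adjacent to nodeIndex: the size of its adjacency subarray,
// delimited by consecutive entries of the adjacency partition.
uint32_t neighborCount(const Nv::Blast::SupportGraph& graph, uint32_t nodeIndex)
{
    const uint32_t* adjacencyPartition = graph.getAdjacencyPartition();
    return adjacencyPartition[nodeIndex + 1] - adjacencyPartition[nodeIndex];
}

// True iff a bond connects the two nodes; findBond returns invalidIndex<uint32_t>() otherwise.
bool nodesAreBonded(const Nv::Blast::SupportGraph& graph, uint32_t nodeIndex0, uint32_t nodeIndex1)
{
    return !Nv::Blast::isInvalidIndex(graph.findBond(nodeIndex0, nodeIndex1));
}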
NVIDIA-Omniverse/PhysX/blast/source/sdk/lowlevel/NvBlastActor.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTACTOR_H #define NVBLASTACTOR_H #include "NvBlastAsset.h" #include "NvBlastDLink.h" #include "NvBlastIteratorBase.h" #include "NvBlastSupportGraph.h" #include "NvBlastFamilyGraph.h" #include "NvBlastPreprocessorInternal.h" #include <cstring> namespace Nv { namespace Blast { // Forward declarations class FamilyGraph; struct FamilyHeader; /** Internal implementation of solver actor. These objects are stored within the family in a single array. A pointer to a Actor class will be given to the user through the NvBlastActor opaque type. */ class Actor : public NvBlastActor { friend struct FamilyHeader; friend void updateVisibleChunksFromSupportChunk<>(Actor*, IndexDLink<uint32_t>*, uint32_t*, uint32_t, uint32_t, const NvBlastChunk*, uint32_t); public: Actor() : m_familyOffset(0), m_firstVisibleChunkIndex(UINT32_MAX), m_visibleChunkCount(0), m_firstGraphNodeIndex(UINT32_MAX), m_graphNodeCount(0), m_leafChunkCount(0) {} //////// Accessors //////// /** Find the family (see FamilyHeader) that this actor belongs to. \return a pointer to the FamilyHeader for this actor. */ FamilyHeader* getFamilyHeader() const; /** Utility to get the asset this actor is associated with, through its family. \return the asset associated with this actor. */ const Asset* getAsset() const; /** Since this object is not deleted (unless the family is deleted), we use m_familyOffset to determine if the actor is valid, or "active." When no actors in an instance return isActive(), it should be safe to delete the family. \return true iff this actor is valid for use (active). */ bool isActive() const; /** Whether or not this actor represents a subsupport chunk. If the actor contains a subsupport chunk, then it can have only that chunk. \return true iff this actor contains a chunk which is a descendant of a support chunk. */ bool isSubSupportChunk() const; /** Whether or not this actor represents a single support chunk. 
If the actor contains a single support chunk, it can have no other chunks associated with it. \return true iff this actor contains exactly one support chunk. */ bool isSingleSupportChunk() const; /** Utility to calculate actor index. \return the index of this actor in the FamilyHeader's getActors() array. */ uint32_t getIndex() const; /** Offset to block of memory which holds the data associated with all actors in this actor's lineage \return the family offset. */ uint32_t getFamilyOffset() const; void setFamilyOffset(uint32_t familyOffset); /** The number of visible chunks. This is calculated from updateVisibleChunksFromGraphNodes(). See also getFirstVisibleChunkIndex. \return the number of chunks in the actor's visible chunk index list. */ uint32_t getVisibleChunkCount() const; void setVisibleChunkCount(uint32_t visibleChunkCount); /** Access to visible chunk linked list for this actor. The index returned is that of a link in the FamilyHeader's getVisibleChunkIndexLinks(). \return the index of the head of the visible chunk linked list. */ uint32_t getFirstVisibleChunkIndex() const; void setFirstVisibleChunkIndex(uint32_t firstVisibleChunkIndex); /** The number of graph nodes, corresponding to support chunks, for this actor. See also getFirstGraphNodeIndex. \return the number of graph nodes in the actor's graph node index list. */ uint32_t getGraphNodeCount() const; void setGraphNodeCount(uint32_t graphNodeCount); /** The number of leaf chunks for this actor. \return number of leaf chunks for this actor. */ uint32_t getLeafChunkCount() const; void setLeafChunkCount(uint32_t leafChunkCount); /** Access to graph node linked list for this actor. The index returned is that of a link in the FamilyHeader's getGraphNodeIndexLinks(). \return the index of the head of the graph node linked list. */ uint32_t getFirstGraphNodeIndex() const; void setFirstGraphNodeIndex(uint32_t firstGraphNodeIndex); /** Access to the index of the first subsupport chunk. \return the index of the first subsupport chunk. */ uint32_t getFirstSubsupportChunkIndex() const; /** Access to the support graph. \return the support graph associated with this actor. */ const SupportGraph* getGraph() const; /** Access the instance graph for islands searching. Return the dynamic data generated for the support graph. (See FamilyGraph.) This is used to store current connectivity information based upon bond and chunk healths, as well as cached intermediate data for faster incremental updates. */ FamilyGraph* getFamilyGraph() const; /** Access to the chunks, of type NvBlastChunk. \return an array of size m_chunkCount. */ NvBlastChunk* getChunks() const; /** Access to the bonds, of type NvBlastBond. \return an array of size m_bondCount. */ NvBlastBond* getBonds() const; /** Access to the health for each support chunk and subsupport chunk, of type float. Use getAsset()->getContiguousLowerSupportIndex() to map lower-support chunk indices into the range of indices valid for this array. \return a float array of chunk healths. */ float* getLowerSupportChunkHealths() const; /** Access to the start of the subsupport chunk health array. \return the array of health values associated with all descendants of support chunks. */ float* getSubsupportChunkHealths() const; /** Bond health for the interfaces between two chunks, of type float. Since the bond is shared by two chunks, the same bond health is used for chunk[i] -> chunk[j] as for chunk[j] -> chunk[i]. \return the array of healths associated with all bonds in the support graph. 
*/ float* getBondHealths() const; /** Graph node index links, of type uint32_t. The successor to index[i] is m_graphNodeIndexLinksOffset[i]. A value of invalidIndex<uint32_t>() indicates no successor. getGraphNodeIndexLinks returns an array of size m_asset->m_graphNodeCount. */ const uint32_t* getGraphNodeIndexLinks() const; //////// Iterators //////// /** Visible chunk iterator. Usage: Given a solver actor a, for (Actor::VisibleChunkIt i = a; (bool)i; ++i) { uint32_t visibleChunkIndex = (uint32_t)i; // visibleChunkIndex references the asset index list } */ class VisibleChunkIt : public DListIt<uint32_t> { public: /** Constructed from an actor. */ VisibleChunkIt(const Actor& actor); }; /** Graph node iterator. Usage: Given a solver actor a, for (Actor::GraphNodeIt i = a; (bool)i; ++i) { uint32_t graphNodeIndex = (uint32_t)i; // graphNodeIndex references the asset's graph node index list } */ class GraphNodeIt : public LListIt<uint32_t> { public: /** Constructed from an actor. */ GraphNodeIt(const Actor& actor); }; //////// Operations //////// /** Create an actor from a descriptor (creates a family). This actor will represent an unfractured instance of the asset. The asset must be in a valid state, for example each chunk hierarchy in it must contain at least one support chunk (a single support chunk in a hierarchy corresponds to the root chunk). This will always be the case for assets created by NvBlastCreateAsset. \param[in] family Family in which to create a new actor. The family must be valid and have no other actors in it. (See createFamily.) \param[in] desc Actor initialization data, must be a valid pointer. \param[in] scratch User-supplied scratch memory of size createRequiredScratch(desc) bytes. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return the new actor if the input is valid (by the conditions described above), NULL otherwise. */ static Actor* create(NvBlastFamily* family, const NvBlastActorDesc* desc, void* scratch, NvBlastLog logFn); /** Returns the size of the scratch space (in bytes) required to be passed into the create function, based upon the family that will be passed to the create function. \param[in] family The family being instanced. \return the number of bytes required. */ static size_t createRequiredScratch(const NvBlastFamily* family, NvBlastLog logFn); /** Deserialize a single Actor from a buffer. An actor family must given, into which the actor will be inserted if it is compatible. That is, it must not share any chunks or internal IDs with the actors already present in the block. \param[in] family Family in which to deserialize the actor. \param[in] buffer Buffer containing the serialized actor data. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return the deserialized actor if successful, NULL otherwise. */ static Actor* deserialize(NvBlastFamily* family, const void* buffer, NvBlastLog logFn); /** Serialize actor into single-actor buffer. \param[out] buffer User-supplied buffer, must be at least of size given by NvBlastActorGetSerializationSize(actor). \param[in] bufferSize The size of the user-supplied buffer. The buffer size must be less than 4GB. If NvBlastActorGetSerializationSize(actor) >= 4GB, this actor cannot be serialized with this method. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return the number of bytes written to the buffer, or 0 if there is an error (such as an under-sized buffer). 
*/ uint32_t serialize(void* buffer, uint32_t bufferSize, NvBlastLog logFn) const; /** Calculate the space required to serialize this actor. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return the required buffer size in bytes. */ uint32_t serializationRequiredStorage(NvBlastLog logFn) const; /** Release this actor's association with a family, if any. This actor should be considered deleted after this function is called. \return true if release was successful (actor was active). */ bool release(); //////// Damage and fracturing methods //////// /** See NvBlastActorGenerateFracture */ void generateFracture(NvBlastFractureBuffers* commandBuffers, const NvBlastDamageProgram& program, const void* programParams, NvBlastLog logFn, NvBlastTimers* timers) const; /** Damage bond between two chunks by health amount (instance graph also will be notified in case bond is broken after). */ uint32_t damageBond(uint32_t nodeIndex0, uint32_t nodeIndex1, float healthDamage); /** TODO: document */ void damageBond(uint32_t nodeIndex0, uint32_t nodeIndex1, uint32_t bondIndex, float healthDamage); /** TODO: document */ uint32_t damageBond(const NvBlastBondFractureData& cmd); /** See NvBlastActorApplyFracture */ void applyFracture(NvBlastFractureBuffers* eventBuffers, const NvBlastFractureBuffers* commands, NvBlastLog logFn, NvBlastTimers* timers); /** The scratch space required to call the findIslands function, or the split function, in bytes. \return the number of bytes required. */ size_t splitRequiredScratch() const; /** See NvBlastActorSplit */ uint32_t split(NvBlastActorSplitEvent* result, uint32_t newActorsMaxCount, void* scratch, NvBlastLog logFn, NvBlastTimers* timers); /** Perform islands search. Bonds which are broken when their health values drop to zero (or below) may lead to new islands of chunks which need to be split into new actors. This function labels all nodes in the instance graph (see FamilyGraph) with a unique index per island that may be used as actor indices for new islands. \param[in] scratch User-supplied scratch memory of size splitRequiredScratch(). \return the number of new islands found. */ uint32_t findIslands(void* scratch); /** Partition this actor into smaller pieces. If this actor represents a single support or subsupport chunk, then after this operation this actor will released if child chunks are created (see Return value), and its pointer no longer valid for use (unless it appears in the newActors list). This function will not split a leaf chunk actor. In that case, the actor is not destroyed and this function returns 0. \param[in] newActors user-supplied array of actor pointers to hold the actors generated from this partitioning. This array must be of size equal to the number of leaf chunks in the asset, to guarantee that all actors are reported. (See AssetDataHeader::m_leafChunkCount.) \param[in] newActorsSize The size of the user-supplied newActors array. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return the number of new actors created. If greater than newActorsSize, some actors are not reported in the newActors array. */ uint32_t partition(Actor** newActors, uint32_t newActorsSize, NvBlastLog logFn); /** Recalculate the visible chunk list for this actor based upon it graph node list (does not modify subsupport chunk actors) */ void updateVisibleChunksFromGraphNodes(); /** Partition this actor into smaller pieces if it is a single lower-support chunk actor. 
Use this function on single support or sub-support chunks. After this operation, if successful (child chunks created, see Return value), this actor will released, and its pointer no longer valid for use. This function will not split a leaf chunk actor. In that case, the actor is not destroyed and this function returns 0. \param[in] newActors User-supplied array of actor pointers to hold the actors generated from this partitioning. Note: this actor will be released. This array must be of size equal to the lower-support chunk's child count, to guarantee that all actors are reported. \param[in] newActorsSize The size of the user-supplied newActors array. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return the number of new actors created. */ uint32_t partitionSingleLowerSupportChunk(Actor** newActors, uint32_t newActorsSize, NvBlastLog logFn); /** Partition this actor into smaller pieces. Use this function if this actor contains more than one support chunk. After this operation, if successful, this actor will released, and its pointer no longer valid for use (unless it appears in the newActors list). \param[in] newActors User-supplied array of actor pointers to hold the actors generated from this partitioning. Note: this actor will not be released, but will hold a subset of the graph nodes that it had before the function was called. This array must be of size equal to the number of graph nodes in the asset, to guarantee that all actors are reported. \param[in] newActorsSize The size of the user-supplied newActors array. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return the number of new actors created. */ uint32_t partitionMultipleGraphNodes(Actor** newActors, uint32_t newActorsSize, NvBlastLog logFn); /** \return true iff this actor contains the "external" support graph node, created when a bond contains the invalidIndex<uint32_t>() value for one of their chunkIndices. */ bool hasExternalBonds() const; /** \return true iff this actor was damaged and split() call is required. */ bool isSplitRequired() const; private: //////// Data //////// /** Offset to block of memory which holds the data associated with all actors in this actor's lineage. This offset is positive. The block address is this object's pointer _minus_ the m_familyOffset. This value is initialized to 0, which denotes an invalid actor. Actors should be obtained through the FamilyHeader::borrowActor API, which will create a valid offset, and the FamilyHeader::returnActor API, which will zero the offset. */ uint32_t m_familyOffset; /** The index of the head of a doubly-linked list of visible chunk indices. If m_firstVisibleChunkIndex == invalidIndex<uint32_t>(), then there are no visible chunks. */ uint32_t m_firstVisibleChunkIndex; /** The number of elements in the visible chunk list. */ uint32_t m_visibleChunkCount; /** The index of the head of a singly-linked list of graph node indices. If m_firstGraphNodeIndex == invalidIndex<uint32_t>(), then there are no graph nodes. */ uint32_t m_firstGraphNodeIndex; /** The number of elements in the graph node list. */ uint32_t m_graphNodeCount; /** The number of leaf chunks in this actor. */ uint32_t m_leafChunkCount; }; } // namespace Blast } // namespace Nv #include "NvBlastFamily.h" namespace Nv { namespace Blast { //////// Actor inline methods //////// NV_INLINE FamilyHeader* Actor::getFamilyHeader() const { NVBLAST_ASSERT(isActive()); return isActive() ? 
(FamilyHeader*)((uintptr_t)this - (uintptr_t)m_familyOffset) : nullptr; } NV_INLINE const Asset* Actor::getAsset() const { return getFamilyHeader()->m_asset; } NV_INLINE bool Actor::isActive() const { return m_familyOffset != 0; } NV_INLINE bool Actor::isSubSupportChunk() const { return m_graphNodeCount == 0; } NV_INLINE bool Actor::isSingleSupportChunk() const { return m_graphNodeCount == 1; } NV_INLINE uint32_t Actor::getIndex() const { NVBLAST_ASSERT(isActive()); const FamilyHeader* header = getFamilyHeader(); NVBLAST_ASSERT(header != nullptr); const size_t index = this - header->getActors(); NVBLAST_ASSERT(index <= UINT32_MAX); return (uint32_t)index; } NV_INLINE uint32_t Actor::getFamilyOffset() const { return m_familyOffset; } NV_INLINE void Actor::setFamilyOffset(uint32_t familyOffset) { m_familyOffset = familyOffset; } NV_INLINE uint32_t Actor::getVisibleChunkCount() const { return m_visibleChunkCount; } NV_INLINE void Actor::setVisibleChunkCount(uint32_t visibleChunkCount) { m_visibleChunkCount = visibleChunkCount; } NV_INLINE uint32_t Actor::getFirstVisibleChunkIndex() const { return m_firstVisibleChunkIndex; } NV_INLINE void Actor::setFirstVisibleChunkIndex(uint32_t firstVisibleChunkIndex) { m_firstVisibleChunkIndex = firstVisibleChunkIndex; } NV_INLINE uint32_t Actor::getGraphNodeCount() const { return m_graphNodeCount; } NV_INLINE void Actor::setGraphNodeCount(uint32_t graphNodeCount) { m_graphNodeCount = graphNodeCount; } NV_INLINE uint32_t Actor::getLeafChunkCount() const { return m_leafChunkCount; } NV_INLINE void Actor::setLeafChunkCount(uint32_t leafChunkCount) { m_leafChunkCount = leafChunkCount; } NV_INLINE uint32_t Actor::getFirstGraphNodeIndex() const { return m_firstGraphNodeIndex; } NV_INLINE void Actor::setFirstGraphNodeIndex(uint32_t firstGraphNodeIndex) { m_firstGraphNodeIndex = firstGraphNodeIndex; } NV_INLINE uint32_t Actor::getFirstSubsupportChunkIndex() const { return getAsset()->m_firstSubsupportChunkIndex; } NV_INLINE const SupportGraph* Actor::getGraph() const { return &getAsset()->m_graph; } NV_INLINE FamilyGraph* Actor::getFamilyGraph() const { return getFamilyHeader()->getFamilyGraph(); } NV_INLINE NvBlastChunk* Actor::getChunks() const { return getAsset()->getChunks(); } NV_INLINE NvBlastBond* Actor::getBonds() const { return getAsset()->getBonds(); } NV_INLINE float* Actor::getLowerSupportChunkHealths() const { return getFamilyHeader()->getLowerSupportChunkHealths(); } NV_INLINE float* Actor::getSubsupportChunkHealths() const { return getFamilyHeader()->getSubsupportChunkHealths(); } NV_INLINE float* Actor::getBondHealths() const { return getFamilyHeader()->getBondHealths(); } NV_INLINE const uint32_t* Actor::getGraphNodeIndexLinks() const { return getFamilyHeader()->getGraphNodeIndexLinks(); } NV_INLINE bool Actor::release() { // Do nothing if this actor is not currently active. 
if (!isActive()) { return false; } FamilyHeader* header = getFamilyHeader(); // Clear the graph node list uint32_t* graphNodeIndexLinks = getFamilyHeader()->getGraphNodeIndexLinks(); while (!isInvalidIndex(m_firstGraphNodeIndex)) { const uint32_t graphNodeIndex = m_firstGraphNodeIndex; m_firstGraphNodeIndex = graphNodeIndexLinks[m_firstGraphNodeIndex]; graphNodeIndexLinks[graphNodeIndex] = invalidIndex<uint32_t>(); --m_graphNodeCount; } NVBLAST_ASSERT(m_graphNodeCount == 0); const Asset* asset = getAsset(); // Clear the visible chunk list IndexDLink<uint32_t>* visibleChunkIndexLinks = header->getVisibleChunkIndexLinks(); uint32_t* chunkActorIndices = header->getChunkActorIndices(); while (!isInvalidIndex(m_firstVisibleChunkIndex)) { // Descendants of the visible actor may be accessed again if the actor is deserialized. Clear subtree. for (Asset::DepthFirstIt i(*asset, m_firstVisibleChunkIndex, true); (bool)i; ++i) { chunkActorIndices[(uint32_t)i] = invalidIndex<uint32_t>(); } IndexDList<uint32_t>().removeListHead(m_firstVisibleChunkIndex, visibleChunkIndexLinks); --m_visibleChunkCount; } NVBLAST_ASSERT(m_visibleChunkCount == 0); // Clear the leaf chunk count m_leafChunkCount = 0; // This invalidates the actor and decrements the reference count header->returnActor(*this); return true; } NV_INLINE uint32_t Actor::partition(Actor** newActors, uint32_t newActorsSize, NvBlastLog logFn) { NVBLASTLL_CHECK(newActorsSize == 0 || newActors != nullptr, logFn, "Nv::Blast::Actor::partition: NULL newActors pointer array input with non-zero newActorCount.", return 0); // Call one of two partition functions depending on the actor's support status return m_graphNodeCount <= 1 ? partitionSingleLowerSupportChunk(newActors, newActorsSize, logFn) : // This actor will partition into subsupport chunks partitionMultipleGraphNodes(newActors, newActorsSize, logFn); // This actor will partition into support chunks } NV_INLINE bool Actor::hasExternalBonds() const { const SupportGraph& graph = *getGraph(); if (graph.m_nodeCount == 0) { return false; // This shouldn't happen } const uint32_t lastGraphChunkIndex = graph.getChunkIndices()[graph.m_nodeCount - 1]; if (!isInvalidIndex(lastGraphChunkIndex)) { return false; // There is no external node } return getFamilyGraph()->getIslandIds()[graph.m_nodeCount - 1] == getIndex(); } NV_INLINE bool Actor::isSplitRequired() const { NVBLAST_ASSERT(isActive()); if (getGraphNodeCount() <= 1) { uint32_t chunkHealthIndex = isSingleSupportChunk() ? getIndex() : getFirstVisibleChunkIndex() - getFirstSubsupportChunkIndex() + getGraph()->m_nodeCount; float* chunkHealths = getLowerSupportChunkHealths(); if (chunkHealths[chunkHealthIndex] <= 0.0f) { const uint32_t chunkIndex = m_graphNodeCount == 0 ? 
m_firstVisibleChunkIndex : getGraph()->getChunkIndices()[m_firstGraphNodeIndex]; if (!isInvalidIndex(chunkIndex)) { const NvBlastChunk& chunk = getChunks()[chunkIndex]; uint32_t childCount = chunk.childIndexStop - chunk.firstChildIndex; return childCount > 0; } } } else { uint32_t* firstDirtyNodeIndices = getFamilyGraph()->getFirstDirtyNodeIndices(); if (!isInvalidIndex(firstDirtyNodeIndices[getIndex()])) { return true; } } return false; } //////// Actor::VisibleChunkIt inline methods //////// NV_INLINE Actor::VisibleChunkIt::VisibleChunkIt(const Actor& actor) : DListIt<uint32_t>(actor.m_firstVisibleChunkIndex, actor.getFamilyHeader()->getVisibleChunkIndexLinks()) { } //////// Actor::GraphNodeIt inline methods //////// NV_INLINE Actor::GraphNodeIt::GraphNodeIt(const Actor& actor) : LListIt<uint32_t>(actor.m_firstGraphNodeIndex, actor.getFamilyHeader()->getGraphNodeIndexLinks()) { } //////// Helper functions //////// #if NVBLASTLL_CHECK_PARAMS /** Helper function to validate fracture buffer values being meaningful. */ static inline bool isValid(const NvBlastFractureBuffers* buffers) { if (buffers->chunkFractureCount != 0 && buffers->chunkFractures == nullptr) return false; if (buffers->bondFractureCount != 0 && buffers->bondFractures == nullptr) return false; return true; } #endif } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTACTOR_H
27,675
C++
33.294919
208
0.689106
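NvBlastActor.h documents a damage-then-split workflow: check isSplitRequired(), allocate splitRequiredScratch() bytes, then call split() with room for up to the actor's leaf chunk count of new actors. A hedged sketch follows, not code from the repository; it assumes the internal Actor type is accessible, `leafChunkCount` is nonzero, malloc-based scratch allocation is acceptable, and timers are not collected.

#include <cstdlib>
#include <vector>
#include "NvBlastActor.h"   // internal low-level header defining Nv::Blast::Actor

uint32_t splitIfNeeded(Nv::Blast::Actor& actor, uint32_t leafChunkCount, NvBlastLog logFn)
{
    if (!actor.isSplitRequired())
    {
        return 0;   // No broken bonds or destroyed chunks require a split
    }

    // Scratch must be at least splitRequiredScratch() bytes for the islands search.
    void* scratch = std::malloc(actor.splitRequiredScratch());
    if (scratch == nullptr)
    {
        return 0;
    }

    // Sized to the leaf chunk count so every generated actor can be reported.
    std::vector<NvBlastActor*> newActors(leafChunkCount, nullptr);
    NvBlastActorSplitEvent result;
    result.newActors = newActors.data();
    result.deletedActor = nullptr;

    // split() may release this actor; result.deletedActor reports that, and the
    // return value is the number of new actors written into result.newActors.
    const uint32_t newActorCount = actor.split(&result, leafChunkCount, scratch, logFn, nullptr);

    std::free(scratch);
    return newActorCount;
}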
NVIDIA-Omniverse/PhysX/blast/source/sdk/lowlevel/NvBlastActor.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastActor.h" #include "NvBlastFamilyGraph.h" #include "NvBlastChunkHierarchy.h" #include "NvBlastIndexFns.h" #include "NvBlastDLink.h" #include "NvBlastGeometry.h" #include "NvBlastTime.h" #include <float.h> #include <algorithm> namespace Nv { namespace Blast { //////// Actor static methods //////// size_t Actor::createRequiredScratch(const NvBlastFamily* family, NvBlastLog logFn) { NVBLASTLL_CHECK(family != nullptr && reinterpret_cast<const FamilyHeader*>(family)->m_asset != nullptr, logFn, "Actor::createRequiredScratch: NULL family input or asset.", return 0); const Asset& solverAsset = *reinterpret_cast<const FamilyHeader*>(family)->m_asset; return FamilyGraph::findIslandsRequiredScratch(solverAsset.m_graph.m_nodeCount); } Actor* Actor::create(NvBlastFamily* family, const NvBlastActorDesc* desc, void* scratch, NvBlastLog logFn) { NVBLASTLL_CHECK(family != nullptr, logFn, "Actor::create: NULL family pointer input.", return nullptr); NVBLASTLL_CHECK(reinterpret_cast<FamilyHeader*>(family)->m_asset != nullptr, logFn, "Actor::create: family has NULL asset.", return nullptr); NVBLASTLL_CHECK(reinterpret_cast<FamilyHeader*>(family)->m_asset->m_graph.m_nodeCount != 0, logFn, "Actor::create: family's asset has no support chunks.", return nullptr); NVBLASTLL_CHECK(desc != nullptr, logFn, "Actor::create: NULL desc pointer input.", return nullptr); NVBLASTLL_CHECK(scratch != nullptr, logFn, "Actor::create: NULL scratch input.", return nullptr); FamilyHeader* header = reinterpret_cast<FamilyHeader*>(family); if (header->m_actorCount > 0) { NVBLASTLL_LOG_ERROR(logFn, "Actor::create: input family is not empty."); return nullptr; } const Asset& solverAsset = *static_cast<const Asset*>(header->m_asset); const SupportGraph& graph = solverAsset.m_graph; // Lower support chunk healths - initialize float* lowerSupportChunkHealths = header->getLowerSupportChunkHealths(); if (desc->initialSupportChunkHealths != nullptr) // Health array given { const uint32_t* supportChunkIndices = 
graph.getChunkIndices(); for (uint32_t supportChunkNum = 0; supportChunkNum < graph.m_nodeCount; ++supportChunkNum) { const float initialHealth = desc->initialSupportChunkHealths[supportChunkNum]; for (Asset::DepthFirstIt i(solverAsset, supportChunkIndices[supportChunkNum]); (bool)i; ++i) { lowerSupportChunkHealths[solverAsset.getContiguousLowerSupportIndex((uint32_t)i)] = initialHealth; } } } else // Use uniform initialization { const uint32_t lowerSupportChunkCount = solverAsset.getLowerSupportChunkCount(); for (uint32_t i = 0; i < lowerSupportChunkCount; ++i) { lowerSupportChunkHealths[i] = desc->uniformInitialLowerSupportChunkHealth; } } // Bond healths - initialize const uint32_t bondCount = solverAsset.getBondCount(); float* bondHealths = header->getBondHealths(); if (desc->initialBondHealths != nullptr) // Health array given { memcpy(bondHealths, desc->initialBondHealths, bondCount * sizeof(float)); } else // Use uniform initialization { for (uint32_t bondNum = 0; bondNum < bondCount; ++bondNum) { bondHealths[bondNum] = desc->uniformInitialBondHealth; } } // Get first actor - NOTE: we don't send an event for this! May need to do so for consistency. Actor* actor = header->borrowActor(0); // Using actor[0] // Fill in actor fields actor->m_firstGraphNodeIndex = 0; actor->m_graphNodeCount = graph.m_nodeCount; actor->m_leafChunkCount = solverAsset.m_leafChunkCount; // Graph node index links - initialize to chain uint32_t* graphNodeLinks = header->getGraphNodeIndexLinks(); for (uint32_t i = 0; i < graph.m_nodeCount - 1; ++i) { graphNodeLinks[i] = i + 1; } graphNodeLinks[graph.m_nodeCount - 1] = invalidIndex<uint32_t>(); // Update visible chunks (we assume that all chunks belong to one actor at the beginning) actor->updateVisibleChunksFromGraphNodes(); // Initialize instance graph with this actor header->getFamilyGraph()->initialize(actor->getIndex(), &graph); // Call findIslands to set up the internal instance graph data header->getFamilyGraph()->findIslands(actor->getIndex(), scratch, &graph); return actor; } //////// Actor member methods //////// uint32_t Actor::damageBond(uint32_t nodeIndex0, uint32_t nodeIndex1, float healthDamage) { const uint32_t bondIndex = getGraph()->findBond(nodeIndex0, nodeIndex1); damageBond(nodeIndex0, nodeIndex1, bondIndex, healthDamage); return bondIndex; } void Actor::damageBond(uint32_t nodeIndex0, uint32_t nodeIndex1, uint32_t bondIndex, float healthDamage) { if (bondIndex == invalidIndex<uint32_t>()) { NVBLAST_ALWAYS_ASSERT(); return; } float* bondHealths = getBondHealths(); if (canTakeDamage(bondHealths[bondIndex]) && healthDamage > 0.0f) { // Subtract health bondHealths[bondIndex] -= healthDamage; // Was removed? 
if (bondHealths[bondIndex] <= 0.0f) { // Notify graph that bond was removed getFamilyGraph()->notifyEdgeRemoved(getIndex(), nodeIndex0, nodeIndex1, bondIndex, getGraph()); bondHealths[bondIndex] = 0.0f; // Doing this for single-actor serialization consistency; should not actually be necessary } } } uint32_t Actor::damageBond(const NvBlastBondFractureData& cmd) { NVBLAST_ASSERT(!isInvalidIndex(cmd.nodeIndex1)); return damageBond(cmd.nodeIndex0, cmd.nodeIndex1, cmd.health); } void Actor::generateFracture(NvBlastFractureBuffers* commandBuffers, const NvBlastDamageProgram& program, const void* programParams, NvBlastLog logFn, NvBlastTimers* timers) const { NVBLASTLL_CHECK(commandBuffers != nullptr, logFn, "Actor::generateFracture: NULL commandBuffers pointer input.", return); NVBLASTLL_CHECK(isValid(commandBuffers), logFn, "NvBlastActorGenerateFracture: commandBuffers memory is NULL but size is > 0.", commandBuffers->bondFractureCount = 0; commandBuffers->chunkFractureCount = 0; return); #if NVBLASTLL_CHECK_PARAMS if (commandBuffers->bondFractureCount == 0 && commandBuffers->chunkFractureCount == 0) { NVBLASTLL_LOG_WARNING(logFn, "NvBlastActorGenerateFracture: commandBuffers do not provide any space."); return; } #endif #if NV_PROFILE Time time; #else NV_UNUSED(timers); #endif const SupportGraph* graph = getGraph(); const uint32_t graphNodeCount = getGraphNodeCount(); if (graphNodeCount > 1 && program.graphShaderFunction != nullptr) { const NvBlastGraphShaderActor shaderActor = { getIndex(), getGraphNodeCount(), graph->m_nodeCount, getFirstGraphNodeIndex(), getGraphNodeIndexLinks(), graph->getChunkIndices(), graph->getAdjacencyPartition(), graph->getAdjacentNodeIndices(), graph->getAdjacentBondIndices(), getBonds(), getChunks(), getBondHealths(), getLowerSupportChunkHealths(), getFamilyHeader()->getFamilyGraph()->getIslandIds() }; program.graphShaderFunction(commandBuffers, &shaderActor, programParams); } else if (graphNodeCount <= 1 && program.subgraphShaderFunction != nullptr) { const NvBlastSubgraphShaderActor shaderActor = { // The conditional (visible vs. support chunk) is needed because we allow single-child chunk chains // This makes it possible that an actor with a single support chunk will have a different visible chunk (ancestor of the support chunk) graphNodeCount == 1 ? 
graph->getChunkIndices()[getFirstGraphNodeIndex()] : getFirstVisibleChunkIndex(), getChunks() }; program.subgraphShaderFunction(commandBuffers, &shaderActor, programParams); } else { commandBuffers->bondFractureCount = 0; commandBuffers->chunkFractureCount = 0; } #if NV_PROFILE if (timers != nullptr) { timers->material += time.getElapsedTicks(); } #endif } size_t Actor::splitRequiredScratch() const { // Scratch is reused, just need the max of these two values return std::max(m_graphNodeCount * sizeof(uint32_t), static_cast<size_t>(FamilyGraph::findIslandsRequiredScratch(getGraph()->m_nodeCount))); } uint32_t Actor::split(NvBlastActorSplitEvent* result, uint32_t newActorsMaxCount, void* scratch, NvBlastLog logFn, NvBlastTimers* timers) { NVBLASTLL_CHECK(result != nullptr, logFn, "Actor::split: NULL result pointer input.", return 0); NVBLASTLL_CHECK(newActorsMaxCount > 0 && result->newActors != nullptr, logFn, "NvBlastActorSplit: no space for results provided.", return 0); NVBLASTLL_CHECK(scratch != nullptr, logFn, "Actor::split: NULL scratch pointer input.", return 0); #if NV_PROFILE Time time; #else NV_UNUSED(timers); #endif Actor** newActors = reinterpret_cast<Actor**>(result->newActors); uint32_t actorsCount = 0; if (getGraphNodeCount() <= 1) { uint32_t chunkHealthIndex = isSingleSupportChunk() ? getIndex() : getFirstVisibleChunkIndex() - getFirstSubsupportChunkIndex() + getGraph()->m_nodeCount; float* chunkHealths = getLowerSupportChunkHealths(); if (chunkHealths[chunkHealthIndex] <= 0.0f) { actorsCount = partitionSingleLowerSupportChunk(newActors, newActorsMaxCount, logFn); for (uint32_t i = 0; i < actorsCount; ++i) { Actor* newActor = newActors[i]; uint32_t firstVisible = newActor->getFirstVisibleChunkIndex(); uint32_t firstSub = newActor->getFirstSubsupportChunkIndex(); uint32_t nodeCount = newActor->getGraph()->m_nodeCount; uint32_t newActorIndex = newActor->getIndex(); uint32_t healthIndex = newActor->isSubSupportChunk() ? firstVisible - firstSub + nodeCount : newActorIndex; if (chunkHealths[healthIndex] <= 0.0f) { uint32_t brittleActors = newActors[i]->partitionSingleLowerSupportChunk(&newActors[actorsCount], newActorsMaxCount - actorsCount, logFn); actorsCount += brittleActors; if (brittleActors > 0) { actorsCount--; newActors[i] = newActors[actorsCount]; i--; } } } } #if NV_PROFILE if (timers != nullptr) { timers->partition += time.getElapsedTicks(); } #endif } else { findIslands(scratch); #if NV_PROFILE if (timers != nullptr) { timers->island += time.getElapsedTicks(); } #endif // Reuse scratch for node list uint32_t* graphNodeIndexList = reinterpret_cast<uint32_t*>(scratch); // Get the family header FamilyHeader* header = getFamilyHeader(); NVBLAST_ASSERT(header != nullptr); // If m_actorEntryDataIndex is valid, this should be too // Record nodes in this actor before splitting const uint32_t* graphNodeIndexLinks = header->getGraphNodeIndexLinks(); // Get the links for the graph nodes uint32_t graphNodeIndexCount = 0; for (uint32_t graphNodeIndex = m_firstGraphNodeIndex; !isInvalidIndex(graphNodeIndex); graphNodeIndex = graphNodeIndexLinks[graphNodeIndex]) { if (graphNodeIndexCount >= m_graphNodeCount) { // Safety, splitRequiredScratch() only guarantees m_graphNodeCount elements. In any case, this condition shouldn't happen. 
NVBLAST_ASSERT(graphNodeIndexCount < m_graphNodeCount); break; } graphNodeIndexList[graphNodeIndexCount++] = graphNodeIndex; } actorsCount = partitionMultipleGraphNodes(newActors, newActorsMaxCount, logFn); if (actorsCount > 1) { #if NV_PROFILE if (timers != nullptr) { timers->partition += time.getElapsedTicks(); } #endif // Get various pointers and values to iterate const Asset* asset = getAsset(); Actor* actors = header->getActors(); IndexDLink<uint32_t>* visibleChunkIndexLinks = header->getVisibleChunkIndexLinks(); uint32_t* chunkActorIndices = header->getChunkActorIndices(); const SupportGraph& graph = asset->m_graph; const uint32_t* graphChunkIndices = graph.getChunkIndices(); const NvBlastChunk* chunks = asset->getChunks(); const uint32_t upperSupportChunkCount = asset->getUpperSupportChunkCount(); const uint32_t* familyGraphIslandIDs = header->getFamilyGraph()->getIslandIds(); // Iterate over all graph nodes and update visible chunk lists for (uint32_t graphNodeNum = 0; graphNodeNum < graphNodeIndexCount; ++graphNodeNum) { const uint32_t graphNodeIndex = graphNodeIndexList[graphNodeNum]; const uint32_t supportChunkIndex = graphChunkIndices[graphNodeIndex]; if (!isInvalidIndex(supportChunkIndex)) // Invalid if this is the world chunk { updateVisibleChunksFromSupportChunk<Actor>(actors, visibleChunkIndexLinks, chunkActorIndices, familyGraphIslandIDs[graphNodeIndex], graphChunkIndices[graphNodeIndex], chunks, upperSupportChunkCount); } } // Remove actors with no visible chunks - this can happen if we've split such that the world node is by itself uint32_t actualActorsCount = 0; for (uint32_t i = 0; i < actorsCount; ++i) { newActors[actualActorsCount] = newActors[i]; if (newActors[actualActorsCount]->getVisibleChunkCount() > 0) { ++actualActorsCount; } else { header->returnActor(*newActors[actualActorsCount]); } } actorsCount = actualActorsCount; #if NV_PROFILE if (timers != nullptr) { timers->visibility += time.getElapsedTicks(); } #endif // NOTE: we MUST use header->getLowerSupportChunkHealths() instead of just getLowerSupportChunkHealths() here, // since this actor has been made inactive at this point. Therefore Actor::getLowerSupportChunkHealths() will return // garbage since it calls getFamilyHeader() which does not return a valid header if the actor is not active. const float* chunkHealths = header->getLowerSupportChunkHealths(); for (uint32_t i = 0; i < actorsCount; ++i) { Actor* newActor = newActors[i]; if (newActor->getGraphNodeCount() <= 1) { const uint32_t firstVisible = newActor->getFirstVisibleChunkIndex(); const uint32_t firstSub = newActor->getFirstSubsupportChunkIndex(); const uint32_t assetNodeCount = newActor->getGraph()->m_nodeCount; const uint32_t newActorIndex = newActor->getIndex(); const uint32_t healthIndex = newActor->isSubSupportChunk() ? firstVisible - firstSub + assetNodeCount : newActorIndex; // this relies on visibility updated, subsupport actors only have m_firstVisibleChunkIndex to identify the chunk if (chunkHealths[healthIndex] <= 0.0f) { const uint32_t brittleActors = newActor->partitionSingleLowerSupportChunk(&newActors[actorsCount], newActorsMaxCount - actorsCount, logFn); actorsCount += brittleActors; if (brittleActors > 0) { actorsCount--; newActors[i] = newActors[actorsCount]; i--; } } } } #if NV_PROFILE if (timers != nullptr) { timers->partition += time.getElapsedTicks(); } #endif } else { actorsCount = 0; } } result->deletedActor = actorsCount == 0 ? 
nullptr : this; return actorsCount; } uint32_t Actor::findIslands(void* scratch) { return getFamilyHeader()->getFamilyGraph()->findIslands(getIndex(), scratch, &getAsset()->m_graph); } uint32_t Actor::partitionMultipleGraphNodes(Actor** newActors, uint32_t newActorsSize, NvBlastLog logFn) { NVBLAST_ASSERT(newActorsSize == 0 || newActors != nullptr); // Check for single subsupport chunk, no partitioning if (m_graphNodeCount <= 1) { NVBLASTLL_LOG_WARNING(logFn, "Nv::Blast::Actor::partitionMultipleGraphNodes: actor is a single lower-support chunk, and cannot be partitioned by this function."); return 0; } FamilyHeader* header = getFamilyHeader(); NVBLAST_ASSERT(header != nullptr); // If m_actorEntryDataIndex is valid, this should be too // Get the links for the graph nodes uint32_t* graphNodeIndexLinks = header->getGraphNodeIndexLinks(); // Get the graph chunk indices and leaf chunk counts const Asset* asset = getAsset(); const uint32_t* graphChunkIndices = asset->m_graph.getChunkIndices(); const uint32_t* subtreeLeafChunkCounts = asset->getSubtreeLeafChunkCounts(); // Distribute graph nodes to new actors uint32_t newActorCount = 0; const uint32_t thisActorIndex = getIndex(); m_leafChunkCount = 0; const uint32_t* islandIDs = header->getFamilyGraph()->getIslandIds(); uint32_t lastGraphNodeIndex = invalidIndex<uint32_t>(); uint32_t nextGraphNodeIndex = invalidIndex<uint32_t>(); bool overflow = false; for (uint32_t graphNodeIndex = m_firstGraphNodeIndex; !isInvalidIndex(graphNodeIndex); graphNodeIndex = nextGraphNodeIndex) { nextGraphNodeIndex = graphNodeIndexLinks[graphNodeIndex]; const uint32_t islandID = islandIDs[graphNodeIndex]; if (islandID == thisActorIndex) { const uint32_t graphChunkIndex = graphChunkIndices[graphNodeIndex]; if (!isInvalidIndex(graphChunkIndex)) // Invalid if this is the world chunk { m_leafChunkCount += subtreeLeafChunkCounts[graphChunkIndex]; } lastGraphNodeIndex = graphNodeIndex; continue; // Leave the chunk in this actor } // Remove link from this actor if (isInvalidIndex(lastGraphNodeIndex)) { m_firstGraphNodeIndex = nextGraphNodeIndex; } else { graphNodeIndexLinks[lastGraphNodeIndex] = nextGraphNodeIndex; } graphNodeIndexLinks[graphNodeIndex] = invalidIndex<uint32_t>(); --m_graphNodeCount; // See if the chunk had been removed if (islandID == invalidIndex<uint32_t>()) { continue; } // Get new actor if the islandID is valid Actor* newActor = header->borrowActor(islandID); // Check new actor to see if we're adding the first chunk if (isInvalidIndex(newActor->m_firstGraphNodeIndex)) { // See if we can fit it in the output list if (newActorCount < newActorsSize) { newActors[newActorCount++] = newActor; } else { overflow = true; } } // Put link in new actor graphNodeIndexLinks[graphNodeIndex] = newActor->m_firstGraphNodeIndex; newActor->m_firstGraphNodeIndex = graphNodeIndex; ++newActor->m_graphNodeCount; // Add to the actor's leaf chunk count const uint32_t graphChunkIndex = graphChunkIndices[graphNodeIndex]; if (!isInvalidIndex(graphChunkIndex)) // Invalid if this is the world chunk { newActor->m_leafChunkCount += subtreeLeafChunkCounts[graphChunkIndex]; } } if (m_graphNodeCount > 0) { // There are still chunks in this actor. See if we can fit this in the output list. if (newActorCount < newActorsSize) { newActors[newActorCount++] = this; } else { overflow = true; } } else { // No more chunks; release this actor. 
release(); } if (overflow) { NVBLASTLL_LOG_WARNING(logFn, "Nv::Blast::Actor::partitionMultipleGraphNodes: input newActors array could not hold all actors generated."); } return newActorCount; } uint32_t Actor::partitionSingleLowerSupportChunk(Actor** newActors, uint32_t newActorsSize, NvBlastLog logFn) { NVBLAST_ASSERT(newActorsSize == 0 || newActors != nullptr); // Ensure this is a single subsupport chunk, no partitioning if (m_graphNodeCount > 1) { NVBLASTLL_LOG_WARNING(logFn, "Nv::Blast::Actor::partitionSingleLowerSupportChunk: actor is not a single lower-support chunk, and cannot be partitioned by this function."); return 0; } FamilyHeader* header = getFamilyHeader(); // The conditional (visible vs. support chunk) is needed because we allow single-child chunk chains // This makes it possible that an actor with a single support chunk will have a different visible chunk (ancestor of the support chunk) const uint32_t chunkIndex = m_graphNodeCount == 0 ? m_firstVisibleChunkIndex : getGraph()->getChunkIndices()[m_firstGraphNodeIndex]; if (isInvalidIndex(chunkIndex)) { return 0; // This actor has no chunks; only a graph node representing the world } NVBLAST_ASSERT(isInvalidIndex(header->getVisibleChunkIndexLinks()[chunkIndex].m_adj[1])); const NvBlastChunk& chunk = header->m_asset->getChunks()[chunkIndex]; uint32_t childCount = chunk.childIndexStop - chunk.firstChildIndex; // Warn if we cannot fit all child chunks in the output list if (childCount > newActorsSize) { NVBLASTLL_LOG_WARNING(logFn, "Nv::Blast::Actor::partitionSingleLowerSupportChunk: input newActors array will not hold all actors generated."); childCount = newActorsSize; } // Return if no chunks will be created. if (childCount == 0) { return 0; } // Activate a new actor for every child chunk const Asset* asset = getAsset(); const NvBlastChunk* chunks = asset->getChunks(); const uint32_t firstChildIndex = chunks[chunkIndex].firstChildIndex; for (uint32_t i = 0; i < childCount; ++i) { const uint32_t childIndex = firstChildIndex + i; NVBLAST_ASSERT(childIndex >= asset->m_firstSubsupportChunkIndex); const uint32_t actorIndex = asset->m_graph.m_nodeCount + (childIndex - asset->m_firstSubsupportChunkIndex); NVBLAST_ASSERT(!header->isActorActive(actorIndex)); newActors[i] = header->borrowActor(actorIndex); newActors[i]->m_firstVisibleChunkIndex = childIndex; newActors[i]->m_visibleChunkCount = 1; newActors[i]->m_leafChunkCount = asset->getSubtreeLeafChunkCounts()[childIndex]; } // Release this actor release(); return childCount; } void Actor::updateVisibleChunksFromGraphNodes() { // Only apply this to upper-support chunk actors if (m_graphNodeCount == 0) { return; } const Asset* asset = getAsset(); const uint32_t thisActorIndex = getIndex(); // Get various arrays FamilyHeader* header = getFamilyHeader(); Actor* actors = header->getActors(); IndexDLink<uint32_t>* visibleChunkIndexLinks = header->getVisibleChunkIndexLinks(); uint32_t* chunkActorIndices = header->getChunkActorIndices(); const SupportGraph& graph = asset->m_graph; const uint32_t* graphChunkIndices = graph.getChunkIndices(); const NvBlastChunk* chunks = asset->getChunks(); const uint32_t upperSupportChunkCount = asset->getUpperSupportChunkCount(); // Iterate over all graph nodes and update visible chunk list const uint32_t* graphNodeIndexLinks = header->getGraphNodeIndexLinks(); for (uint32_t graphNodeIndex = m_firstGraphNodeIndex; !isInvalidIndex(graphNodeIndex); graphNodeIndex = graphNodeIndexLinks[graphNodeIndex]) { const uint32_t supportChunkIndex = 
graphChunkIndices[graphNodeIndex]; if (!isInvalidIndex(supportChunkIndex)) // Invalid if this is the world chunk { updateVisibleChunksFromSupportChunk<Actor>(actors, visibleChunkIndexLinks, chunkActorIndices, thisActorIndex, graphChunkIndices[graphNodeIndex], chunks, upperSupportChunkCount); } } } } // namespace Blast } // namespace Nv // API implementation extern "C" { NvBlastActor* NvBlastFamilyCreateFirstActor(NvBlastFamily* family, const NvBlastActorDesc* desc, void* scratch, NvBlastLog logFn) { NVBLASTLL_CHECK(family != nullptr, logFn, "NvBlastFamilyCreateFirstActor: NULL family input.", return nullptr); NVBLASTLL_CHECK(desc != nullptr, logFn, "NvBlastFamilyCreateFirstActor: NULL desc input.", return nullptr); NVBLASTLL_CHECK(scratch != nullptr, logFn, "NvBlastFamilyCreateFirstActor: NULL scratch input.", return nullptr); return Nv::Blast::Actor::create(family, desc, scratch, logFn); } size_t NvBlastFamilyGetRequiredScratchForCreateFirstActor(const NvBlastFamily* family, NvBlastLog logFn) { NVBLASTLL_CHECK(family != nullptr, logFn, "NvBlastFamilyGetRequiredScratchForCreateFirstActor: NULL family input.", return 0); NVBLASTLL_CHECK(reinterpret_cast<const Nv::Blast::FamilyHeader*>(family)->m_asset != nullptr, logFn, "NvBlastFamilyGetRequiredScratchForCreateFirstActor: family has NULL asset.", return 0); return Nv::Blast::Actor::createRequiredScratch(family, logFn); } bool NvBlastActorDeactivate(NvBlastActor* actor, NvBlastLog logFn) { NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorDeactivate: NULL actor input.", return false); Nv::Blast::Actor& a = *static_cast<Nv::Blast::Actor*>(actor); if (!a.isActive()) { NVBLASTLL_LOG_WARNING(logFn, "NvBlastActorDeactivate: inactive actor input."); } return a.release(); } uint32_t NvBlastActorGetVisibleChunkCount(const NvBlastActor* actor, NvBlastLog logFn) { NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorGetVisibleChunkCount: NULL actor input.", return 0); const Nv::Blast::Actor& a = *static_cast<const Nv::Blast::Actor*>(actor); if (!a.isActive()) { NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorGetVisibleChunkCount: inactive actor input."); return 0; } return a.getVisibleChunkCount(); } uint32_t NvBlastActorGetVisibleChunkIndices(uint32_t* visibleChunkIndices, uint32_t visibleChunkIndicesSize, const NvBlastActor* actor, NvBlastLog logFn) { NVBLASTLL_CHECK(visibleChunkIndices != nullptr, logFn, "NvBlastActorGetVisibleChunkIndices: NULL visibleChunkIndices pointer input.", return 0); NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorGetVisibleChunkIndices: NULL actor pointer input.", return 0); const Nv::Blast::Actor& a = *static_cast<const Nv::Blast::Actor*>(actor); if (!a.isActive()) { NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorGetVisibleChunkIndices: inactive actor pointer input."); return 0; } // Iterate through visible chunk list and write to supplied array uint32_t indexCount = 0; for (Nv::Blast::Actor::VisibleChunkIt i = a; indexCount < visibleChunkIndicesSize && (bool)i; ++i) { visibleChunkIndices[indexCount++] = (uint32_t)i; } return indexCount; } uint32_t NvBlastActorGetGraphNodeCount(const NvBlastActor* actor, NvBlastLog logFn) { NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorGetGraphNodeCount: NULL actor pointer input.", return 0); const Nv::Blast::Actor& a = *static_cast<const Nv::Blast::Actor*>(actor); if (!a.isActive()) { NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorGetGraphNodeCount: inactive actor pointer input."); return 0; } return a.getGraphNodeCount(); } uint32_t NvBlastActorGetGraphNodeIndices(uint32_t* 
graphNodeIndices, uint32_t graphNodeIndicesSize, const NvBlastActor* actor, NvBlastLog logFn) { NVBLASTLL_CHECK(graphNodeIndices != nullptr, logFn, "NvBlastActorGetGraphNodeIndices: NULL graphNodeIndices pointer input.", return 0); NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorGetGraphNodeIndices: NULL actor pointer input.", return 0); const Nv::Blast::Actor& a = *static_cast<const Nv::Blast::Actor*>(actor); if (!a.isActive()) { NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorGetGraphNodeIndices: inactive actor pointer input."); return 0; } // Iterate through graph node list and write to supplied array const uint32_t* graphChunkIndices = a.getAsset()->m_graph.getChunkIndices(); uint32_t indexCount = 0; for (Nv::Blast::Actor::GraphNodeIt i = a; indexCount < graphNodeIndicesSize && (bool)i; ++i) { const uint32_t graphNodeIndex = (uint32_t)i; if (!Nv::Blast::isInvalidIndex(graphChunkIndices[graphNodeIndex])) { graphNodeIndices[indexCount++] = graphNodeIndex; } } return indexCount; } const float* NvBlastActorGetBondHealths(const NvBlastActor* actor, NvBlastLog logFn) { NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorGetBondHealths: NULL actor pointer input.", return nullptr); const Nv::Blast::Actor& a = *static_cast<const Nv::Blast::Actor*>(actor); if (!a.isActive()) { NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorGetBondHealths: inactive actor pointer input."); return nullptr; } return a.getFamilyHeader()->getBondHealths(); } const float* NvBlastActorGetCachedBondHeaths(const NvBlastActor* actor, NvBlastLog logFn) { NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorGetCachedBondHeaths: NULL actor pointer input.", return nullptr); const Nv::Blast::Actor& a = *static_cast<const Nv::Blast::Actor*>(actor); if (!a.isActive()) { NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorGetCachedBondHeaths: inactive actor pointer input."); return nullptr; } return a.getFamilyHeader()->getCachedBondHealths(); } bool NvBlastActorCacheBondHeath(const NvBlastActor* actor, uint32_t bondIndex, NvBlastLog logFn) { NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorCacheBondHeath: NULL actor pointer input.", return nullptr); const Nv::Blast::Actor& a = *static_cast<const Nv::Blast::Actor*>(actor); if (!a.isActive()) { NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorCacheBondHeath: inactive actor pointer input."); return false; } // copy the value over from the current bond health Nv::Blast::FamilyHeader* familyHeader = a.getFamilyHeader(); const float curHealth = familyHeader->getBondHealths()[bondIndex]; familyHeader->getCachedBondHealths()[bondIndex] = curHealth; return true; } NvBlastFamily* NvBlastActorGetFamily(const NvBlastActor* actor, NvBlastLog logFn) { NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorGetFamily: NULL actor pointer input.", return nullptr); const Nv::Blast::Actor& a = *static_cast<const Nv::Blast::Actor*>(actor); if (!a.isActive()) { NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorGetFamily: inactive actor pointer input."); return nullptr; } return reinterpret_cast<NvBlastFamily*>(a.getFamilyHeader()); } uint32_t NvBlastActorGetIndex(const NvBlastActor* actor, NvBlastLog logFn) { NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorGetIndex: NULL actor pointer input.", return Nv::Blast::invalidIndex<uint32_t>()); const Nv::Blast::Actor& a = *static_cast<const Nv::Blast::Actor*>(actor); if (!a.isActive()) { NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorGetIndex: actor is not active."); return Nv::Blast::invalidIndex<uint32_t>(); } return a.getIndex(); } void NvBlastActorGenerateFracture ( 
NvBlastFractureBuffers* commandBuffers, const NvBlastActor* actor, const NvBlastDamageProgram program, const void* programParams, NvBlastLog logFn, NvBlastTimers* timers ) { NVBLASTLL_CHECK(commandBuffers != nullptr, logFn, "NvBlastActorGenerateFracture: NULL commandBuffers pointer input.", return); NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorGenerateFracture: NULL actor pointer input.", return); const Nv::Blast::Actor& a = *static_cast<const Nv::Blast::Actor*>(actor); if (!a.isActive()) { NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorGenerateFracture: actor is not active."); commandBuffers->bondFractureCount = 0; commandBuffers->chunkFractureCount = 0; return; } a.generateFracture(commandBuffers, program, programParams, logFn, timers); } void NvBlastActorApplyFracture ( NvBlastFractureBuffers* eventBuffers, NvBlastActor* actor, const NvBlastFractureBuffers* commands, NvBlastLog logFn, NvBlastTimers* timers ) { NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorApplyFracture: NULL actor pointer input.", return); NVBLASTLL_CHECK(commands != nullptr, logFn, "NvBlastActorApplyFracture: NULL commands pointer input.", return); NVBLASTLL_CHECK(Nv::Blast::isValid(commands), logFn, "NvBlastActorApplyFracture: commands memory is NULL but size is > 0.", return); Nv::Blast::Actor& a = *static_cast<Nv::Blast::Actor*>(actor); if (!a.isActive()) { NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorApplyFracture: actor is not active."); if (eventBuffers != nullptr) { eventBuffers->bondFractureCount = 0; eventBuffers->chunkFractureCount = 0; } return; } a.getFamilyHeader()->applyFracture(eventBuffers, commands, &a, logFn, timers); } size_t NvBlastActorGetRequiredScratchForSplit(const NvBlastActor* actor, NvBlastLog logFn) { NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorGetRequiredScratchForSplit: NULL actor input.", return 0); const Nv::Blast::Actor& a = *static_cast<const Nv::Blast::Actor*>(actor); if (!a.isActive()) { NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorGetRequiredScratchForSplit: actor is not active."); return 0; } return a.splitRequiredScratch(); } uint32_t NvBlastActorGetMaxActorCountForSplit(const NvBlastActor* actor, NvBlastLog logFn) { NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorGetMaxActorCountForSplit: NULL actor input.", return 0); const Nv::Blast::Actor& a = *static_cast<const Nv::Blast::Actor*>(actor); if (!a.isActive()) { NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorGetMaxActorCountForSplit: actor is not active."); return 0; } return a.getLeafChunkCount() + 1; // GWD-167 workaround (+1) } uint32_t NvBlastActorSplit ( NvBlastActorSplitEvent* result, NvBlastActor* actor, uint32_t newActorsMaxCount, void* scratch, NvBlastLog logFn, NvBlastTimers* timers ) { NVBLASTLL_CHECK(result != nullptr, logFn, "NvBlastActorSplit: NULL result pointer input.", return 0); NVBLASTLL_CHECK(newActorsMaxCount > 0 && result->newActors != nullptr, logFn, "NvBlastActorSplit: no space for results provided.", return 0); NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorSplit: NULL actor pointer input.", return 0); NVBLASTLL_CHECK(scratch != nullptr, logFn, "NvBlastActorSplit: NULL scratch pointer input.", return 0); Nv::Blast::Actor& a = *static_cast<Nv::Blast::Actor*>(actor); if (!a.isActive()) { NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorGetIndex: actor is not active."); return 0; } return a.split(result, newActorsMaxCount, scratch, logFn, timers); } bool NvBlastActorCanFracture(const NvBlastActor* actor, NvBlastLog logFn) { NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorCanFracture: NULL actor 
input.", return false); const Nv::Blast::Actor& a = *static_cast<const Nv::Blast::Actor*>(actor); if (!a.isActive()) { NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorCanFracture: actor is not active."); return false; } bool canFracture = true; uint32_t graphNodeCount = a.getGraphNodeCount(); if (graphNodeCount < 2) { uint32_t chunkHealthIndex = graphNodeCount == 0 ? a.getFirstVisibleChunkIndex() - a.getFirstSubsupportChunkIndex() + a.getGraph()->m_nodeCount : a.getFirstGraphNodeIndex(); canFracture = (a.getLowerSupportChunkHealths()[chunkHealthIndex] > 0.0f); } return canFracture; } bool NvBlastActorHasExternalBonds(const NvBlastActor* actor, NvBlastLog logFn) { NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorHasExternalBonds: NULL actor input.", return false); return static_cast<const Nv::Blast::Actor*>(actor)->hasExternalBonds(); } bool NvBlastActorIsSplitRequired(const NvBlastActor* actor, NvBlastLog logFn) { NVBLASTLL_CHECK(actor != nullptr, logFn, "NvBlastActorIsSplitRequired: NULL actor input.", return false); const Nv::Blast::Actor& a = *static_cast<const Nv::Blast::Actor*>(actor); if (!a.isActive()) { NVBLASTLL_LOG_ERROR(logFn, "NvBlastActorIsSplitRequired: actor is not active."); return false; } return a.isSplitRequired(); } } // extern "C"
39,574
C++
36.37016
219
0.667307
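Complementing the fracture path, the query entry points in the same file report an actor's current composition. A short hedged sketch, assuming an active actor and using only functions defined in the file above:

// Sketch: enumerate the chunks currently visible on an actor and its support-graph nodes.
// Counts are queried first so the caller-provided arrays can be sized exactly.
#include "NvBlast.h"   // assumed low-level API header
#include <vector>

static void inspectActor(const NvBlastActor* actor)
{
    // Visible chunk indices (indices into the asset's chunk array).
    const uint32_t visibleCount = NvBlastActorGetVisibleChunkCount(actor, nullptr);
    std::vector<uint32_t> visibleChunks(visibleCount);
    if (visibleCount)
        NvBlastActorGetVisibleChunkIndices(visibleChunks.data(), visibleCount, actor, nullptr);

    // Support-graph node indices owned by this actor.  The implementation above skips
    // world-node entries, so fewer than nodeCount indices may be written.
    const uint32_t nodeCount = NvBlastActorGetGraphNodeCount(actor, nullptr);
    std::vector<uint32_t> graphNodes(nodeCount);
    if (nodeCount)
        NvBlastActorGetGraphNodeIndices(graphNodes.data(), nodeCount, actor, nullptr);

    // Per-bond health values live in the family and are shared by all of its actors.
    const float* bondHealths = NvBlastActorGetBondHealths(actor, nullptr);
    (void)visibleChunks; (void)graphNodes; (void)bondHealths;
}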
NVIDIA-Omniverse/PhysX/blast/source/sdk/lowlevel/NvBlastAssetHelper.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastAsset.h" #include "NvBlastIndexFns.h" #include "NvBlastAssert.h" #include "NvBlastMemory.h" #include "NvBlastMath.h" #include "NvBlastPreprocessorInternal.h" #include <algorithm> namespace Nv { namespace Blast { /** Class to hold chunk descriptor and annotation context for sorting a list of indices */ class ChunksOrdered { public: ChunksOrdered(const NvBlastChunkDesc* descs, const char* annotation) : m_descs(descs), m_annotation(annotation), m_chunkMap(nullptr), m_chunkInvMap(nullptr) {} // Map and inverse to apply to chunk descs bool setMap(const uint32_t* map, const uint32_t* inv) { if ((map == nullptr) != (inv == nullptr)) { return false; } m_chunkMap = map; m_chunkInvMap = inv; return true; } bool operator () (uint32_t ii0, uint32_t ii1) const { const uint32_t i0 = m_chunkMap ? m_chunkMap[ii0] : ii0; const uint32_t i1 = m_chunkMap ? m_chunkMap[ii1] : ii1; const bool upperSupport0 = (m_annotation[i0] & Asset::ChunkAnnotation::UpperSupport) != 0; const bool upperSupport1 = (m_annotation[i1] & Asset::ChunkAnnotation::UpperSupport) != 0; if (upperSupport0 != upperSupport1) { return upperSupport0; // If one is uppersupport and one is subsupport, uppersupport should come first } const uint32_t p0 = m_descs[i0].parentChunkDescIndex; const uint32_t p1 = m_descs[i1].parentChunkDescIndex; // Parent chunk index (+1 so that UINT32_MAX becomes the lowest value) const uint32_t pp0 = 1 + (m_chunkInvMap && !isInvalidIndex(p0) ? m_chunkInvMap[p0] : p0); const uint32_t pp1 = 1 + (m_chunkInvMap && !isInvalidIndex(p1) ? 
m_chunkInvMap[p1] : p1); return pp0 < pp1; // With the same support relationship, order by parent index } private: const NvBlastChunkDesc* m_descs; const char* m_annotation; const uint32_t* m_chunkMap; const uint32_t* m_chunkInvMap; }; } // namespace Blast } // namespace Nv using namespace Nv::Blast; extern "C" { bool NvBlastBuildAssetDescChunkReorderMap(uint32_t* chunkReorderMap, const NvBlastChunkDesc* chunkDescs, uint32_t chunkCount, void* scratch, NvBlastLog logFn) { NVBLASTLL_CHECK(chunkCount == 0 || chunkDescs != nullptr, logFn, "NvBlastBuildAssetDescChunkReorderMap: NULL chunkDescs input with non-zero chunkCount", return false); NVBLASTLL_CHECK(chunkReorderMap == nullptr || chunkCount != 0, logFn, "NvBlastBuildAssetDescChunkReorderMap: NULL chunkReorderMap input with non-zero chunkCount", return false); NVBLASTLL_CHECK(chunkCount == 0 || scratch != nullptr, logFn, "NvBlastBuildAssetDescChunkReorderMap: NULL scratch input with non-zero chunkCount", return false); uint32_t* composedMap = static_cast<uint32_t*>(scratch); scratch = pointerOffset(scratch, chunkCount * sizeof(uint32_t)); uint32_t* chunkMap = static_cast<uint32_t*>(scratch); scratch = pointerOffset(scratch, chunkCount * sizeof(uint32_t)); char* chunkAnnotation = static_cast<char*>(scratch); scratch = pointerOffset(scratch, chunkCount * sizeof(char)); uint32_t supportChunkCount; uint32_t leafChunkCount; if (!Asset::ensureExactSupportCoverage(supportChunkCount, leafChunkCount, chunkAnnotation, chunkCount, const_cast<NvBlastChunkDesc*>(chunkDescs), true, logFn)) { NVBLASTLL_LOG_ERROR(logFn, "NvBlastBuildAssetDescChunkReorderMap: chunk descriptors did not have exact coverage, map could not be built. Use NvBlastEnsureAssetExactSupportCoverage to fix descriptors."); return false; } // Initialize composedMap and its inverse to identity for (uint32_t i = 0; i < chunkCount; ++i) { composedMap[i] = i; chunkReorderMap[i] = i; } // Create a chunk ordering operator using the composedMap ChunksOrdered chunksOrdered(chunkDescs, chunkAnnotation); chunksOrdered.setMap(composedMap, chunkReorderMap); // Check initial order bool ordered = true; if (chunkCount > 1) { for (uint32_t i = chunkCount - 1; ordered && i--;) { ordered = !chunksOrdered(i + 1, i); } } if (ordered) { return true; // Initially ordered, return true } NVBLAST_ASSERT(chunkCount > 1); // Max depth is bounded by chunkCount, so that is the vound on the number of iterations uint32_t iter = chunkCount; do { // Reorder based on current composed map for (uint32_t i = 0; i < chunkCount; ++i) { chunkMap[i] = i; } std::stable_sort(chunkMap, chunkMap + chunkCount, chunksOrdered); // Fold chunkMap into composedMap for (uint32_t i = 0; i < chunkCount; ++i) { chunkMap[i] = composedMap[chunkMap[i]]; } for (uint32_t i = 0; i < chunkCount; ++i) { composedMap[i] = chunkMap[i]; chunkMap[i] = i; } invertMap(chunkReorderMap, composedMap, chunkCount); // Check order ordered = true; for (uint32_t i = chunkCount - 1; ordered && i--;) { ordered = !chunksOrdered(i + 1, i); } } while (!ordered && iter--); NVBLAST_ASSERT(ordered); return false; } void NvBlastApplyAssetDescChunkReorderMap ( NvBlastChunkDesc* reorderedChunkDescs, const NvBlastChunkDesc* chunkDescs, uint32_t chunkCount, NvBlastBondDesc* bondDescs, uint32_t bondCount, const uint32_t* chunkReorderMap, bool keepBondNormalChunkOrder, NvBlastLog logFn ) { NVBLASTLL_CHECK(chunkCount == 0 || chunkDescs != nullptr, logFn, "NvBlastApplyAssetDescChunkReorderMap: NULL chunkDescs input with non-zero chunkCount", return); 
NVBLASTLL_CHECK(reorderedChunkDescs == nullptr || chunkCount != 0, logFn, "NvBlastApplyAssetDescChunkReorderMap: NULL reorderedChunkDescs input with non-zero chunkCount", return); NVBLASTLL_CHECK(chunkReorderMap == nullptr || chunkCount != 0, logFn, "NvBlastApplyAssetDescChunkReorderMap: NULL chunkReorderMap input with non-zero chunkCount", return); NVBLASTLL_CHECK(bondCount == 0 || bondDescs != nullptr, logFn, "NvBlastApplyAssetDescChunkReorderMap: NULL bondDescs input with non-zero bondCount", return); NVBLASTLL_CHECK(bondDescs == nullptr || chunkReorderMap != nullptr, logFn, "NvBlastApplyAssetDescChunkReorderMap: NULL bondDescs input with NULL chunkReorderMap", return); // Copy chunk descs if (reorderedChunkDescs) { for (uint32_t i = 0; i < chunkCount; ++i) { reorderedChunkDescs[chunkReorderMap[i]] = chunkDescs[i]; uint32_t& parentIndex = reorderedChunkDescs[chunkReorderMap[i]].parentChunkDescIndex; if (parentIndex < chunkCount) { parentIndex = chunkReorderMap[parentIndex]; // If the parent index is valid, remap it too to reflect the new order } } } if (bondDescs) { for (uint32_t i = 0; i < bondCount; ++i) { NvBlastBondDesc& bondDesc = bondDescs[i]; uint32_t& index0 = bondDesc.chunkIndices[0]; uint32_t& index1 = bondDesc.chunkIndices[1]; const uint32_t newIndex0 = index0 < chunkCount ? chunkReorderMap[index0] : index0; const uint32_t newIndex1 = index1 < chunkCount ? chunkReorderMap[index1] : index1; if (keepBondNormalChunkOrder && (index0 < index1) != (newIndex0 < newIndex1)) { VecMath::mul(bondDesc.bond.normal, -1); } index0 = newIndex0; index1 = newIndex1; } } } void NvBlastApplyAssetDescChunkReorderMapInPlace ( NvBlastChunkDesc* chunkDescs, uint32_t chunkCount, NvBlastBondDesc* bondDescs, uint32_t bondCount, const uint32_t* chunkReorderMap, bool keepBondNormalChunkOrder, void* scratch, NvBlastLog logFn ) { NVBLASTLL_CHECK(chunkCount == 0 || chunkDescs != nullptr, logFn, "NvBlastApplyAssetDescChunkReorderMapInPlace: NULL chunkDescs input with non-zero chunkCount", return); NVBLASTLL_CHECK(chunkCount == 0 || scratch != nullptr, logFn, "NvBlastApplyAssetDescChunkReorderMapInPlace: NULL scratch input with non-zero chunkCount", return); NvBlastChunkDesc* chunksTemp = static_cast<NvBlastChunkDesc*>(scratch); memcpy(chunksTemp, chunkDescs, sizeof(NvBlastChunkDesc) * chunkCount); NvBlastApplyAssetDescChunkReorderMap(chunkDescs, chunksTemp, chunkCount, bondDescs, bondCount, chunkReorderMap, keepBondNormalChunkOrder, logFn); } bool NvBlastReorderAssetDescChunks ( NvBlastChunkDesc* chunkDescs, uint32_t chunkCount, NvBlastBondDesc* bondDescs, uint32_t bondCount, uint32_t* chunkReorderMap, bool keepBondNormalChunkOrder, void* scratch, NvBlastLog logFn ) { if (!NvBlastBuildAssetDescChunkReorderMap(chunkReorderMap, chunkDescs, chunkCount, scratch, logFn)) { NvBlastApplyAssetDescChunkReorderMapInPlace(chunkDescs, chunkCount, bondDescs, bondCount, chunkReorderMap, keepBondNormalChunkOrder, scratch, logFn); return false; } return true; } bool NvBlastEnsureAssetExactSupportCoverage(NvBlastChunkDesc* chunkDescs, uint32_t chunkCount, void* scratch, NvBlastLog logFn) { NVBLASTLL_CHECK(chunkCount == 0 || chunkDescs != nullptr, logFn, "NvBlastEnsureAssetExactSupportCoverage: NULL chunkDescs input with non-zero chunkCount", return false); NVBLASTLL_CHECK(chunkCount == 0 || scratch != nullptr, logFn, "NvBlastEnsureAssetExactSupportCoverage: NULL scratch input with non-zero chunkCount", return false); uint32_t supportChunkCount; uint32_t leafChunkCount; return 
Asset::ensureExactSupportCoverage(supportChunkCount, leafChunkCount, static_cast<char*>(scratch), chunkCount, chunkDescs, false, logFn); } } // extern "C"
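A hedged sketch of how the helpers above are usually combined when authoring-time chunk descriptors arrive in arbitrary order. The scratch sizes are inferred from the implementations above (the reorder-map build consumes two uint32_t plus one char per chunk; the in-place apply needs a full NvBlastChunkDesc copy per chunk); treat that sizing as an assumption rather than a documented contract.

// Sketch: enforce exact support coverage, then reorder chunk descriptors (fixing up bond
// chunk indices) into the parent-before-child, upper-support-first order the asset
// builder requires.
#include "NvBlast.h"   // assumed low-level API header
#include <algorithm>
#include <cstdint>
#include <vector>

static bool prepareChunkDescs(NvBlastChunkDesc* chunkDescs, uint32_t chunkCount,
                              NvBlastBondDesc* bondDescs, uint32_t bondCount)
{
    // Scratch sized for the larger of the two helpers' needs (see note above).
    const size_t scratchSize = std::max(
        chunkCount * (2 * sizeof(uint32_t) + sizeof(char)),
        chunkCount * sizeof(NvBlastChunkDesc));
    std::vector<char> scratch(scratchSize);

    // Make support coverage exact so the reorder map can be built at all.
    NvBlastEnsureAssetExactSupportCoverage(chunkDescs, chunkCount, scratch.data(), nullptr);

    // Build the map and apply it in place; returns true if the input was already ordered.
    std::vector<uint32_t> chunkReorderMap(chunkCount);
    const bool alreadyOrdered = NvBlastReorderAssetDescChunks(
        chunkDescs, chunkCount, bondDescs, bondCount,
        chunkReorderMap.data(), true /*keepBondNormalChunkOrder*/,
        scratch.data(), nullptr);

    return alreadyOrdered; // either way, chunkDescs/bondDescs are now in a valid order
}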
11,513
C++
38.703448
211
0.689916
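For reference, the ordering rules that ChunksOrdered and the chunk-order validation enforce can be illustrated with a tiny hand-built descriptor set; the volume and userData values below are placeholders, the header name is assumed, and flags other than SupportFlag are simply left at zero.

// Sketch: a chunk hierarchy already in valid order -- the root comes first, children are
// grouped contiguously after their parent, and no subsupport chunk precedes an
// upper-support chunk.
#include "NvBlastTypes.h"   // assumed header declaring NvBlastChunkDesc
#include <cstdint>
#include <cstring>

static void buildExampleChunkDescs(NvBlastChunkDesc descs[3])
{
    std::memset(descs, 0, 3 * sizeof(NvBlastChunkDesc));

    descs[0].parentChunkDescIndex = UINT32_MAX;            // root: invalid parent index
    descs[0].flags = NvBlastChunkDesc::SupportFlag;        // support coverage at the root
    descs[0].volume = 2.0f;                                // placeholder value
    descs[0].userData = 0;

    for (uint32_t i = 1; i < 3; ++i)
    {
        descs[i].parentChunkDescIndex = 0;                 // children follow their parent
        descs[i].flags = 0;                                // subsupport: below the support chunk
        descs[i].volume = 1.0f;                            // placeholder value
        descs[i].userData = i;
    }
}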
NVIDIA-Omniverse/PhysX/blast/source/sdk/lowlevel/NvBlastAsset.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastAssert.h" #include "NvBlastAsset.h" #include "NvBlastActor.h" #include "NvBlastMath.h" #include "NvBlastPreprocessorInternal.h" #include "NvBlastIndexFns.h" #include "NvBlastActorSerializationBlock.h" #include "NvBlastMemory.h" #include <algorithm> //#include <random> namespace Nv { namespace Blast { //////// Local helper functions //////// /** Helper function to validate the input parameters for NvBlastCreateAsset. See NvBlastCreateAsset for parameter definitions. 
*/ static bool solverAssetBuildValidateInput(void* mem, const NvBlastAssetDesc* desc, void* scratch, NvBlastLog logFn) { if (mem == nullptr) { NVBLASTLL_LOG_ERROR(logFn, "AssetBuildValidateInput: NULL mem pointer input."); return false; } if (desc == nullptr) { NVBLASTLL_LOG_ERROR(logFn, "AssetBuildValidateInput: NULL desc pointer input."); return false; } if (desc->chunkCount == 0) { NVBLASTLL_LOG_ERROR(logFn, "AssetBuildValidateInput: Zero chunk count not allowed."); return false; } if (desc->chunkDescs == nullptr) { NVBLASTLL_LOG_ERROR(logFn, "AssetBuildValidateInput: NULL chunkDescs pointer input."); return false; } if (desc->bondCount != 0 && desc->bondDescs == nullptr) { NVBLASTLL_LOG_ERROR(logFn, "AssetBuildValidateInput: bondCount non-zero but NULL bondDescs pointer input."); return false; } if (scratch == nullptr) { NVBLASTLL_LOG_ERROR(logFn, "AssetBuildValidateInput: NULL scratch pointer input."); return false; } return true; } struct AssetDataOffsets { size_t m_chunks; size_t m_bonds; size_t m_subtreeLeafChunkCounts; size_t m_supportChunkIndices; size_t m_chunkToGraphNodeMap; size_t m_graphAdjacencyPartition; size_t m_graphAdjacentNodeIndices; size_t m_graphAdjacentBondIndices; }; static size_t createAssetDataOffsets(AssetDataOffsets& offsets, uint32_t chunkCount, uint32_t graphNodeCount, uint32_t bondCount) { NvBlastCreateOffsetStart(sizeof(Asset)); NvBlastCreateOffsetAlign16(offsets.m_chunks, chunkCount * sizeof(NvBlastChunk)); NvBlastCreateOffsetAlign16(offsets.m_bonds, bondCount * sizeof(NvBlastBond)); NvBlastCreateOffsetAlign16(offsets.m_subtreeLeafChunkCounts, chunkCount * sizeof(uint32_t)); NvBlastCreateOffsetAlign16(offsets.m_supportChunkIndices, graphNodeCount * sizeof(uint32_t)); NvBlastCreateOffsetAlign16(offsets.m_chunkToGraphNodeMap, chunkCount * sizeof(uint32_t)); NvBlastCreateOffsetAlign16(offsets.m_graphAdjacencyPartition, (graphNodeCount + 1) * sizeof(uint32_t)); NvBlastCreateOffsetAlign16(offsets.m_graphAdjacentNodeIndices, (2 * bondCount) * sizeof(uint32_t)); NvBlastCreateOffsetAlign16(offsets.m_graphAdjacentBondIndices, (2 * bondCount) * sizeof(uint32_t)); return NvBlastCreateOffsetEndAlign16(); } Asset* initializeAsset(void* mem, uint32_t chunkCount, uint32_t graphNodeCount, uint32_t leafChunkCount, uint32_t firstSubsupportChunkIndex, uint32_t bondCount, NvBlastLog logFn) { // Data offsets AssetDataOffsets offsets; const size_t dataSize = createAssetDataOffsets(offsets, chunkCount, graphNodeCount, bondCount); // Restricting our data size to < 4GB so that we may use uint32_t offsets if (dataSize > (size_t)UINT32_MAX) { NVBLASTLL_LOG_ERROR(logFn, "Nv::Blast::allocateAsset: Asset data size will exceed 4GB. 
Instance not created.\n"); return nullptr; } // Zero memory and cast to Asset Asset* asset = reinterpret_cast<Asset*>(memset(mem, 0, dataSize)); // Fill in fields const size_t graphOffset = NV_OFFSET_OF(Asset, m_graph); asset->m_header.dataType = NvBlastDataBlock::AssetDataBlock; asset->m_header.formatVersion = 0; // Not currently using this field asset->m_header.size = (uint32_t)dataSize; asset->m_header.reserved = 0; memset(&asset->m_ID, 0, sizeof(NvBlastID)); asset->m_chunkCount = chunkCount; asset->m_graph.m_nodeCount = graphNodeCount; asset->m_graph.m_chunkIndicesOffset = (uint32_t)(offsets.m_supportChunkIndices - graphOffset); asset->m_graph.m_adjacencyPartitionOffset = (uint32_t)(offsets.m_graphAdjacencyPartition - graphOffset); asset->m_graph.m_adjacentNodeIndicesOffset = (uint32_t)(offsets.m_graphAdjacentNodeIndices - graphOffset); asset->m_graph.m_adjacentBondIndicesOffset = (uint32_t)(offsets.m_graphAdjacentBondIndices - graphOffset); asset->m_leafChunkCount = leafChunkCount; asset->m_firstSubsupportChunkIndex = firstSubsupportChunkIndex; asset->m_bondCount = bondCount; asset->m_chunksOffset = (uint32_t)offsets.m_chunks; asset->m_bondsOffset = (uint32_t)offsets.m_bonds; asset->m_subtreeLeafChunkCountsOffset = (uint32_t)offsets.m_subtreeLeafChunkCounts; asset->m_chunkToGraphNodeMapOffset = (uint32_t)offsets.m_chunkToGraphNodeMap; // Ensure Bonds remain aligned NV_COMPILE_TIME_ASSERT((sizeof(NvBlastBond) & 0xf) == 0); // Ensure Bonds are aligned - note, this requires that the block be aligned NVBLAST_ASSERT((uintptr_t(asset->getBonds()) & 0xf) == 0); return asset; } /** Tests for a loop in a digraph starting at a given graph vertex. Using the implied digraph given by the chunkDescs' parentChunkIndex fields, the graph is walked from the chunk descriptor chunkDescs[chunkIndex], to determine if that walk leads to a loop. Input: chunkDescs - the chunk descriptors chunkDescIndex - the index of the starting chunk descriptor Return: true if a loop is found, false otherwise. */ NV_INLINE bool testForLoop(const NvBlastChunkDesc* chunkDescs, uint32_t chunkDescIndex) { NVBLAST_ASSERT(!isInvalidIndex(chunkDescIndex)); uint32_t chunkDescIndex1 = chunkDescs[chunkDescIndex].parentChunkDescIndex; if (isInvalidIndex(chunkDescIndex1)) { return false; } uint32_t chunkDescIndex2 = chunkDescs[chunkDescIndex1].parentChunkDescIndex; if (isInvalidIndex(chunkDescIndex2)) { return false; } do { // advance index 1 chunkDescIndex1 = chunkDescs[chunkDescIndex1].parentChunkDescIndex; // No need to check for termination here. index 2 would find it first. // advance index 2 twice and check for incidence with index 1 as well as termination if ((chunkDescIndex2 = chunkDescs[chunkDescIndex2].parentChunkDescIndex) == chunkDescIndex1) { return true; } if (isInvalidIndex(chunkDescIndex2)) { return false; } if ((chunkDescIndex2 = chunkDescs[chunkDescIndex2].parentChunkDescIndex) == chunkDescIndex1) { return true; } } while (!isInvalidIndex(chunkDescIndex2)); return false; } /** Tests a set of chunk descriptors to see if the implied hierarchy describes valid trees. A single tree implies that only one of the chunkDescs has an invalid (invalidIndex<uint32_t>()) parentChunkIndex, and all other chunks are descendents of that chunk. Passed set of chunk is checked to contain one or more single trees. Input: chunkCount - the number of chunk descriptors chunkDescs - an array of chunk descriptors of length chunkCount logFn - message function (see NvBlastLog definition). 
Return: true if the descriptors imply a valid trees, false otherwise. */ static bool testForValidTrees(uint32_t chunkCount, const NvBlastChunkDesc* chunkDescs, NvBlastLog logFn) { for (uint32_t i = 0; i < chunkCount; ++i) { // Ensure there are no loops if (testForLoop(chunkDescs, i)) { NVBLASTLL_LOG_WARNING(logFn, "testForValidTrees: loop found. Asset will not be created."); return false; } } return true; } #if 0 /** * Helper to generate random GUID */ static NvBlastID NvBlastExtCreateRandomID() { NvBlastID id; static std::default_random_engine re; *reinterpret_cast<uint32_t*>(&id.data[0]) = re(); *reinterpret_cast<uint32_t*>(&id.data[4]) = re(); *reinterpret_cast<uint32_t*>(&id.data[8]) = re(); *reinterpret_cast<uint32_t*>(&id.data[12]) = re(); return id; } #endif // CRC-32C (iSCSI) polynomial in reversed bit order. inline uint32_t crc32c(uint32_t crc, const char* buf, size_t len) { crc = ~crc; while (len--) { crc ^= *buf++; for (int k = 0; k < 8; k++) crc = (crc >> 1) ^ (-(int)(crc & 1) & 0x82f63b78); } return ~crc; } /** * Helper to generate GUID from NvBlastAsset memory */ static NvBlastID createIDFromAsset(const NvBlastAsset* asset, NvBlastLog logFn) { // Divide memory into quarters const char* m0 = reinterpret_cast<const char*>(asset); const char* m4 = m0 + NvBlastAssetGetSize(asset, logFn); const char* m2 = m0 + (m4 - m0) / 2; const char* m1 = m0 + (m2 - m0) / 2; const char* m3 = m2 + (m4 - m2) / 2; // CRC hash quarters const uint32_t a = crc32c(0, m0, m1 - m0); const uint32_t b = crc32c(a, m1, m2 - m1); const uint32_t c = crc32c(b, m2, m3 - m2); const uint32_t d = crc32c(c, m3, m4 - m3); // Build ID out of hashes NvBlastID id; *reinterpret_cast<uint32_t*>(&id.data[0x0]) = a; *reinterpret_cast<uint32_t*>(&id.data[0x4]) = b; *reinterpret_cast<uint32_t*>(&id.data[0x8]) = c; *reinterpret_cast<uint32_t*>(&id.data[0xc]) = d; return id; } /** Struct to hold chunk indices and bond index for sorting Utility struct used by NvBlastCreateAsset in order to arrange bond data in a lookup table, and also to easily identify redundant input. */ struct BondSortData { BondSortData(uint32_t c0, uint32_t c1, uint32_t b) : m_c0(c0), m_c1(c1), m_b(b) {} uint32_t m_c0; uint32_t m_c1; uint32_t m_b; }; /** Functional class for sorting a list of BondSortData */ class BondsOrdered { public: bool operator () (const BondSortData& bond0, const BondSortData& bond1) const { return (bond0.m_c0 != bond1.m_c0) ? (bond0.m_c0 < bond1.m_c0) : (bond0.m_c1 != bond1.m_c1 ? 
bond0.m_c1 < bond1.m_c1 : bond0.m_b < bond1.m_b); } }; //////// Asset static functions //////// size_t Asset::getMemorySize(const NvBlastAssetDesc* desc) { NVBLAST_ASSERT(desc != nullptr); // Count graph nodes uint32_t graphNodeCount = 0; for (uint32_t i = 0; i < desc->chunkCount; ++i) { graphNodeCount += (uint32_t)((desc->chunkDescs[i].flags & NvBlastChunkDesc::SupportFlag) != 0); } for (uint32_t i = 0; i < desc->bondCount; ++i) { const NvBlastBondDesc& bondDesc = desc->bondDescs[i]; const uint32_t chunkIndex0 = bondDesc.chunkIndices[0]; const uint32_t chunkIndex1 = bondDesc.chunkIndices[1]; if ((isInvalidIndex(chunkIndex0) && chunkIndex1 < desc->chunkCount) || (isInvalidIndex(chunkIndex1) && chunkIndex0 < desc->chunkCount)) { ++graphNodeCount; // world node break; } } AssetDataOffsets offsets; return createAssetDataOffsets(offsets, desc->chunkCount, graphNodeCount, desc->bondCount); } size_t Asset::createRequiredScratch(const NvBlastAssetDesc* desc, NvBlastLog logFn) { NVBLASTLL_CHECK(desc != nullptr, logFn, "Asset::createRequiredScratch: NULL desc.", return 0); // Aligned and padded return 16 + align16(desc->chunkCount*sizeof(char)) + align16(desc->chunkCount*sizeof(uint32_t)) + align16(2 * desc->bondCount*sizeof(BondSortData)) + align16(desc->bondCount*sizeof(uint32_t)); } Asset* Asset::create(void* mem, const NvBlastAssetDesc* desc, void* scratch, NvBlastLog logFn) { #if NVBLASTLL_CHECK_PARAMS if (!solverAssetBuildValidateInput(mem, desc, scratch, logFn)) { return nullptr; } #else NV_UNUSED(solverAssetBuildValidateInput); #endif NVBLASTLL_CHECK((reinterpret_cast<uintptr_t>(mem) & 0xF) == 0, logFn, "NvBlastCreateAsset: mem pointer not 16-byte aligned.", return nullptr); // Make sure we have valid trees before proceeding if (!testForValidTrees(desc->chunkCount, desc->chunkDescs, logFn)) { return nullptr; } scratch = (void*)align16((size_t)scratch); // Bump to 16-byte alignment (see padding in NvBlastGetRequiredScratchForCreateAsset) // reserve chunkAnnotation on scratch char* chunkAnnotation = reinterpret_cast<char*>(scratch); scratch = pointerOffset(scratch, align16(desc->chunkCount)); // test for coverage, chunkAnnotation will be filled there. uint32_t leafChunkCount; uint32_t supportChunkCount; if (!ensureExactSupportCoverage(supportChunkCount, leafChunkCount, chunkAnnotation, desc->chunkCount, const_cast<NvBlastChunkDesc*>(desc->chunkDescs), true, logFn)) { NVBLASTLL_LOG_ERROR(logFn, "NvBlastCreateAsset: support coverage is not exact. Asset will not be created. The Asset helper function NvBlastEnsureAssetExactSupportCoverage may be used to create exact coverage."); return nullptr; } // test for valid chunk order if (!testForValidChunkOrder(desc->chunkCount, desc->chunkDescs, chunkAnnotation, scratch)) { NVBLASTLL_LOG_ERROR(logFn, "NvBlastCreateAsset: chunks order is invalid. Asset will not be created. 
Use Asset helper functions such as NvBlastBuildAssetDescChunkReorderMap to fix descriptor order."); return nullptr; } // Find first subsupport chunk uint32_t firstSubsupportChunkIndex = desc->chunkCount; // Set value to chunk count if no subsupport chunks are found for (uint32_t i = 0; i < desc->chunkCount; ++i) { if ((chunkAnnotation[i] & ChunkAnnotation::UpperSupport) == 0) { firstSubsupportChunkIndex = i; break; } } // Create map from global indices to graph node indices and initialize to invalid values uint32_t* graphNodeIndexMap = (uint32_t*)scratch; scratch = pointerOffset(scratch, align16(desc->chunkCount * sizeof(uint32_t))); memset(graphNodeIndexMap, 0xFF, desc->chunkCount*sizeof(uint32_t)); // Fill graphNodeIndexMap uint32_t graphNodeCount = 0; for (uint32_t i = 0; i < desc->chunkCount; ++i) { if ((chunkAnnotation[i] & ChunkAnnotation::Support) != 0) { graphNodeIndexMap[i] = graphNodeCount++; } } NVBLAST_ASSERT(graphNodeCount == supportChunkCount); // Scratch array for bond sorting, of size 2*desc->bondCount BondSortData* bondSortArray = (BondSortData*)scratch; scratch = pointerOffset(scratch, align16(2 * desc->bondCount*sizeof(BondSortData))); // Bond remapping array of size desc->bondCount uint32_t* bondMap = (uint32_t*)scratch; memset(bondMap, 0xFF, desc->bondCount*sizeof(uint32_t)); // Eliminate bad or redundant bonds, finding actual bond count uint32_t bondCount = 0; if (desc->bondCount > 0) { // Check for duplicates from input data as well as non-support chunk indices. All such bonds must be removed. bool invalidFound = false; bool duplicateFound = false; bool nonSupportFound = false; // Construct temp array of chunk index pairs and bond indices. This array is symmetrized to hold the reversed chunk indices as well. uint32_t bondSortArraySize = 0; BondSortData* t = bondSortArray; bool addWorldNode = false; for (uint32_t i = 0; i < desc->bondCount; ++i) { const NvBlastBondDesc& bondDesc = desc->bondDescs[i]; const uint32_t chunkIndex0 = bondDesc.chunkIndices[0]; const uint32_t chunkIndex1 = bondDesc.chunkIndices[1]; if ((chunkIndex0 >= desc->chunkCount && !isInvalidIndex(chunkIndex0)) || (chunkIndex1 >= desc->chunkCount && !isInvalidIndex(chunkIndex1)) || chunkIndex0 == chunkIndex1) { invalidFound = true; continue; } uint32_t graphIndex0; if (!isInvalidIndex(chunkIndex0)) { graphIndex0 = graphNodeIndexMap[chunkIndex0]; } else { addWorldNode = true; graphIndex0 = graphNodeCount; // Will set graphNodeCount = supportChunkCount + 1 } uint32_t graphIndex1; if (!isInvalidIndex(chunkIndex1)) { graphIndex1 = graphNodeIndexMap[chunkIndex1]; } else { addWorldNode = true; graphIndex1 = graphNodeCount; // Will set graphNodeCount = supportChunkCount + 1 } if (isInvalidIndex(graphIndex0) || isInvalidIndex(graphIndex1)) { nonSupportFound = true; continue; } t[bondSortArraySize++] = BondSortData(graphIndex0, graphIndex1, i); t[bondSortArraySize++] = BondSortData(graphIndex1, graphIndex0, i); } // Sort the temp array std::sort(bondSortArray, bondSortArray + bondSortArraySize, BondsOrdered()); uint32_t symmetrizedBondCount = 0; for (uint32_t i = 0; i < bondSortArraySize; ++i) { const bool duplicate = i > 0 && bondSortArray[i].m_c0 == bondSortArray[i - 1].m_c0 && bondSortArray[i].m_c1 == bondSortArray[i - 1].m_c1; // Since the array is sorted, uniqueness may be tested by only considering the previous element duplicateFound = duplicateFound || duplicate; if (!duplicate) { // Keep this bond if (symmetrizedBondCount != i) { bondSortArray[symmetrizedBondCount] = bondSortArray[i]; // Compact array 
if we've dropped bonds } ++symmetrizedBondCount; } } NVBLAST_ASSERT((symmetrizedBondCount & 1) == 0); // Because we symmetrized, there should be an even number bondCount = symmetrizedBondCount / 2; // World node references found in bonds; add a world node if (addWorldNode) { ++graphNodeCount; } // Report warnings if (invalidFound) { NVBLASTLL_LOG_WARNING(logFn, "NvBlastCreateAsset: Invalid bonds found (non-existent or same chunks referenced) and removed from asset."); } if (duplicateFound) { NVBLASTLL_LOG_WARNING(logFn, "NvBlastCreateAsset: Duplicate bonds found and removed from asset."); } if (nonSupportFound) { NVBLASTLL_LOG_WARNING(logFn, "NvBlastCreateAsset: Bonds referencing non-support chunks found and removed from asset."); } } // Allocate memory for asset Asset* asset = initializeAsset(mem, desc->chunkCount, graphNodeCount, leafChunkCount, firstSubsupportChunkIndex, bondCount, logFn); // Asset data pointers SupportGraph& graph = asset->m_graph; NvBlastChunk* chunks = asset->getChunks(); NvBlastBond* bonds = asset->getBonds(); uint32_t* subtreeLeafChunkCounts = asset->getSubtreeLeafChunkCounts(); // Create chunks uint32_t* graphChunkIndices = graph.getChunkIndices(); memset(graphChunkIndices, 0xFF, graphNodeCount * sizeof(uint32_t)); // Ensures unmapped node indices go to invalidIndex - this is important for the world node, if added for (uint32_t i = 0; i < desc->chunkCount; ++i) { const NvBlastChunkDesc& chunkDesc = desc->chunkDescs[i]; NvBlastChunk& assetChunk = chunks[i]; memcpy(assetChunk.centroid, chunkDesc.centroid, 3 * sizeof(float)); assetChunk.volume = chunkDesc.volume; assetChunk.parentChunkIndex = chunkDesc.parentChunkDescIndex; assetChunk.firstChildIndex = invalidIndex<uint32_t>(); // Will be filled in below assetChunk.childIndexStop = assetChunk.firstChildIndex; assetChunk.userData = chunkDesc.userData; const uint32_t graphNodeIndex = graphNodeIndexMap[i]; if (!isInvalidIndex(graphNodeIndex)) { graphChunkIndices[graphNodeIndex] = i; } } // Copy chunkToGraphNodeMap memcpy(asset->getChunkToGraphNodeMap(), graphNodeIndexMap, desc->chunkCount * sizeof(uint32_t)); // Count chunk children for (uint32_t i = 0; i < desc->chunkCount; ++i) { const uint32_t parentChunkIndex = chunks[i].parentChunkIndex; if (!isInvalidIndex(parentChunkIndex)) { if (chunks[parentChunkIndex].childIndexStop == chunks[parentChunkIndex].firstChildIndex) { chunks[parentChunkIndex].childIndexStop = chunks[parentChunkIndex].firstChildIndex = i; } ++chunks[parentChunkIndex].childIndexStop; } } // Create bonds uint32_t* graphAdjacencyPartition = graph.getAdjacencyPartition(); uint32_t* graphAdjacentNodeIndices = graph.getAdjacentNodeIndices(); uint32_t* graphAdjacentBondIndices = graph.getAdjacentBondIndices(); if (bondCount > 0) { // Create the lookup table from the sorted array createIndexStartLookup<uint32_t>(graphAdjacencyPartition, 0, graphNodeCount - 1, &bondSortArray->m_c0, 2 * bondCount, sizeof(BondSortData)); // Write the adjacent chunk and bond index data uint32_t bondIndex = 0; for (uint32_t i = 0; i < 2 * bondCount; ++i) { const BondSortData& bondSortData = bondSortArray[i]; graphAdjacentNodeIndices[i] = bondSortData.m_c1; const uint32_t oldBondIndex = bondSortData.m_b; const NvBlastBondDesc& bondDesc = desc->bondDescs[oldBondIndex]; if (isInvalidIndex(bondMap[oldBondIndex])) { bonds[bondIndex] = bondDesc.bond; bondMap[oldBondIndex] = bondIndex++; } NVBLAST_ASSERT(bondMap[oldBondIndex] < bondCount); graphAdjacentBondIndices[i] = bondMap[oldBondIndex]; } } else { // No bonds - zero out all 
partition elements (including last one, to give zero size for adjacent data arrays) memset(graphAdjacencyPartition, 0, (graphNodeCount + 1)*sizeof(uint32_t)); } // Count subtree leaf chunks memset(subtreeLeafChunkCounts, 0, desc->chunkCount*sizeof(uint32_t)); uint32_t* breadthFirstChunkIndices = graphNodeIndexMap; // Reusing graphNodeIndexMap ... graphNodeIndexMap may no longer be used for (uint32_t startChunkIndex = 0; startChunkIndex < desc->chunkCount; ++startChunkIndex) { if (!isInvalidIndex(chunks[startChunkIndex].parentChunkIndex)) { break; // Only iterate through root chunks at this level } const uint32_t enumeratedChunkCount = enumerateChunkHierarchyBreadthFirst(breadthFirstChunkIndices, desc->chunkCount, chunks, startChunkIndex); for (uint32_t chunkNum = enumeratedChunkCount; chunkNum--;) { const uint32_t chunkIndex = breadthFirstChunkIndices[chunkNum]; const NvBlastChunk& chunk = chunks[chunkIndex]; if (chunk.childIndexStop <= chunk.firstChildIndex) { subtreeLeafChunkCounts[chunkIndex] = 1; } if (!isInvalidIndex(chunk.parentChunkIndex)) { subtreeLeafChunkCounts[chunk.parentChunkIndex] += subtreeLeafChunkCounts[chunkIndex]; } } } // Assign ID after data has been created asset->m_ID = createIDFromAsset(asset, logFn); return asset; } bool Asset::ensureExactSupportCoverage(uint32_t& supportChunkCount, uint32_t& leafChunkCount, char* chunkAnnotation, uint32_t chunkCount, NvBlastChunkDesc* chunkDescs, bool testOnly, NvBlastLog logFn) { // Clear leafChunkCount leafChunkCount = 0; memset(chunkAnnotation, 0, chunkCount); // Walk up the hierarchy from all chunks and mark all parents for (uint32_t i = 0; i < chunkCount; ++i) { if (chunkAnnotation[i] & Asset::ChunkAnnotation::Parent) { continue; } uint32_t chunkDescIndex = i; while (!isInvalidIndex(chunkDescIndex = chunkDescs[chunkDescIndex].parentChunkDescIndex)) { chunkAnnotation[chunkDescIndex] = Asset::ChunkAnnotation::Parent; // Note as non-leaf } } // Walk up the hierarchy from all leaves (counting them with leafChunkCount) and keep track of the support chunks found on each chain // Exactly one support chunk should be found on each walk. Remove all but the highest support markings if more than one are found. 
bool redundantCoverage = false; bool insufficientCoverage = false; for (uint32_t i = 0; i < chunkCount; ++i) { if (chunkAnnotation[i] & Asset::ChunkAnnotation::Parent) { continue; } ++leafChunkCount; uint32_t supportChunkDescIndex; supportChunkDescIndex = invalidIndex<uint32_t>(); uint32_t chunkDescIndex = i; bool doneWithChain = false; do { if (chunkDescs[chunkDescIndex].flags & NvBlastChunkDesc::SupportFlag) { if (chunkAnnotation[chunkDescIndex] & Asset::ChunkAnnotation::Support) { // We've already been up this chain and marked this as support, so we have unique coverage already doneWithChain = true; } chunkAnnotation[chunkDescIndex] |= Asset::ChunkAnnotation::Support; // Note as support if (!isInvalidIndex(supportChunkDescIndex)) { if (testOnly) { return false; } redundantCoverage = true; chunkAnnotation[supportChunkDescIndex] &= ~Asset::ChunkAnnotation::Support; // Remove support marking do // Run up the hierarchy from supportChunkDescIndex to chunkDescIndex and remove the supersupport markings { supportChunkDescIndex = chunkDescs[supportChunkDescIndex].parentChunkDescIndex; chunkAnnotation[supportChunkDescIndex] &= ~Asset::ChunkAnnotation::SuperSupport; // Remove supersupport marking } while (supportChunkDescIndex != chunkDescIndex); } supportChunkDescIndex = chunkDescIndex; } else if (!isInvalidIndex(supportChunkDescIndex)) { chunkAnnotation[chunkDescIndex] |= Asset::ChunkAnnotation::SuperSupport; // Not a support chunk and we've already found a support chunk, so this is super-support } } while (!doneWithChain && !isInvalidIndex(chunkDescIndex = chunkDescs[chunkDescIndex].parentChunkDescIndex)); if (isInvalidIndex(supportChunkDescIndex)) { if (testOnly) { return false; } insufficientCoverage = true; } } if (redundantCoverage) { NVBLASTLL_LOG_INFO(logFn, "NvBlastCreateAsset: some leaf-to-root chains had more than one support chunk. Some support chunks removed."); } if (insufficientCoverage) { // If coverage was insufficient, then walk up the hierarchy again and mark all chunks that have a support descendant. // This will allow us to place support chunks at the highest possible level to obtain coverage. 
for (uint32_t i = 0; i < chunkCount; ++i) { if (chunkAnnotation[i] & Asset::ChunkAnnotation::Parent) { continue; } bool supportFound = false; uint32_t chunkDescIndex = i; do { if (chunkAnnotation[chunkDescIndex] & Asset::ChunkAnnotation::Support) { supportFound = true; } else if (supportFound) { chunkAnnotation[chunkDescIndex] |= Asset::ChunkAnnotation::SuperSupport; // Note that a descendant has support } } while (!isInvalidIndex(chunkDescIndex = chunkDescs[chunkDescIndex].parentChunkDescIndex)); } // Now walk up the hierarchy from each leaf one more time, and make sure there is coverage for (uint32_t i = 0; i < chunkCount; ++i) { if (chunkAnnotation[i] & Asset::ChunkAnnotation::Parent) { continue; } uint32_t previousChunkDescIndex; previousChunkDescIndex = invalidIndex<uint32_t>(); uint32_t chunkDescIndex = i; for (;;) { if (chunkAnnotation[chunkDescIndex] & Asset::ChunkAnnotation::Support) { break; // There is support along this chain } if (chunkAnnotation[chunkDescIndex] & Asset::ChunkAnnotation::SuperSupport) { NVBLAST_ASSERT(!isInvalidIndex(previousChunkDescIndex)); // This should be impossible chunkAnnotation[previousChunkDescIndex] |= Asset::ChunkAnnotation::Support; // There is no support along this chain, and this is the highest place where we can put support break; } previousChunkDescIndex = chunkDescIndex; chunkDescIndex = chunkDescs[chunkDescIndex].parentChunkDescIndex; if (isInvalidIndex(chunkDescIndex)) { chunkAnnotation[previousChunkDescIndex] |= Asset::ChunkAnnotation::Support; // There was no support found anywhere in the hierarchy, so we add it at the root break; } } } NVBLASTLL_LOG_INFO(logFn, "NvBlastCreateAsset: some leaf-to-root chains had no support chunks. Support chunks added."); } // Apply changes and count the number of support chunks supportChunkCount = 0; for (uint32_t i = 0; i < chunkCount; ++i) { const bool wasSupport = (chunkDescs[i].flags & NvBlastChunkDesc::SupportFlag) != 0; const bool nowSupport = (chunkAnnotation[i] & Asset::ChunkAnnotation::Support) != 0; if (wasSupport != nowSupport) { chunkDescs[i].flags ^= NvBlastChunkDesc::SupportFlag; } if ((chunkDescs[i].flags & NvBlastChunkDesc::SupportFlag) != 0) { ++supportChunkCount; } } return !redundantCoverage && !insufficientCoverage; } bool Asset::testForValidChunkOrder(uint32_t chunkCount, const NvBlastChunkDesc* chunkDescs, const char* chunkAnnotation, void* scratch) { char* chunkMarks = static_cast<char*>(memset(scratch, 0, chunkCount)); uint32_t currentParentChunkDescIndex = invalidIndex<uint32_t>(); for (uint32_t i = 0; i < chunkCount; ++i) { const uint32_t parentChunkDescIndex = chunkDescs[i].parentChunkDescIndex; if (!isInvalidIndex(parentChunkDescIndex) && parentChunkDescIndex >= i) // 'chunks should come after their parents' { return false; } if (parentChunkDescIndex != currentParentChunkDescIndex) { if (!isInvalidIndex(currentParentChunkDescIndex)) { chunkMarks[currentParentChunkDescIndex] = 1; } currentParentChunkDescIndex = parentChunkDescIndex; if (isInvalidIndex(currentParentChunkDescIndex)) // 'root chunks should go first' { return false; } else if (chunkMarks[currentParentChunkDescIndex] != 0) // 'all chunks with same parent index should go in a row' { return false; } } if (i < chunkCount - 1) { const bool upperSupport0 = (chunkAnnotation[i] & ChunkAnnotation::UpperSupport) != 0; const bool upperSupport1 = (chunkAnnotation[i + 1] & ChunkAnnotation::UpperSupport) != 0; if (!upperSupport0 && upperSupport1) // 'upper-support chunks should come before subsupport chunks' { return false; } } } 
return true; } } // namespace Blast } // namespace Nv // API implementation extern "C" { size_t NvBlastGetRequiredScratchForCreateAsset(const NvBlastAssetDesc* desc, NvBlastLog logFn) { NVBLASTLL_CHECK(desc != nullptr, logFn, "NvBlastGetRequiredScratchForCreateAsset: NULL desc pointer input.", return 0); return Nv::Blast::Asset::createRequiredScratch(desc, logFn); } size_t NvBlastGetAssetMemorySize(const NvBlastAssetDesc* desc, NvBlastLog logFn) { NVBLASTLL_CHECK(desc != nullptr, logFn, "NvBlastGetAssetMemorySize: NULL desc input.", return 0); return Nv::Blast::Asset::getMemorySize(desc); } size_t NvBlastGetAssetMemorySizeFromSizeData(const NvBlastAssetMemSizeData& sizeData, NvBlastLog logFn) { NV_UNUSED(logFn); Nv::Blast::AssetDataOffsets offsets; return Nv::Blast::createAssetDataOffsets(offsets, sizeData.chunkCount, sizeData.nodeCount, sizeData.bondCount); } NvBlastAsset* NvBlastCreateAsset(void* mem, const NvBlastAssetDesc* desc, void* scratch, NvBlastLog logFn) { return Nv::Blast::Asset::create(mem, desc, scratch, logFn); } size_t NvBlastAssetGetFamilyMemorySize(const NvBlastAsset* asset, NvBlastLog logFn) { NVBLASTLL_CHECK(asset != nullptr, logFn, "NvBlastAssetGetFamilyMemorySize: NULL asset pointer input.", return 0); return Nv::Blast::getFamilyMemorySize(reinterpret_cast<const Nv::Blast::Asset*>(asset)); } size_t NvBlastAssetGetFamilyMemorySizeFromSizeData(const NvBlastAssetMemSizeData& sizeData, NvBlastLog logFn) { NV_UNUSED(logFn); return Nv::Blast::getFamilyMemorySize(sizeData); } NvBlastID NvBlastAssetGetID(const NvBlastAsset* asset, NvBlastLog logFn) { NVBLASTLL_CHECK(asset != nullptr, logFn, "NvBlastAssetGetID: NULL asset pointer input.", NvBlastID zero; memset(&zero, 0, sizeof(NvBlastID)); return zero); return ((Nv::Blast::Asset*)asset)->m_ID; } bool NvBlastAssetSetID(NvBlastAsset* asset, const NvBlastID* id, NvBlastLog logFn) { NVBLASTLL_CHECK(asset != nullptr, logFn, "NvBlastAssetSetID: NULL asset pointer input.", return false); NVBLASTLL_CHECK(id != nullptr, logFn, "NvBlastAssetSetID: NULL id pointer input.", return false); ((Nv::Blast::Asset*)asset)->m_ID = *id; return true; } uint32_t NvBlastAssetGetFormatVersion(const NvBlastAsset* asset, NvBlastLog logFn) { NVBLASTLL_CHECK(asset != nullptr, logFn, "NvBlastAssetGetFormatVersion: NULL asset input.", return UINT32_MAX); return ((Nv::Blast::Asset*)asset)->m_header.formatVersion; } uint32_t NvBlastAssetGetSize(const NvBlastAsset* asset, NvBlastLog logFn) { NVBLASTLL_CHECK(asset != nullptr, logFn, "NvBlastAssetGetSize: NULL asset input.", return 0); return ((Nv::Blast::Asset*)asset)->m_header.size; } uint32_t NvBlastAssetGetChunkCount(const NvBlastAsset* asset, NvBlastLog logFn) { NVBLASTLL_CHECK(asset != nullptr, logFn, "NvBlastAssetGetChunkCount: NULL asset input.", return 0); return ((Nv::Blast::Asset*)asset)->m_chunkCount; } uint32_t NvBlastAssetGetSupportChunkCount(const NvBlastAsset* asset, NvBlastLog logFn) { NVBLASTLL_CHECK(asset != nullptr, logFn, "NvBlastAssetGetSupportChunkCount: NULL asset input.", return 0); const Nv::Blast::Asset* a = reinterpret_cast<const Nv::Blast::Asset*>(asset); const Nv::Blast::SupportGraph& graph = a->m_graph; if (graph.m_nodeCount == 0) { return 0; // This shouldn't happen } return Nv::Blast::isInvalidIndex(graph.getChunkIndices()[graph.m_nodeCount - 1]) ? 
graph.m_nodeCount - 1 : graph.m_nodeCount; } uint32_t NvBlastAssetGetLeafChunkCount(const NvBlastAsset* asset, NvBlastLog logFn) { NVBLASTLL_CHECK(asset != nullptr, logFn, "NvBlastAssetGetLeafChunkCount: NULL asset input.", return 0); return ((Nv::Blast::Asset*)asset)->m_leafChunkCount; } uint32_t NvBlastAssetGetFirstSubsupportChunkIndex(const NvBlastAsset* asset, NvBlastLog logFn) { NVBLASTLL_CHECK(asset != nullptr, logFn, "NvBlastAssetGetFirstSubsupportChunkIndex: NULL asset input.", return 0); return ((Nv::Blast::Asset*)asset)->m_firstSubsupportChunkIndex; } uint32_t NvBlastAssetGetBondCount(const NvBlastAsset* asset, NvBlastLog logFn) { NVBLASTLL_CHECK(asset != nullptr, logFn, "NvBlastAssetGetBondCount: NULL asset input.", return 0); return ((Nv::Blast::Asset*)asset)->m_bondCount; } const NvBlastSupportGraph NvBlastAssetGetSupportGraph(const NvBlastAsset* asset, NvBlastLog logFn) { NVBLASTLL_CHECK(asset != nullptr, logFn, "NvBlastAssetGetSupportGraph: NULL asset input.", NvBlastSupportGraph blank; blank.nodeCount = 0; blank.chunkIndices = blank.adjacencyPartition = blank.adjacentNodeIndices = blank.adjacentBondIndices = nullptr; return blank); const Nv::Blast::SupportGraph& supportGraph = static_cast<const Nv::Blast::Asset*>(asset)->m_graph; NvBlastSupportGraph graph; graph.nodeCount = supportGraph.m_nodeCount; graph.chunkIndices = supportGraph.getChunkIndices(); graph.adjacencyPartition = supportGraph.getAdjacencyPartition(); graph.adjacentNodeIndices = supportGraph.getAdjacentNodeIndices(); graph.adjacentBondIndices = supportGraph.getAdjacentBondIndices(); return graph; } const uint32_t* NvBlastAssetGetChunkToGraphNodeMap(const NvBlastAsset* asset, NvBlastLog logFn) { NVBLASTLL_CHECK(asset != nullptr, logFn, "NvBlastAssetGetChunkToGraphNodeMap: NULL asset input.", return nullptr); return static_cast<const Nv::Blast::Asset*>(asset)->getChunkToGraphNodeMap(); } const NvBlastChunk* NvBlastAssetGetChunks(const NvBlastAsset* asset, NvBlastLog logFn) { NVBLASTLL_CHECK(asset != nullptr, logFn, "NvBlastAssetGetChunks: NULL asset input.", return 0); return ((Nv::Blast::Asset*)asset)->getChunks(); } const NvBlastBond* NvBlastAssetGetBonds(const NvBlastAsset* asset, NvBlastLog logFn) { NVBLASTLL_CHECK(asset != nullptr, logFn, "NvBlastAssetGetBonds: NULL asset input.", return 0); return ((Nv::Blast::Asset*)asset)->getBonds(); } uint32_t NvBlastAssetGetActorSerializationSizeUpperBound(const NvBlastAsset* asset, NvBlastLog logFn) { NVBLASTLL_CHECK(asset != nullptr, logFn, "NvBlastAssetGetActorSerializationSizeUpperBound: NULL asset input.", return 0); const Nv::Blast::Asset& solverAsset = *(const Nv::Blast::Asset*)asset; const uint32_t graphNodeCount = solverAsset.m_graph.m_nodeCount; // Calculate serialization size for an actor with all graph nodes (and therefore all bonds), and somehow with all graph nodes visible (after all, this is an upper bound). const uint64_t upperBound = Nv::Blast::getActorSerializationSize(graphNodeCount, solverAsset.getLowerSupportChunkCount(), graphNodeCount, solverAsset.getBondCount()); if (upperBound > UINT32_MAX) { NVBLASTLL_LOG_WARNING(logFn, "NvBlastAssetGetActorSerializationSizeUpperBound: Serialization block size exceeds 4GB. Returning 0.\n"); return 0; } return static_cast<uint32_t>(upperBound); } } // extern "C"
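Putting the creation path above together: query the memory and scratch sizes, allocate 16-byte-aligned storage for the asset block (the alignment requirement is checked explicitly in Asset::create), and build the asset in place. The use of std::aligned_alloc and the "NvBlast.h" header name are assumptions in this sketch.

// Sketch: build an NvBlastAsset from already-ordered chunk/bond descriptors.
// NvBlastCreateAsset constructs the asset in place inside `mem`; the caller owns
// (and eventually frees) that block.
#include "NvBlast.h"   // assumed low-level API header
#include <cstdlib>
#include <vector>

static NvBlastAsset* buildAsset(const NvBlastChunkDesc* chunkDescs, uint32_t chunkCount,
                                const NvBlastBondDesc* bondDescs, uint32_t bondCount)
{
    NvBlastAssetDesc desc = {};
    desc.chunkCount = chunkCount;
    desc.chunkDescs = chunkDescs;
    desc.bondCount  = bondCount;
    desc.bondDescs  = bondDescs;

    // Scratch for the builder (the implementation pads and aligns it internally).
    std::vector<char> scratch(NvBlastGetRequiredScratchForCreateAsset(&desc, nullptr));

    // The asset block itself must be 16-byte aligned (checked in Asset::create);
    // aligned_alloc requires the size to be a multiple of the alignment.
    const size_t assetSize = NvBlastGetAssetMemorySize(&desc, nullptr);
    void* mem = std::aligned_alloc(16, (assetSize + 15) & ~size_t(15)); // assumed allocator choice

    return NvBlastCreateAsset(mem, &desc, scratch.data(), nullptr);
}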
40,951
C++
37.094884
248
0.662817
NVIDIA-Omniverse/PhysX/blast/source/sdk/lowlevel/NvBlastFamily.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTFAMILY_H #define NVBLASTFAMILY_H #include "NvBlastAsset.h" #include "NvPreprocessor.h" #include "NvBlastDLink.h" #include "NvBlastAtomic.h" #include "NvBlastMemory.h" #include <cstring> struct NvBlastAsset; namespace Nv { namespace Blast { // Forward declarations class FamilyGraph; class Actor; class Asset; /** Data header at the beginning of every NvBlastActor family The block address may be cast to a valid FamilyHeader pointer. */ struct FamilyHeader : public NvBlastDataBlock { /** The ID for the asset. This will be resolved into a pointer in the runtime data. */ NvBlastID m_assetID; /** Actors, of type Actor. Actors with support chunks will use this array in the range [0, m_asset->m_graphNodeCount), while subsupport actors will be placed in the range [m_asset->m_graphNodeCount, m_asset->getLowerSupportChunkCount()). */ NvBlastBlockArrayData(Actor, m_actorsOffset, getActors, m_asset->getLowerSupportChunkCount()); /** Visible chunk index links, of type IndexDLink<uint32_t>. getVisibleChunkIndexLinks returns an array of size m_asset->m_chunkCount of IndexDLink<uint32_t> (see IndexDLink). */ NvBlastBlockArrayData(IndexDLink<uint32_t>, m_visibleChunkIndexLinksOffset, getVisibleChunkIndexLinks, m_asset->m_chunkCount); /** Chunk actor IDs, of type uint32_t. These correspond to the ID of the actor which owns each chunk. A value of invalidIndex<uint32_t>() indicates no owner. getChunkActorIndices returns an array of size m_asset->m_firstSubsupportChunkIndex. */ NvBlastBlockArrayData(uint32_t, m_chunkActorIndicesOffset, getChunkActorIndices, m_asset->m_firstSubsupportChunkIndex); /** Graph node index links, of type uint32_t. The successor to index[i] is m_graphNodeIndexLinksOffset[i]. A value of invalidIndex<uint32_t>() indicates no successor. getGraphNodeIndexLinks returns an array of size m_asset->m_graphNodeCount. 
*/ NvBlastBlockArrayData(uint32_t, m_graphNodeIndexLinksOffset, getGraphNodeIndexLinks, m_asset->m_graph.m_nodeCount); /** Health for each support chunk and subsupport chunk, of type float. To access support chunks, use the corresponding graph node index in the array returned by getLowerSupportChunkHealths. To access subsupport chunk healths, use getSubsupportChunkHealths (see documentation for details). */ NvBlastBlockArrayData(float, m_lowerSupportChunkHealthsOffset, getLowerSupportChunkHealths, m_asset->getLowerSupportChunkCount()); /** Utility function to get the start of the subsupport chunk health array. To access a subsupport chunk health indexed by i, use getSubsupportChunkHealths()[i - m_asset->m_firstSubsupportChunkIndex] \return the array of health values associated with all descendants of support chunks. */ float* getSubsupportChunkHealths() const { NVBLAST_ASSERT(m_asset != nullptr); return (float*)((uintptr_t)this + m_lowerSupportChunkHealthsOffset) + m_asset->m_graph.m_nodeCount; } /** Bond health for the interfaces between two chunks, of type float. Since the bond is shared by two chunks, the same bond health is used for chunk[i] -> chunk[j] as for chunk[j] -> chunk[i]. getBondHealths returns the array of healths associated with all bonds in the support graph. */ NvBlastBlockArrayData(float, m_graphBondHealthsOffset, getBondHealths, m_asset->getBondCount()); /** Bond health for the interfaces between two chunks, of type float. Since the bond is shared by two chunks, the same bond health is used for chunk[i] -> chunk[j] as for chunk[j] -> chunk[i]. getCachedBondHealths returns the array of manually cached healths associated with all bonds in the support graph. */ NvBlastBlockArrayData(float, m_graphCachedBondHealthsOffset, getCachedBondHealths, m_asset->getBondCount()); /** The instance graph for islands searching, of type FamilyGraph. Return the dynamic data generated for the support graph. (See FamilyGraph.) This is used to store current connectivity information based upon bond and chunk healths, as well as cached intermediate data for faster incremental updates. */ NvBlastBlockData(FamilyGraph, m_familyGraphOffset, getFamilyGraph); //////// Runtime data //////// /** The number of actors using this block. */ volatile uint32_t m_actorCount; /** The asset corresponding to all actors in this family. This is runtime data and will be resolved from m_assetID. */ union { const Asset* m_asset; uint64_t m_runtimePlaceholder; // Make sure we reserve enough room for an 8-byte pointer }; //////// Functions //////// /** Gets an actor from the actor array and validates it if it is not already valid. This increments the actor reference count. \param[in] index The index of the actor to borrow. Must be in the range [0, getActorsArraySize()). \return A pointer to the indexed Actor. */ Actor* borrowActor(uint32_t index); /** Invalidates the actor if it is not already invalid. This decrements the actor reference count, but does not free this block when the count goes to zero. \param[in] actor The actor to invalidate. */ void returnActor(Actor& actor); /** Returns a value to indicate whether or not the Actor with the given index is valid for use (active). \return true iff the indexed actor is active. */ bool isActorActive(uint32_t index) const; /** Retrieve the actor from an index. If actor is inactive nullptr is returned. \param[in] index The index of an actor. \return A pointer to the indexed actor if the actor is active, nullptr otherwise. 
*/ Actor* getActorByIndex(uint32_t index) const; /** Retrieve the index of an actor associated with the given chunk. \param[in] chunkIndex The index of chunk. \return the index of associated actor in the FamilyHeader's getActors() array. */ uint32_t getChunkActorIndex(uint32_t chunkIndex) const; /** Retrieve the index of an actor associated with the given node. \param[in] nodeIndex The index of node. \return the index of associated actor in the FamilyHeader's getActors() array. */ uint32_t getNodeActorIndex(uint32_t nodeIndex) const; /** Retrieve an actor associated with the given chunk. \param[in] chunkIndex The index of chunk. \return A pointer to the actor if the actor is active, nullptr otherwise. */ Actor* getChunkActor(uint32_t chunkIndex) const; /** Retrieve an actor associated with the given node. \param[in] nodeIndex The index of node. \return A pointer to the actor if the actor is active, nullptr otherwise. */ Actor* getNodeActor(uint32_t nodeIndex) const; //////// Fracturing methods //////// /** Hierarchically distribute damage to child chunks. \param chunkIndex asset chunk index to hierarchically damage \param suboffset index of the first sub-support health \param healthDamage damage strength to apply \param chunkHealths instance chunk healths \param chunks asset chunk collection */ void fractureSubSupportNoEvents(uint32_t chunkIndex, uint32_t suboffset, float healthDamage, float* chunkHealths, const NvBlastChunk* chunks); /** Hierarchically distribute damage to child chunks, recording a fracture event for each health damage applied. If outBuffer is too small, events are dropped but the chunks are still damaged. \param chunkIndex asset chunk index to hierarchically damage \param suboffset index of the first sub-support health \param healthDamage damage strength to apply \param chunkHealths instance chunk healths \param chunks asset chunk collection \param outBuffer target buffer for fracture events \param currentIndex current position in outBuffer - returns the number of damaged chunks \param maxCount capacity of outBuffer \param[in] filterActor pointer to the actor to filter commands that target other actors. May be NULL to apply all commands \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. */ void fractureSubSupport(uint32_t chunkIndex, uint32_t suboffset, float healthDamage, float* chunkHealths, const NvBlastChunk* chunks, NvBlastChunkFractureData* outBuffer, uint32_t* currentIndex, const uint32_t maxCount); /** Apply chunk fracture commands hierarchically. \param chunkFractureCount number of chunk fracture commands to apply \param chunkFractures array of chunk fracture commands \param filterActor pointer to the actor to filter commands corresponding to other actors. May be NULL to apply all commands \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. */ void fractureNoEvents(uint32_t chunkFractureCount, const NvBlastChunkFractureData* chunkFractures, Actor* filterActor, NvBlastLog logFn); /** Apply chunk fracture commands hierarchically, recording a fracture event for each health damage applied. If events array is too small, events are dropped but the chunks are still damaged. 
\param chunkFractureCount number of chunk fracture commands to apply \param commands array of chunk fracture commands \param events target buffer for fracture events \param eventsSize number of available entries in 'events' \param count returns the number of damaged chunks \param[in] filterActor pointer to the actor to filter commands that target other actors. May be NULL to apply all commands \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. */ void fractureWithEvents(uint32_t chunkFractureCount, const NvBlastChunkFractureData* commands, NvBlastChunkFractureData* events, uint32_t eventsSize, uint32_t* count, Actor* filterActor, NvBlastLog logFn); /** Apply chunk fracture commands hierarchically, recording a fracture event for each health damage applied. In-Place version: fracture commands are replaced by fracture events. If inoutbuffer array is too small, events are dropped but the chunks are still damaged. \param chunkFractureCount number of chunk fracture commands to apply \param inoutbuffer array of chunk fracture commands to be replaced by events \param eventsSize number of available entries in inoutbuffer \param count returns the number of damaged chunks \param[in] filterActor pointer to the actor to filter commands that target other actors. May be NULL to apply all commands \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. */ void fractureInPlaceEvents(uint32_t chunkFractureCount, NvBlastChunkFractureData* inoutbuffer, uint32_t eventsSize, uint32_t* count, Actor* filterActor, NvBlastLog logFn); /** See NvBlastActorApplyFracture \param[in,out] eventBuffers Target buffers to hold applied fracture events. May be NULL, in which case events are not reported. To avoid data loss, provide an entry for every lower-support chunk and every bond in the original actor. \param[in,out] actor The NvBlastActor to apply fracture to. \param[in] commands The fracture commands to process. \param[in] filterActor pointer to the actor to filter commands that target other actors. May be NULL to apply all commands \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \param[in,out] timers If non-NULL this struct will be filled out with profiling information for the step, in profile build configurations. */ void applyFracture(NvBlastFractureBuffers* eventBuffers, const NvBlastFractureBuffers* commands, Actor* filterActor, NvBlastLog logFn, NvBlastTimers* timers); }; } // namespace Blast } // namespace Nv #include "NvBlastActor.h" namespace Nv { namespace Blast { //////// FamilyHeader inline methods //////// NV_INLINE Actor* FamilyHeader::borrowActor(uint32_t index) { NVBLAST_ASSERT(index < getActorsArraySize()); Actor& actor = getActors()[index]; if (actor.m_familyOffset == 0) { const uintptr_t offset = (uintptr_t)&actor - (uintptr_t)this; NVBLAST_ASSERT(offset <= UINT32_MAX); actor.m_familyOffset = (uint32_t)offset; atomicIncrement(reinterpret_cast<volatile int32_t*>(&m_actorCount)); } return &actor; } NV_INLINE void FamilyHeader::returnActor(Actor& actor) { if (actor.m_familyOffset != 0) { actor.m_familyOffset = 0; // The actor count should be positive since this actor was valid. Check to be safe. 
NVBLAST_ASSERT(m_actorCount > 0); atomicDecrement(reinterpret_cast<volatile int32_t*>(&m_actorCount)); } } NV_INLINE bool FamilyHeader::isActorActive(uint32_t index) const { NVBLAST_ASSERT(index < getActorsArraySize()); return getActors()[index].m_familyOffset != 0; } NV_INLINE Actor* FamilyHeader::getActorByIndex(uint32_t index) const { NVBLAST_ASSERT(index < getActorsArraySize()); Actor& actor = getActors()[index]; return actor.isActive() ? &actor : nullptr; } NV_INLINE uint32_t FamilyHeader::getChunkActorIndex(uint32_t chunkIndex) const { NVBLAST_ASSERT(m_asset); NVBLAST_ASSERT(chunkIndex < m_asset->m_chunkCount); if (chunkIndex < m_asset->getUpperSupportChunkCount()) { return getChunkActorIndices()[chunkIndex]; } else { return chunkIndex - (m_asset->getUpperSupportChunkCount() - m_asset->m_graph.m_nodeCount); } } NV_INLINE uint32_t FamilyHeader::getNodeActorIndex(uint32_t nodeIndex) const { NVBLAST_ASSERT(m_asset); NVBLAST_ASSERT(nodeIndex < m_asset->m_graph.m_nodeCount); const uint32_t chunkIndex = m_asset->m_graph.getChunkIndices()[nodeIndex]; return isInvalidIndex(chunkIndex) ? chunkIndex : getChunkActorIndices()[chunkIndex]; } NV_INLINE Actor* FamilyHeader::getChunkActor(uint32_t chunkIndex) const { uint32_t actorIndex = getChunkActorIndex(chunkIndex); return !isInvalidIndex(actorIndex) ? getActorByIndex(actorIndex) : nullptr; } NV_INLINE Actor* FamilyHeader::getNodeActor(uint32_t nodeIndex) const { uint32_t actorIndex = getNodeActorIndex(nodeIndex); return !isInvalidIndex(actorIndex) ? getActorByIndex(actorIndex) : nullptr; } //////// Global functions //////// /** Returns the number of bytes of memory that a family created using the given asset will require. A pointer to a block of memory of at least this size must be passed in as the mem argument of createFamily. \param[in] asset The asset that will be passed into NvBlastAssetCreateFamily. \param[in] sizeData Alternate version where the counts are known but there is not an existing asset. */ size_t getFamilyMemorySize(const Asset* asset); size_t getFamilyMemorySize(const NvBlastAssetMemSizeData& sizeData); } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTFAMILY_H
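// A small sketch (not part of the SDK source) showing how the indexing rules documented above combine to
// read a lower-support chunk's current health from a FamilyHeader. Assumes 'hdr' has its m_asset pointer
// resolved and that chunkIndex refers to a lower-support chunk (a support chunk or a subsupport chunk);
// upper-support non-support chunks have no health entry. getLowerSupportChunkHealth is an illustrative
// helper name, not an SDK function.

float getLowerSupportChunkHealth(const Nv::Blast::FamilyHeader& hdr, uint32_t chunkIndex)
{
    const Nv::Blast::Asset* asset = hdr.m_asset;
    if (chunkIndex >= asset->m_firstSubsupportChunkIndex)
    {
        // Subsupport healths are indexed relative to the first subsupport chunk.
        return hdr.getSubsupportChunkHealths()[chunkIndex - asset->m_firstSubsupportChunkIndex];
    }
    // Support chunk healths are addressed by the chunk's graph node index.
    const uint32_t nodeIndex = asset->getChunkToGraphNodeMap()[chunkIndex];
    return hdr.getLowerSupportChunkHealths()[nodeIndex];
}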
17,257
C
39.228438
239
0.710726
NVIDIA-Omniverse/PhysX/blast/source/sdk/lowlevel/NvBlastActorSerializationBlock.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTACTORSERIALIZATIONBLOCK_H #define NVBLASTACTORSERIALIZATIONBLOCK_H #include "NvBlastFixedBoolArray.h" namespace Nv { namespace Blast { /** Struct-enum which keeps track of the actor serialization format. */ struct ActorSerializationFormat { enum Version { /** Initial version */ Initial, // New formats must come before Count. They should be given descriptive names with more information in comments. /** The number of serialization formats. */ Count, /** The current version. This should always be Count-1 */ Current = Count - 1 }; }; /** Data header at the beginning of a NvBlastActor serialization block The block address may be cast to a valid ActorSerializationHeader pointer. Serialization state is only valid if partition has been called since the last call to findIslands(). */ struct ActorSerializationHeader { /** A number which is incremented every time the data layout changes. */ uint32_t m_formatVersion; /** The size of the serialization block, including this header. Memory sizes are restricted to 32-bit representable values. */ uint32_t m_size; /** The index of the actor within its family. */ uint32_t m_index; /** The number of elements in the visible chunk indices list. */ uint32_t m_visibleChunkCount; /** The number of elements in the graph node indices list. */ uint32_t m_graphNodeCount; /** The number of leaf chunks in this actor. */ uint32_t m_leafChunkCount; /** Visible chunk indices, of type uint32_t. */ NvBlastBlockArrayData(uint32_t, m_visibleChunkIndicesOffset, getVisibleChunkIndices, m_visibleChunkCount); /** Graph node indices, of type uint32_t. */ NvBlastBlockArrayData(uint32_t, m_graphNodeIndicesOffset, getGraphNodeIndices, m_graphNodeCount); /** Healths for lower support chunks in this actor, in breadth-first order from the support chunks associated with the graph nodes. Type float. 
*/ NvBlastBlockData(float, m_lowerSupportChunkHealthsOffset, getLowerSupportChunkHealths); /** Healths for bonds associated with support chunks in this actor, in order of graph adjacency from associated graph nodes, i < j only. Type float. */ NvBlastBlockData(float, m_bondHealthsOffset, getBondHealths); /** Fast route in instance graph calculated for each graph node in this actor, of type uint32_t. */ NvBlastBlockArrayData(uint32_t, m_fastRouteOffset, getFastRoute, m_graphNodeCount); /** Hop counts in instance graph calculated for each graph node in this actor, of type uint32_t. */ NvBlastBlockArrayData(uint32_t, m_hopCountsOffset, getHopCounts, m_graphNodeCount); /** "Edge removed" bits for bonds associated with support chunks in this actor, in order of graph adjacency from associated graph nodes, i < j only. Type FixedBoolArray. */ NvBlastBlockData(FixedBoolArray, m_edgeRemovedArrayOffset, getEdgeRemovedArray); }; //////// Global functions //////// /** A buffer size sufficient to serialize an actor with a given visible chunk count, lower support chunk count, graph node count, and bond count. \param[in] visibleChunkCount The number of visible chunks \param[in] lowerSupportChunkCount The number of lower-support chunks in the asset. \param[in] graphNodeCount The number of graph nodes in the asset. \param[in] bondCount The number of graph bonds in the asset. \return the required buffer size in bytes. */ NV_INLINE size_t getActorSerializationSize(uint32_t visibleChunkCount, uint32_t lowerSupportChunkCount, uint32_t graphNodeCount, uint32_t bondCount) { // Family offsets const size_t visibleChunkIndicesOffset = align16(sizeof(ActorSerializationHeader)); // size = visibleChunkCount*sizeof(uint32_t) const size_t graphNodeIndicesOffset = align16(visibleChunkIndicesOffset + visibleChunkCount*sizeof(uint32_t)); // size = graphNodeCount*sizeof(uint32_t) const size_t lowerSupportHealthsOffset = align16(graphNodeIndicesOffset + graphNodeCount*sizeof(uint32_t)); // size = lowerSupportChunkCount*sizeof(float) const size_t bondHealthsOffset = align16(lowerSupportHealthsOffset + lowerSupportChunkCount*sizeof(float)); // size = bondCount*sizeof(float) const size_t fastRouteOffset = align16(bondHealthsOffset + bondCount*sizeof(float)); // size = graphNodeCount*sizeof(uint32_t) const size_t hopCountsOffset = align16(fastRouteOffset + graphNodeCount*sizeof(uint32_t)); // size = graphNodeCount*sizeof(uint32_t) const size_t edgeRemovedArrayOffset = align16(hopCountsOffset + graphNodeCount*sizeof(uint32_t)); // size = 0 or FixedBoolArray::requiredMemorySize(bondCount) return align16(edgeRemovedArrayOffset + (bondCount == 0 ? 0 : FixedBoolArray::requiredMemorySize(bondCount))); } } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTACTORSERIALIZATIONBLOCK_H
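// A worked example of the offset chain in getActorSerializationSize above, assuming align16(x) rounds x up
// to the next multiple of 16 (consistent with the 16-byte-aligned blocks used throughout the SDK).
// Inputs: visibleChunkCount = 3, graphNodeCount = 4, lowerSupportChunkCount = 6, bondCount = 5;
// write H for align16(sizeof(ActorSerializationHeader)).
//
//   visibleChunkIndicesOffset = H           // followed by 3 * 4 = 12 bytes of indices
//   graphNodeIndicesOffset    = H + 16      // align16(H + 12)
//   lowerSupportHealthsOffset = H + 32      // align16(H + 16 + 16)
//   bondHealthsOffset         = H + 64      // align16(H + 32 + 24)
//   fastRouteOffset           = H + 96      // align16(H + 64 + 20)
//   hopCountsOffset           = H + 112     // align16(H + 96 + 16)
//   edgeRemovedArrayOffset    = H + 128     // align16(H + 112 + 16)
//
// and the returned size is align16(H + 128 + FixedBoolArray::requiredMemorySize(5)), since bondCount != 0.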
6,710
C
38.710059
176
0.724292
NVIDIA-Omniverse/PhysX/blast/source/sdk/lowlevel/NvBlastFamilyGraph.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTFAMILYGRAPH_H #define NVBLASTFAMILYGRAPH_H #include "NvBlastSupportGraph.h" #include "NvBlastFixedArray.h" #include "NvBlastFixedBitmap.h" #include "NvBlastFixedBoolArray.h" #include "NvBlastMath.h" #include "NvBlastFixedPriorityQueue.h" #include "NvBlastMemory.h" namespace Nv { namespace Blast { typedef uint32_t NodeIndex; typedef NodeIndex IslandId; typedef uint32_t ActorIndex; /** Internal implementation of family graph stored on the family. It processes full NvBlastSupportGraph graph, stores additional information used for faster islands finding, keeps and provides access to current islandId for every node. */ class FamilyGraph { public: //////// ctor //////// /** Constructor. family graph is meant to be placed (with placement new) on family memory. \param[in] nodeCount The number of nodes in the support graph (see SupportGraph) \param[in] bondCount The number of bonds in the support graph (see SupportGraph) */ FamilyGraph(uint32_t nodeCount, const uint32_t bondCount); /** Returns memory needed for this class (see fillMemory). \param[in] nodeCount The number of nodes in the graph. \param[in] bondCount The number of bonds in the graph. \return the number of bytes required. */ static size_t requiredMemorySize(uint32_t nodeCount, uint32_t bondCount) { return fillMemory(nullptr, nodeCount, bondCount); } //////// API //////// /** Function to initialize graph (all nodes added to dirty list for this actor) \param[in] actorIndex The index of the actor to initialize graph with. Must be in the range [0, m_nodeCount). \param[in] graph The static graph data for this family. */ void initialize(ActorIndex actorIndex, const SupportGraph* graph); /** Function to notify graph about removed edges. These nodes will be added to dirty list for this actor. Returns true if bond as removed. \param[in] actorIndex The index of the actor from which the edge is removed. Must be in the range [0, m_nodeCount). \param[in] node0 The index of the first node of removed edge. 
Must be in the range [0, m_nodeCount). \param[in] node1 The index of the second node of removed edge. Must be in the range [0, m_nodeCount). \param[in] graph The static graph data for this family. */ bool notifyEdgeRemoved(ActorIndex actorIndex, NodeIndex node0, NodeIndex node1, const SupportGraph* graph); bool notifyEdgeRemoved(ActorIndex actorIndex, NodeIndex node0, NodeIndex node1, uint32_t bondIndex, const SupportGraph* graph); bool notifyNodeRemoved(ActorIndex actorIndex, NodeIndex nodeIndex, const SupportGraph* graph); /** Function to find new islands by examining dirty nodes associated with this actor (they can be associated with actor if notifyEdgeRemoved() were previously called for it. \param[in] actorIndex The index of the actor on which graph part (edges + nodes) findIslands will be performed. Must be in the range [0, m_nodeCount). \param[in] scratch User-supplied scratch memory of size findIslandsRequiredScratch(graphNodeCount) bytes. \param[in] graph The static graph data for this family. \return the number of new islands found. */ uint32_t findIslands(ActorIndex actorIndex, void* scratch, const SupportGraph* graph); /** The scratch space required to call the findIslands function, in bytes. \param[in] graphNodeCount The number of nodes in the graph. \return the number of bytes required. */ static size_t findIslandsRequiredScratch(uint32_t graphNodeCount); //////// data getters //////// /** Utility function to get the start of the island ids array. This is an array of size nodeCount. Every islandId == NodeIndex of root node in this island, it is set for every Node. \return the array of island ids. */ NvBlastBlockData(IslandId, m_islandIdsOffset, getIslandIds); /** Utility function to get the start of the dirty node links array. This is an array of size nodeCount. */ NvBlastBlockData(NodeIndex, m_dirtyNodeLinksOffset, getDirtyNodeLinks); /** Utility function to get the start of the first dirty node indices array. This is an array of size nodeCount. */ NvBlastBlockData(uint32_t, m_firstDirtyNodeIndicesOffset, getFirstDirtyNodeIndices); /** Utility function to get the start of the fast route array. This is an array of size nodeCount. */ NvBlastBlockData(NodeIndex, m_fastRouteOffset, getFastRoute); /** Utility function to get the start of the hop counts array. This is an array of size nodeCount. */ NvBlastBlockData(uint32_t, m_hopCountsOffset, getHopCounts); /** Utility function to get the pointer of the is edge removed bitmap. This is an bitmap of size bondCount. */ NvBlastBlockData(FixedBoolArray, m_isEdgeRemovedOffset, getIsEdgeRemoved); /** Utility function to get the pointer of the is node in dirty list bitmap. This is an bitmap of size nodeCount. */ NvBlastBlockData(FixedBoolArray, m_isNodeInDirtyListOffset, getIsNodeInDirtyList); //////// Debug/Test //////// uint32_t getEdgesCount(const SupportGraph* graph) const; bool hasEdge(NodeIndex node0, NodeIndex node1, const SupportGraph* graph) const; bool canFindRoot(NodeIndex startNode, NodeIndex targetNode, FixedArray<NodeIndex>* visitedNodes, const SupportGraph* graph); private: FamilyGraph& operator = (const FamilyGraph&); //////// internal types //////// /** Used to represent current graph traverse state. 
*/ struct TraversalState { NodeIndex mNodeIndex; uint32_t mCurrentIndex; uint32_t mPrevIndex; uint32_t mDepth; TraversalState() { } TraversalState(NodeIndex nodeIndex, uint32_t currentIndex, uint32_t prevIndex, uint32_t depth) : mNodeIndex(nodeIndex), mCurrentIndex(currentIndex), mPrevIndex(prevIndex), mDepth(depth) { } }; /** Queue element for graph traversal with priority queue. */ struct QueueElement { TraversalState* mState; uint32_t mHopCount; QueueElement() { } QueueElement(TraversalState* state, uint32_t hopCount) : mState(state), mHopCount(hopCount) { } }; /** Queue comparator for graph traversal with priority queue. */ struct NodeComparator { NodeComparator() { } bool operator() (const QueueElement& node0, const QueueElement& node1) const { return node0.mHopCount < node1.mHopCount; } private: NodeComparator& operator = (const NodeComparator&); }; /** PriorityQueue for graph traversal. The queue element with the smallest hop count will always be on top. */ typedef FixedPriorityQueue<QueueElement, NodeComparator> NodePriorityQueue; //////// internal operations //////// /** Function calculates the needed memory and fills it if familyGraph is passed. FamilyGraph is designed to use the memory immediately after itself, so it should be initialized with a placement new operation on a block of requiredMemorySize() bytes. \param[in] familyGraph The pointer to the actual FamilyGraph instance which will be filled. May be nullptr, in which case the function only returns the required byte count and does nothing. \param[in] nodeCount The number of nodes in the graph. \param[in] bondCount The number of bonds in the graph. \return the number of bytes required or filled */ static size_t fillMemory(FamilyGraph* familyGraph, uint32_t nodeCount, uint32_t bondCount); /** Function to find a route from one node to another. It tries the fast path first as an optimization and, if that fails, performs a brute-force traversal (with a hop-count heuristic). */ bool findRoute(NodeIndex startNode, NodeIndex targetNode, IslandId islandId, FixedArray<TraversalState>* visitedNodes, FixedBitmap* isNodeWitness, NodePriorityQueue* priorityQueue, const SupportGraph* graph); /** Function to try finding targetNode (from startNode) with getFastRoute(). */ bool tryFastPath(NodeIndex startNode, NodeIndex targetNode, IslandId islandId, FixedArray<TraversalState>* visitedNodes, FixedBitmap* isNodeWitness, const SupportGraph* graph); /** Function to unwind the route upon successfully finding the root node or a witness. We have found either a witness *or* the root node with this traversal. In the event of finding the root node, hopCount will be 0. In the event of finding a witness, hopCount will be the hopCount that witness reported as being the distance to the root. */ void unwindRoute(uint32_t traversalIndex, NodeIndex lastNode, uint32_t hopCount, IslandId id, FixedArray<TraversalState>* visitedNodes); /** Function to add a node to the dirty node list associated with an actor. */ void addToDirtyNodeList(ActorIndex actorIndex, NodeIndex node); /** Function used to get the adjacent node using an index from the adjacency partition, checking whether the corresponding bond has already been removed. */ NodeIndex getAdjacentNode(uint32_t adjacencyIndex, const SupportGraph* graph) const { const uint32_t bondIndex = graph->getAdjacentBondIndices()[adjacencyIndex]; return getIsEdgeRemoved()->test(bondIndex) ? invalidIndex<uint32_t>() : graph->getAdjacentNodeIndices()[adjacencyIndex]; } }; } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTFAMILYGRAPH_H
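// A construction sketch (not part of the SDK source) following the placement-new contract described in the
// class comment: the FamilyGraph stores its arrays directly after itself, so it is built in-place on a block
// of requiredMemorySize() bytes. Assumes 'graph' points to a valid SupportGraph, that 16-byte alignment is
// sufficient for the block, and uses std::aligned_alloc only for illustration. createFamilyGraph is an
// illustrative helper name; inside the SDK this block lives in family memory instead.

#include <cstdlib>
#include <new>

Nv::Blast::FamilyGraph* createFamilyGraph(const Nv::Blast::SupportGraph* graph, Nv::Blast::ActorIndex rootActor)
{
    using namespace Nv::Blast;

    // The support graph stores each bond twice (once per direction), hence the division by two.
    const uint32_t bondCount = graph->getAdjacencyPartition()[graph->m_nodeCount] / 2;

    const size_t bytes = FamilyGraph::requiredMemorySize(graph->m_nodeCount, bondCount);
    void* mem = std::aligned_alloc(16, (bytes + 15) & ~size_t(15));

    FamilyGraph* familyGraph = new (mem) FamilyGraph(graph->m_nodeCount, bondCount);
    familyGraph->initialize(rootActor, graph); // all nodes start dirty for this actor
    return familyGraph; // caller releases the block with std::free when done
}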
11,342
C
37.063758
223
0.705519
NVIDIA-Omniverse/PhysX/blast/source/sdk/lowlevel/NvBlastFamilyGraph.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastFamilyGraph.h" #include "NvBlastAssert.h" #include <vector> #include <stack> #define SANITY_CHECKS 0 namespace Nv { namespace Blast { size_t FamilyGraph::fillMemory(FamilyGraph* familyGraph, uint32_t nodeCount, uint32_t bondCount) { // calculate all offsets, and dataSize as a result NvBlastCreateOffsetStart(sizeof(FamilyGraph)); const size_t NvBlastCreateOffsetAlign16(dirtyNodeLinksOffset, sizeof(NodeIndex) * nodeCount); const size_t NvBlastCreateOffsetAlign16(firstDirtyNodeIndicesOffset, sizeof(uint32_t) * nodeCount); const size_t NvBlastCreateOffsetAlign16(islandIdsOffset, sizeof(IslandId) * nodeCount); const size_t NvBlastCreateOffsetAlign16(fastRouteOffset, sizeof(NodeIndex) * nodeCount); const size_t NvBlastCreateOffsetAlign16(hopCountsOffset, sizeof(uint32_t) * nodeCount); const size_t NvBlastCreateOffsetAlign16(isEdgeRemovedOffset, FixedBoolArray::requiredMemorySize(bondCount)); const size_t NvBlastCreateOffsetAlign16(isNodeInDirtyListOffset, FixedBoolArray::requiredMemorySize(nodeCount)); const size_t dataSize = NvBlastCreateOffsetEndAlign16(); // fill only if familyGraph was passed (otherwise we just used this function to get dataSize) if (familyGraph) { familyGraph->m_dirtyNodeLinksOffset = static_cast<uint32_t>(dirtyNodeLinksOffset); familyGraph->m_firstDirtyNodeIndicesOffset = static_cast<uint32_t>(firstDirtyNodeIndicesOffset); familyGraph->m_islandIdsOffset = static_cast<uint32_t>(islandIdsOffset); familyGraph->m_fastRouteOffset = static_cast<uint32_t>(fastRouteOffset); familyGraph->m_hopCountsOffset = static_cast<uint32_t>(hopCountsOffset); familyGraph->m_isEdgeRemovedOffset = static_cast<uint32_t>(isEdgeRemovedOffset); familyGraph->m_isNodeInDirtyListOffset = static_cast<uint32_t>(isNodeInDirtyListOffset); new (familyGraph->getIsEdgeRemoved()) FixedBoolArray(bondCount); new (familyGraph->getIsNodeInDirtyList()) FixedBoolArray(nodeCount); } return dataSize; } FamilyGraph::FamilyGraph(uint32_t nodeCount, const uint32_t bondCount) { // fill memory 
with all internal data // we need chunks count for size calculation fillMemory(this, nodeCount, bondCount); // fill arrays with invalid indices / max value (0xFFFFFFFF) memset(getIslandIds(), 0xFF, nodeCount*sizeof(uint32_t)); memset(getFastRoute(), 0xFF, nodeCount*sizeof(uint32_t)); memset(getHopCounts(), 0xFF, nodeCount*sizeof(uint32_t)); // Initializing to large value memset(getDirtyNodeLinks(), 0xFF, nodeCount*sizeof(uint32_t)); // No dirty list initially memset(getFirstDirtyNodeIndices(), 0xFF, nodeCount*sizeof(uint32_t)); getIsNodeInDirtyList()->clear(); getIsEdgeRemoved()->fill(); } /** Graph initialization, reset all internal data to initial state. Marks all nodes dirty for this actor. First island search probably would be the longest one, as it has to traverse whole graph and set all the optimization stuff like fastRoute and hopCounts for all nodes. */ void FamilyGraph::initialize(ActorIndex actorIndex, const SupportGraph* graph) { // used internal data pointers NodeIndex* dirtyNodeLinks = getDirtyNodeLinks(); uint32_t* firstDirtyNodeIndices = getFirstDirtyNodeIndices(); // link dirty nodes for (NodeIndex node = 1; node < graph->m_nodeCount; node++) { dirtyNodeLinks[node-1] = node; } firstDirtyNodeIndices[actorIndex] = 0; getIsNodeInDirtyList()->fill(); getIsEdgeRemoved()->clear(); } void FamilyGraph::addToDirtyNodeList(ActorIndex actorIndex, NodeIndex node) { // used internal data pointers FixedBoolArray* isNodeInDirtyList = getIsNodeInDirtyList(); NodeIndex* dirtyNodeLinks = getDirtyNodeLinks(); uint32_t* firstDirtyNodeIndices = getFirstDirtyNodeIndices(); // check for bitmap first for avoid O(n) list search if (isNodeInDirtyList->test(node)) return; // add node to dirty node list head dirtyNodeLinks[node] = firstDirtyNodeIndices[actorIndex]; firstDirtyNodeIndices[actorIndex] = node; isNodeInDirtyList->set(node); } /** Removes fast routes and marks involved nodes as dirty */ bool FamilyGraph::notifyEdgeRemoved(ActorIndex actorIndex, NodeIndex node0, NodeIndex node1, const SupportGraph* graph) { NVBLAST_ASSERT(node0 < graph->m_nodeCount); NVBLAST_ASSERT(node1 < graph->m_nodeCount); // used internal data pointers NodeIndex* fastRoute = getFastRoute(); const uint32_t* adjacencyPartition = graph->getAdjacencyPartition(); const uint32_t* adjacentBondIndices = graph->getAdjacentBondIndices(); // search for bond for (uint32_t adjacencyIndex = adjacencyPartition[node0]; adjacencyIndex < adjacencyPartition[node0 + 1]; adjacencyIndex++) { if (getAdjacentNode(adjacencyIndex, graph) == node1) { // found bond const uint32_t bondIndex = adjacentBondIndices[adjacencyIndex]; // remove bond getIsEdgeRemoved()->set(bondIndex); // broke fast route if it goes through this edge: if (fastRoute[node0] == node1) fastRoute[node0] = invalidIndex<uint32_t>(); if (fastRoute[node1] == node0) fastRoute[node1] = invalidIndex<uint32_t>(); // mark nodes dirty (add to list if doesn't exist) addToDirtyNodeList(actorIndex, node0); addToDirtyNodeList(actorIndex, node1); // we don't expect to be more than one bond between 2 nodes return true; } } return false; } bool FamilyGraph::notifyEdgeRemoved(ActorIndex actorIndex, NodeIndex node0, NodeIndex node1, uint32_t bondIndex, const SupportGraph* graph) { NV_UNUSED(graph); NVBLAST_ASSERT(node0 < graph->m_nodeCount); NVBLAST_ASSERT(node1 < graph->m_nodeCount); getIsEdgeRemoved()->set(bondIndex); NodeIndex* fastRoute = getFastRoute(); // broke fast route if it goes through this edge: if (fastRoute[node0] == node1) fastRoute[node0] = invalidIndex<uint32_t>(); if 
(fastRoute[node1] == node0) fastRoute[node1] = invalidIndex<uint32_t>(); // mark nodes dirty (add to list if doesn't exist) addToDirtyNodeList(actorIndex, node0); addToDirtyNodeList(actorIndex, node1); return true; } bool FamilyGraph::notifyNodeRemoved(ActorIndex actorIndex, NodeIndex nodeIndex, const SupportGraph* graph) { NVBLAST_ASSERT(nodeIndex < graph->m_nodeCount); // used internal data pointers NodeIndex* fastRoute = getFastRoute(); const uint32_t* adjacencyPartition = graph->getAdjacencyPartition(); const uint32_t* adjacentBondIndices = graph->getAdjacentBondIndices(); // remove all edges leaving this node for (uint32_t adjacencyIndex = adjacencyPartition[nodeIndex]; adjacencyIndex < adjacencyPartition[nodeIndex + 1]; adjacencyIndex++) { const uint32_t adjacentNodeIndex = getAdjacentNode(adjacencyIndex, graph); if (!isInvalidIndex(adjacentNodeIndex)) { const uint32_t bondIndex = adjacentBondIndices[adjacencyIndex]; getIsEdgeRemoved()->set(bondIndex); if (fastRoute[adjacentNodeIndex] == nodeIndex) fastRoute[adjacentNodeIndex] = invalidIndex<uint32_t>(); if (fastRoute[nodeIndex] == adjacentNodeIndex) fastRoute[nodeIndex] = invalidIndex<uint32_t>(); addToDirtyNodeList(actorIndex, adjacentNodeIndex); } } addToDirtyNodeList(actorIndex, nodeIndex); // ignore this node in partition (only needed for "chunk deleted from graph") // getIslandIds()[nodeIndex] = invalidIndex<uint32_t>(); return true; } void FamilyGraph::unwindRoute(uint32_t traversalIndex, NodeIndex lastNode, uint32_t hopCount, IslandId id, FixedArray<TraversalState>* visitedNodes) { // used internal data pointers IslandId* islandIds = getIslandIds(); NodeIndex* fastRoute = getFastRoute(); uint32_t* hopCounts = getHopCounts(); uint32_t currIndex = traversalIndex; uint32_t hc = hopCount + 1; //Add on 1 for the hop to the witness/root node. do { TraversalState& state = visitedNodes->at(currIndex); hopCounts[state.mNodeIndex] = hc++; islandIds[state.mNodeIndex] = id; fastRoute[state.mNodeIndex] = lastNode; currIndex = state.mPrevIndex; lastNode = state.mNodeIndex; } while(currIndex != invalidIndex<uint32_t>()); } bool FamilyGraph::tryFastPath(NodeIndex startNode, NodeIndex targetNode, IslandId islandId, FixedArray<TraversalState>* visitedNodes, FixedBitmap* isNodeWitness, const SupportGraph* graph) { NV_UNUSED(graph); // used internal data pointers IslandId* islandIds = getIslandIds(); NodeIndex* fastRoute = getFastRoute(); // prepare for iterating path NodeIndex currentNode = startNode; uint32_t visitedNotesInitialSize = visitedNodes->size(); uint32_t depth = 0; bool found = false; do { // witness ? if (isNodeWitness->test(currentNode)) { // Already visited and not tagged with invalid island == a witness! found = islandIds[currentNode] != invalidIndex<uint32_t>(); break; } // reached targetNode ? 
if (currentNode == targetNode) { found = true; break; } TraversalState state(currentNode, visitedNodes->size(), visitedNodes->size() - 1, depth++); visitedNodes->pushBack(state); NVBLAST_ASSERT(isInvalidIndex(fastRoute[currentNode]) || hasEdge(currentNode, fastRoute[currentNode], graph)); islandIds[currentNode] = invalidIndex<uint32_t>(); isNodeWitness->set(currentNode); currentNode = fastRoute[currentNode]; } while (currentNode != invalidIndex<uint32_t>()); for (uint32_t a = visitedNotesInitialSize; a < visitedNodes->size(); ++a) { TraversalState& state = visitedNodes->at(a); islandIds[state.mNodeIndex] = islandId; } // if fast path failed we have to remove all isWitness marks on visited nodes and nodes from visited list if (!found) { for (uint32_t a = visitedNotesInitialSize; a < visitedNodes->size(); ++a) { TraversalState& state = visitedNodes->at(a); isNodeWitness->reset(state.mNodeIndex); } visitedNodes->forceSize_Unsafe(visitedNotesInitialSize); } return found; } bool FamilyGraph::findRoute(NodeIndex startNode, NodeIndex targetNode, IslandId islandId, FixedArray<TraversalState>* visitedNodes, FixedBitmap* isNodeWitness, NodePriorityQueue* priorityQueue, const SupportGraph* graph) { // used internal data pointers IslandId* islandIds = getIslandIds(); NodeIndex* fastRoute = getFastRoute(); uint32_t* hopCounts = getHopCounts(); const uint32_t* adjacencyPartition = graph->getAdjacencyPartition(); // Firstly, traverse the fast path and tag up witnesses. TryFastPath can fail. In that case, no witnesses are left but this node is permitted to report // that it is still part of the island. Whichever node lost its fast path will be tagged as dirty and will be responsible for recovering the fast path // and tagging up the visited nodes if (fastRoute[startNode] != invalidIndex<uint32_t>()) { if (tryFastPath(startNode, targetNode, islandId, visitedNodes, isNodeWitness, graph)) return true; } // If we got here, there was no fast path. Therefore, we need to fall back on searching for the root node. This is optimized by using "hop counts". // These are per-node counts that indicate the expected number of hops from this node to the root node. These are lazily evaluated and updated // as new edges are formed or when traversals occur to re-establish islands. As a result, they may be inaccurate but they still serve the purpose // of guiding our search to minimize the chances of us doing an exhaustive search to find the root node. islandIds[startNode] = invalidIndex<uint32_t>(); TraversalState startTraversal(startNode, visitedNodes->size(), invalidIndex<uint32_t>(), 0); isNodeWitness->set(startNode); QueueElement element(&visitedNodes->pushBack(startTraversal), hopCounts[startNode]); priorityQueue->push(element); do { QueueElement currentQE = priorityQueue->pop(); TraversalState& currentState = *currentQE.mState; NodeIndex& currentNode = currentState.mNodeIndex; // iterate all edges of currentNode for (uint32_t adjacencyIndex = adjacencyPartition[currentNode]; adjacencyIndex < adjacencyPartition[currentNode + 1]; adjacencyIndex++) { NodeIndex nextIndex = getAdjacentNode(adjacencyIndex, graph); if (nextIndex != invalidIndex<uint32_t>()) { if (nextIndex == targetNode) { // targetNode found! unwindRoute(currentState.mCurrentIndex, nextIndex, 0, islandId, visitedNodes); return true; } if (isNodeWitness->test(nextIndex)) { // We already visited this node. This means that it's either in the priority queue already or we // visited in on a previous pass. 
If it was visited on a previous pass, then it already knows what island it's in. // We now need to test the island id to find out if this node knows the root. // If it has a valid root id, that id *is* our new root. We can guesstimate our hop count based on the node's properties IslandId visitedIslandId = islandIds[nextIndex]; if (visitedIslandId != invalidIndex<uint32_t>()) { // If we get here, we must have found a node that knows a route to our root node. It must not be a different island // because that would caused me to have been visited already because totally separate islands trigger a full traversal on // the orphaned side. NVBLAST_ASSERT(visitedIslandId == islandId); unwindRoute(currentState.mCurrentIndex, nextIndex, hopCounts[nextIndex], islandId, visitedNodes); return true; } } else { // This node has not been visited yet, so we need to push it into the stack and continue traversing TraversalState state(nextIndex, visitedNodes->size(), currentState.mCurrentIndex, currentState.mDepth + 1); QueueElement qe(&visitedNodes->pushBack(state), hopCounts[nextIndex]); priorityQueue->push(qe); isNodeWitness->set(nextIndex); NVBLAST_ASSERT(islandIds[nextIndex] == islandId); islandIds[nextIndex] = invalidIndex<uint32_t>(); //Flag as invalid island until we know whether we can find root or an island id. } } } } while (priorityQueue->size()); return false; } size_t FamilyGraph::findIslandsRequiredScratch(uint32_t graphNodeCount) { const size_t visitedNodesSize = align16(FixedArray<TraversalState>::requiredMemorySize(graphNodeCount)); const size_t isNodeWitnessSize = align16(FixedBitmap::requiredMemorySize(graphNodeCount)); const size_t priorityQueueSize = align16(NodePriorityQueue::requiredMemorySize(graphNodeCount)); // Aligned and padded return 16 + visitedNodesSize + isNodeWitnessSize + priorityQueueSize; } uint32_t FamilyGraph::findIslands(ActorIndex actorIndex, void* scratch, const SupportGraph* graph) { // check if we have at least 1 dirty node for this actor before proceeding uint32_t* firstDirtyNodeIndices = getFirstDirtyNodeIndices(); if (isInvalidIndex(firstDirtyNodeIndices[actorIndex])) return 0; // used internal data pointers IslandId* islandIds = getIslandIds(); NodeIndex* fastRoute = getFastRoute(); uint32_t* hopCounts = getHopCounts(); NodeIndex* dirtyNodeLinks = getDirtyNodeLinks(); FixedBoolArray* isNodeInDirtyList = getIsNodeInDirtyList(); // prepare intermediate data on scratch scratch = (void*)align16((size_t)scratch); // Bump to 16-byte alignment (see padding in findIslandsRequiredScratch) const uint32_t nodeCount = graph->m_nodeCount; FixedArray<TraversalState>* visitedNodes = new (scratch)FixedArray<TraversalState>(); scratch = pointerOffset(scratch, align16(FixedArray<TraversalState>::requiredMemorySize(nodeCount))); FixedBitmap* isNodeWitness = new (scratch)FixedBitmap(nodeCount); scratch = pointerOffset(scratch, align16(FixedBitmap::requiredMemorySize(nodeCount))); NodePriorityQueue* priorityQueue = new (scratch)NodePriorityQueue(); scratch = pointerOffset(scratch, align16(NodePriorityQueue::requiredMemorySize(nodeCount))); // reset nodes visited bitmap isNodeWitness->clear(); uint32_t newIslandsCount = 0; while (!isInvalidIndex(firstDirtyNodeIndices[actorIndex])) { // Pop head off of dirty node's list const NodeIndex dirtyNode = firstDirtyNodeIndices[actorIndex]; firstDirtyNodeIndices[actorIndex] = dirtyNodeLinks[dirtyNode]; dirtyNodeLinks[dirtyNode] = invalidIndex<uint32_t>(); NVBLAST_ASSERT(isNodeInDirtyList->test(dirtyNode)); isNodeInDirtyList->reset(dirtyNode); 
// clear PriorityQueue priorityQueue->clear(); // if we already visited this node before in this loop it's not dirty anymore if (isNodeWitness->test(dirtyNode)) continue; const IslandId& islandRootNode = islandIds[dirtyNode]; IslandId islandId = islandRootNode; // the same in this implementation // if this node is island root node we don't need to do anything if (islandRootNode == dirtyNode) continue; // clear visited notes list (to fill during traverse) visitedNodes->clear(); // try finding island root node from this dirtyNode if (findRoute(dirtyNode, islandRootNode, islandId, visitedNodes, isNodeWitness, priorityQueue, graph)) { // We found the root node so let's let every visited node know that we found its root // and we can also update our hop counts because we recorded how many hops it took to reach this // node // We already filled in the path to the root/witness with accurate hop counts. Now we just need to fill in the estimates // for the remaining nodes and re-define their islandIds. We approximate their path to the root by just routing them through // the route we already found. // This loop works because visitedNodes are recorded in the order they were visited and we already filled in the critical path // so the remainder of the paths will just fork from that path. for (uint32_t b = 0; b < visitedNodes->size(); ++b) { TraversalState& state = visitedNodes->at(b); if (isInvalidIndex(islandIds[state.mNodeIndex])) { hopCounts[state.mNodeIndex] = hopCounts[visitedNodes->at(state.mPrevIndex).mNodeIndex] + 1; fastRoute[state.mNodeIndex] = visitedNodes->at(state.mPrevIndex).mNodeIndex; islandIds[state.mNodeIndex] = islandId; } } } else { // NEW ISLAND BORN! // If I traversed and could not find the root node, then I have established a new island. In this island, I am the root node // and I will point all my nodes towards me. Furthermore, I have established how many steps it took to reach all nodes in my island // OK. We need to separate the islands. We have a list of nodes that are part of the new island (visitedNodes) and we know that the // first node in that list is the root node. #if SANITY_CHECKS NVBLAST_ASSERT(!canFindRoot(dirtyNode, islandRootNode, NULL)); #endif IslandId newIsland = dirtyNode; newIslandsCount++; hopCounts[dirtyNode] = 0; fastRoute[dirtyNode] = invalidIndex<uint32_t>(); islandIds[dirtyNode] = newIsland; for (uint32_t a = 1; a < visitedNodes->size(); ++a) { NodeIndex visitedNode = visitedNodes->at(a).mNodeIndex; hopCounts[visitedNode] = visitedNodes->at(a).mDepth; //How many hops to root fastRoute[visitedNode] = visitedNodes->at(visitedNodes->at(a).mPrevIndex).mNodeIndex; islandIds[visitedNode] = newIsland; } } } // all dirty nodes processed return newIslandsCount; } /** !!! Debug/Test function. Function to check that root between nodes exists. 
*/ bool FamilyGraph::canFindRoot(NodeIndex startNode, NodeIndex targetNode, FixedArray<NodeIndex>* visitedNodes, const SupportGraph* graph) { if (visitedNodes) visitedNodes->pushBack(startNode); if (startNode == targetNode) return true; std::vector<bool> visitedState; visitedState.resize(graph->m_nodeCount); for (uint32_t i = 0; i < graph->m_nodeCount; i++) visitedState[i] = false; std::stack<NodeIndex> stack; stack.push(startNode); visitedState[startNode] = true; const uint32_t* adjacencyPartition = graph->getAdjacencyPartition(); do { NodeIndex currentNode = stack.top(); stack.pop(); for (uint32_t adjacencyIndex = adjacencyPartition[currentNode]; adjacencyIndex < adjacencyPartition[currentNode + 1]; adjacencyIndex++) { NodeIndex nextNode = getAdjacentNode(adjacencyIndex, graph); if (isInvalidIndex(nextNode)) continue; if (!visitedState[nextNode]) { if (nextNode == targetNode) { return true; } visitedState[nextNode] = true; stack.push(nextNode); if (visitedNodes) visitedNodes->pushBack(nextNode); } } } while (!stack.empty()); return false; } /** !!! Debug/Test function. Function to check whether an edge exists between two nodes. */ bool FamilyGraph::hasEdge(NodeIndex node0, NodeIndex node1, const SupportGraph* graph) const { const uint32_t* adjacencyPartition = graph->getAdjacencyPartition(); uint32_t edges = 0; for (uint32_t adjacencyIndex = adjacencyPartition[node0]; adjacencyIndex < adjacencyPartition[node0 + 1]; adjacencyIndex++) { if (getAdjacentNode(adjacencyIndex, graph) == node1) { edges++; break; } } for (uint32_t adjacencyIndex = adjacencyPartition[node1]; adjacencyIndex < adjacencyPartition[node1 + 1]; adjacencyIndex++) { if (getAdjacentNode(adjacencyIndex, graph) == node0) { edges++; break; } } return edges > 0; } /** !!! Debug/Test function. Function to calculate and return the total edge count. */ uint32_t FamilyGraph::getEdgesCount(const SupportGraph* graph) const { const uint32_t* adjacencyPartition = graph->getAdjacencyPartition(); uint32_t edges = 0; for (NodeIndex n = 0; n < graph->m_nodeCount; n++) { for (uint32_t adjacencyIndex = adjacencyPartition[n]; adjacencyIndex < adjacencyPartition[n + 1]; adjacencyIndex++) { if (getAdjacentNode(adjacencyIndex, graph) != invalidIndex<uint32_t>()) edges++; } } NVBLAST_ASSERT(edges % 2 == 0); return edges / 2; } } // namespace Blast } // namespace Nv
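// A usage sketch (not part of the SDK source) of the notify-then-search pattern implemented above: when a
// bond breaks, the affected nodes are marked dirty and findIslands() re-evaluates connectivity for that
// actor. Assumes 'familyGraph', 'graph', and the actor/node indices already exist; breakBondAndRecount is
// an illustrative helper name. findIslands requires caller-provided scratch sized by findIslandsRequiredScratch.

#include <cstdlib>

uint32_t breakBondAndRecount(Nv::Blast::FamilyGraph* familyGraph, const Nv::Blast::SupportGraph* graph,
                             Nv::Blast::ActorIndex actor, Nv::Blast::NodeIndex node0, Nv::Blast::NodeIndex node1)
{
    using namespace Nv::Blast;

    // Mark the edge removed; this clears fast routes through it and adds both nodes to the actor's dirty list.
    familyGraph->notifyEdgeRemoved(actor, node0, node1, graph);

    // Re-run the island search over the dirty nodes of this actor.
    void* scratch = std::malloc(FamilyGraph::findIslandsRequiredScratch(graph->m_nodeCount));
    const uint32_t newIslands = familyGraph->findIslands(actor, scratch, graph);
    std::free(scratch);
    return newIslands; // number of new islands split off from this actor
}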
25,750
C++
38.986025
220
0.668738
NVIDIA-Omniverse/PhysX/blast/source/sdk/lowlevel/NvBlastAsset.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTASSET_H #define NVBLASTASSET_H #include "NvBlastSupportGraph.h" #include "NvBlast.h" #include "NvBlastAssert.h" #include "NvBlastIndexFns.h" #include "NvBlastChunkHierarchy.h" namespace Nv { namespace Blast { class Asset : public NvBlastAsset { public: /** Struct-enum which is used to mark chunk descriptors when building an asset. */ struct ChunkAnnotation { enum Enum { Parent = (1 << 0), Support = (1 << 1), SuperSupport = (1 << 2), // Combinations UpperSupport = Support | SuperSupport }; }; /** Create an asset from a descriptor. \param[in] mem Pointer to block of memory of at least the size given by getMemorySize(desc). Must be 16-byte aligned. \param[in] desc Asset descriptor (see NvBlastAssetDesc). \param[in] scratch User-supplied scratch memory of size createRequiredScratch(desc) bytes. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return the pointer to the new asset, or nullptr if unsuccessful. */ static Asset* create(void* mem, const NvBlastAssetDesc* desc, void* scratch, NvBlastLog logFn); /** Returns the number of bytes of memory that an asset created using the given descriptor will require. A pointer to a block of memory of at least this size must be passed in as the mem argument of create. \param[in] desc The asset descriptor that will be passed into NvBlastCreateAsset. */ static size_t getMemorySize(const NvBlastAssetDesc* desc); /** Returns the size of the scratch space (in bytes) required to be passed into the create function, based upon the input descriptor that will be passed to the create function. \param[in] desc The descriptor that will be passed to the create function. \return the number of bytes required. */ static size_t createRequiredScratch(const NvBlastAssetDesc* desc, NvBlastLog logFn); /** Returns the number of upper-support chunks in this asset.. \return the number of upper-support chunks. 
*/ uint32_t getUpperSupportChunkCount() const; /** Returns the number of lower-support chunks in this asset. This is the required actor buffer size for an Actor family. \return the number of lower-support chunks. */ uint32_t getLowerSupportChunkCount() const; /** Returns the number of bonds in this asset's support graph. \return the number of bonds in this asset's support graph. */ uint32_t getBondCount() const; /** Returns the number of separate chunk hierarchies in the asset. This will be the initial number of visible chunks in an actor instanced from this asset. \return the number of separate chunk hierarchies in the asset. */ uint32_t getHierarchyCount() const; /** Maps all lower-support chunk indices to a contiguous range [0, getLowerSupportChunkCount()). \param[in] chunkIndex Asset chunk index. \return an index in the range [0, getLowerSupportChunkCount()) if it is a lower-support chunk, invalidIndex<uint32_t>() otherwise. */ uint32_t getContiguousLowerSupportIndex(uint32_t chunkIndex) const; // Static functions /** Function to ensure support coverage of chunks. Support chunks (marked in the NvBlastChunkDesc struct) must provide full coverage over the asset. This means that from any leaf chunk to the root node, exactly one chunk must be support. If this condition is not met, the actual support chunks will be adjusted accordingly. Chunk order depends on support coverage, so this function should be called before chunk reordering. \param[out] supportChunkCount The number of support chunks. NOTE - this value is not meaningful if testOnly = true and the return value is false. \param[out] leafChunkCount The number of leaf chunks. NOTE - this value is not meaningful if testOnly = true and the return value is false. \param[out] chunkAnnotation User-supplied char array of size chunkCount. NOTE - these values are not meaningful if testOnly = true and the return value is false. \param[in] chunkCount The number of chunk descriptors. \param[in] chunkDescs Array of chunk descriptors of size chunkCount. It will be updated accordingly. \param[in] testOnly If true, this function early-outs if support coverage is not exact. If false, exact coverage is ensured by possibly modifying chunkDescs' flags. \param[in] logFn User-supplied message function (see NvBlastLog definition). May be NULL. \return true iff coverage was already exact. */ static bool ensureExactSupportCoverage(uint32_t& supportChunkCount, uint32_t& leafChunkCount, char* chunkAnnotation, uint32_t chunkCount, NvBlastChunkDesc* chunkDescs, bool testOnly, NvBlastLog logFn); /** Tests a set of chunk descriptors to see if chunks are in valid chunk order. Chunk order conditions checked: 1. 'all chunks with same parent index should go in a row'. 2. 'chunks should come after their parents'. 3. 'root chunks should go first'. 4. 'upper-support chunks should come before subsupport chunks'. \param[in] chunkCount The number of chunk descriptors. \param[in] chunkDescs An array of chunk descriptors of length chunkCount. \param[in] chunkAnnotation Annotation generated from ensureExactSupportCoverage (see ensureExactSupportCoverage). \param[in] scratch User-supplied scratch memory of chunkCount bytes. \return true if the descriptors meet the ordering conditions, false otherwise. */ static bool testForValidChunkOrder(uint32_t chunkCount, const NvBlastChunkDesc* chunkDescs, const char* chunkAnnotation, void* scratch); //////// Data //////// /** Asset data block header. */ NvBlastDataBlock m_header; /** ID for this asset.
*/ NvBlastID m_ID; /** The total number of chunks in the asset, support and non-support. */ uint32_t m_chunkCount; /** The support graph. */ SupportGraph m_graph; /** The number of leaf chunks in the asset. */ uint32_t m_leafChunkCount; /** Chunks are sorted such that subsupport chunks come last. This is the first subsupport chunk index. Equals m_chunkCount if there are no subsupport chunks. */ uint32_t m_firstSubsupportChunkIndex; /** The number of bonds in the asset. */ uint32_t m_bondCount; /** Chunks, of type NvBlastChunk. getChunks returns an array of size m_chunkCount. */ NvBlastBlockArrayData(NvBlastChunk, m_chunksOffset, getChunks, m_chunkCount); /** Array of bond data for the interfaces between two chunks. Since the bond is shared by two chunks, the same bond data is used for chunk[i] -> chunk[j] as for chunk[j] -> chunk[i]. The size of the array is m_graph.adjacencyPartition[m_graph.m_nodeCount]/2. See NvBlastBond. getBonds returns an array of size m_bondCount. */ NvBlastBlockArrayData(NvBlastBond, m_bondsOffset, getBonds, m_bondCount); /** Caching the number of leaf chunks descended from each chunk (including the chunk itself). This data parallels the Chunks array, and is an array of the same size. getSubtreeLeafChunkCount returns a uint32_t array of size m_chunkCount. */ NvBlastBlockArrayData(uint32_t, m_subtreeLeafChunkCountsOffset, getSubtreeLeafChunkCounts, m_chunkCount); /** Mapping from chunk index to graph node index (inverse of m_graph.getChunkIndices()). getChunkToGraphNodeMap returns a uint32_t array of size m_chunkCount. */ NvBlastBlockArrayData(uint32_t, m_chunkToGraphNodeMapOffset, getChunkToGraphNodeMap, m_chunkCount); //////// Iterators //////// /** Chunk hierarchy depth-first iterator. Traverses subtree with root given by startChunkIndex. If upperSupportOnly == true, then the iterator will not traverse subsupport chunks. */ class DepthFirstIt : public ChunkDepthFirstIt { public: /** Constructed from an asset. */ DepthFirstIt(const Asset& asset, uint32_t startChunkIndex, bool upperSupportOnly = false) : ChunkDepthFirstIt(asset.getChunks(), startChunkIndex, upperSupportOnly ? asset.getUpperSupportChunkCount() : asset.m_chunkCount) {} }; }; //////// Asset inline member functions //////// NV_INLINE uint32_t Asset::getUpperSupportChunkCount() const { return m_firstSubsupportChunkIndex; } NV_INLINE uint32_t Asset::getLowerSupportChunkCount() const { return m_graph.m_nodeCount + (m_chunkCount - m_firstSubsupportChunkIndex); } NV_INLINE uint32_t Asset::getBondCount() const { NVBLAST_ASSERT((m_graph.getAdjacencyPartition()[m_graph.m_nodeCount] & 1) == 0); // The bidirectional graph data should have an even number of edges return m_graph.getAdjacencyPartition()[m_graph.m_nodeCount] / 2; // Directional bonds, divide by two } NV_INLINE uint32_t Asset::getHierarchyCount() const { const NvBlastChunk* chunks = getChunks(); for (uint32_t i = 0; i < m_chunkCount; ++i) { if (!isInvalidIndex(chunks[i].parentChunkIndex)) { return i; } } return m_chunkCount; } NV_INLINE uint32_t Asset::getContiguousLowerSupportIndex(uint32_t chunkIndex) const { NVBLAST_ASSERT(chunkIndex < m_chunkCount); return chunkIndex < m_firstSubsupportChunkIndex ? getChunkToGraphNodeMap()[chunkIndex] : (chunkIndex - m_firstSubsupportChunkIndex + m_graph.m_nodeCount); } //JDM: Expose this so serialization layer can use it.
NV_C_API Asset* initializeAsset(void* mem, uint32_t chunkCount, uint32_t graphNodeCount, uint32_t leafChunkCount, uint32_t firstSubsupportChunkIndex, uint32_t bondCount, NvBlastLog logFn); } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTASSET_H
11,691
C
36.354632
209
0.70473
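Asset::getContiguousLowerSupportIndex above packs the two kinds of lower-support chunks into one contiguous range: support chunks map through the chunk-to-graph-node table into [0, nodeCount), and subsupport chunks are appended after them. A standalone sketch of that arithmetic follows; the function name, parameters, and the invalid-index constant are illustrative, not SDK code.

// Standalone sketch of the lower-support index packing used by getContiguousLowerSupportIndex.
#include <cstdint>
#include <vector>

constexpr uint32_t kInvalidIndex = 0xFFFFFFFF;   // assumed sentinel, analogous to invalidIndex<uint32_t>()

uint32_t contiguousLowerSupportIndex(uint32_t chunkIndex,
                                     uint32_t firstSubsupportChunkIndex,
                                     uint32_t graphNodeCount,
                                     const std::vector<uint32_t>& chunkToGraphNodeMap)
{
    if (chunkIndex < firstSubsupportChunkIndex)
    {
        // Upper-support range: only support chunks have a valid graph node; the rest map to kInvalidIndex.
        return chunkToGraphNodeMap[chunkIndex];
    }
    // Subsupport range: these chunks are packed directly after the graph nodes.
    return chunkIndex - firstSubsupportChunkIndex + graphNodeCount;
}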
NVIDIA-Omniverse/PhysX/blast/source/shared/NvTask/include/NvGpuDispatcher.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_GPU_DISPATCHER_H #define NV_GPU_DISPATCHER_H #include "NvTaskDefine.h" #include "NvTask.h" /* forward decl to avoid including <cuda.h> */ typedef struct CUstream_st* CUstream; namespace nvidia { namespace cudamanager { struct NvGpuCopyDesc; class NvCudaContextManager; } namespace task { NV_PUSH_PACK_DEFAULT class NvTaskManager; /** \brief A GpuTask dispatcher * * A NvGpuDispatcher executes GpuTasks submitted by one or more TaskManagers (one * or more scenes). It maintains a CPU worker thread which waits on GpuTask * "groups" to be submitted. The submission API is explicitly sessioned so that * GpuTasks are dispatched together as a group whenever possible to improve * parallelism on the GPU. * * A NvGpuDispatcher cannot be allocated ad-hoc, they are created as a result of * creating a NvCudaContextManager. Every NvCudaContextManager has a NvGpuDispatcher * instance that can be queried. In this way, each NvGpuDispatcher is tied to * exactly one CUDA context. * * A scene will use CPU fallback Tasks for GpuTasks if the NvTaskManager provided * to it does not have a NvGpuDispatcher. For this reason, the NvGpuDispatcher must * be assigned to the NvTaskManager before the NvTaskManager is given to a scene. * * Multiple TaskManagers may safely share a single NvGpuDispatcher instance, thus * enabling scenes to share a CUDA context. * * Only failureDetected() is intended for use by the user. The rest of the * nvGpuDispatcher public methods are reserved for internal use by only both * TaskManagers and GpuTasks. */ class NvGpuDispatcher { public: /** \brief Record the start of a simulation step * * A NvTaskManager calls this function to record the beginning of a simulation * step. The NvGpuDispatcher uses this notification to initialize the * profiler state. 
*/ virtual void startSimulation() = 0; /** \brief Record the start of a GpuTask batch submission * * A NvTaskManager calls this function to notify the NvGpuDispatcher that one or * more GpuTasks are about to be submitted for execution. The NvGpuDispatcher * will not read the incoming task queue until it receives one finishGroup() * call for each startGroup() call. This is to ensure as many GpuTasks as * possible are executed together as a group, generating optimal parallelism * on the GPU. */ virtual void startGroup() = 0; /** \brief Submit a GpuTask for execution * * Submitted tasks are pushed onto an incoming queue. The NvGpuDispatcher * will take the contents of this queue every time the pending group count * reaches 0 and run the group of submitted GpuTasks as an interleaved * group. */ virtual void submitTask(NvTask& task) = 0; /** \brief Record the end of a GpuTask batch submission * * A NvTaskManager calls this function to notify the NvGpuDispatcher that it is * done submitting a group of GpuTasks (GpuTasks which were all made ready * to run by the same prerequisite dependency becoming resolved). If no * other group submissions are in progress, the NvGpuDispatcher will execute * the set of ready tasks. */ virtual void finishGroup() = 0; /** \brief Add a CUDA completion prerequisite dependency to a task * * A GpuTask calls this function to add a prerequisite dependency on another * task (usually a CpuTask) preventing that task from starting until all of * the CUDA kernels and copies already launched have been completed. The * NvGpuDispatcher will increment that task's reference count, blocking its * execution, until the CUDA work is complete. * * This is generally only required when a CPU task is expecting the results * of the CUDA kernels to have been copied into host memory. * * This mechanism is not at all required to ensure CUDA kernels and * copies are issued in the correct order. Kernel issue order is determined * by normal task dependencies. The rule of thumb is to only use a blocking * completion prerequisite if the task in question depends on a completed * GPU->Host DMA. * * The NvGpuDispatcher issues a blocking event record to CUDA for the purposes * of tracking the already submitted CUDA work. When this event is * resolved, the NvGpuDispatcher manually decrements the reference count of * the specified task, allowing it to execute (assuming it does not have * other pending prerequisites). */ virtual void addCompletionPrereq(NvBaseTask& task) = 0; /** \brief Retrieve the NvCudaContextManager associated with this * NvGpuDispatcher * * Every NvCudaContextManager has one NvGpuDispatcher, and every NvGpuDispatcher * has one NvCudaContextManager. */ virtual cudamanager::NvCudaContextManager* getCudaContextManager() = 0; /** \brief Record the end of a simulation frame * * A NvTaskManager calls this function to record the completion of its * dependency graph. If profiling is enabled, the NvGpuDispatcher will * trigger the retrieval of profiling data from the GPU at this point. */ virtual void stopSimulation() = 0; /** \brief Returns true if a CUDA call has returned a non-recoverable error * * A return value of true indicates a fatal error has occurred. To protect * itself, the NvGpuDispatcher enters a fall through mode that allows GpuTasks * to complete without being executed. This allows simulations to continue * but leaves GPU content static or corrupted. * * The user may try to recover from these failures by deleting GPU content * so the visual artifacts are minimized.
But there is no way to recover * the state of the GPU actors before the failure. Once a CUDA context is * in this state, the only recourse is to create a new CUDA context, a new * scene, and start over. * * This is our "Best Effort" attempt to not turn a soft failure into a hard * failure because continued use of a CUDA context after it has returned an * error will usually result in a driver reset. However if the initial * failure was serious enough, a reset may have already occurred by the time * we learn of it. */ virtual bool failureDetected() const = 0; /** \brief Force the NvGpuDispatcher into failure mode * * This API should be used if user code detects a non-recoverable CUDA * error. This ensures the NvGpuDispatcher does not launch any further * CUDA work. Subsequent calls to failureDetected() will return true. */ virtual void forceFailureMode() = 0; /** \brief Returns a pointer to the current in-use profile buffer * * The returned pointer should be passed to all kernel launches to enable * CTA/Warp level profiling. If a data collector is not attached, or CTA * profiling is not enabled, the pointer will be zero. */ virtual void* getCurrentProfileBuffer() const = 0; /** \brief Register kernel names with PlatformAnalyzer * * The returned uint16_t must be stored and used as a base offset for the ID * passed to the KERNEL_START|STOP_EVENT macros. */ virtual uint16_t registerKernelNames(const char**, uint16_t count) = 0; /** \brief Launch a copy kernel with arbitrary number of copy commands * * This method is intended to be called from Kernel GpuTasks, but it can * function outside of that context as well. * * If count is 1, the descriptor is passed to the kernel as arguments, so it * may be declared on the stack. * * If count is greater than 1, the kernel will read the descriptors out of * host memory. Because of this, the descriptor array must be located in * page locked (pinned) memory. The provided descriptors may be modified by * this method (converting host pointers to their GPU mapped equivalents) * and should be considered *owned* by CUDA until the current batch of work * has completed, so descriptor arrays should not be freed or modified until * you have received a completion notification. * * If your GPU does not support mapping of page locked memory (SM>=1.1), * this function degrades to calling CUDA copy methods. */ virtual void launchCopyKernel(cudamanager::NvGpuCopyDesc* desc, uint32_t count, CUstream stream) = 0; /** \brief Query pre launch task that runs before launching gpu kernels. * * This is part of an optional feature to schedule multiple gpu features * at the same time to get kernels to run in parallel. * \note Do *not* set the continuation on the returned task, but use addPreLaunchDependent(). */ virtual NvBaseTask& getPreLaunchTask() = 0; /** \brief Adds a gpu launch task that gets executed after the pre launch task. * * This is part of an optional feature to schedule multiple gpu features * at the same time to get kernels to run in parallel. * \note Each call adds a reference to the pre-launch task. */ virtual void addPreLaunchDependent(NvBaseTask& dependent) = 0; /** \brief Query post launch task that runs after the gpu is done. * * This is part of an optional feature to schedule multiple gpu features * at the same time to get kernels to run in parallel. * \note Do *not* set the continuation on the returned task, but use addPostLaunchDependent(). */ virtual NvBaseTask& getPostLaunchTask() = 0; /** \brief Adds a task that gets executed after the post launch task. 
* * This is part of an optional feature to schedule multiple gpu features * at the same time to get kernels to run in parallel. * \note Each call adds a reference to the pre-launch task. */ virtual void addPostLaunchDependent(NvBaseTask& dependent) = 0; protected: /** \brief protected destructor * * GpuDispatchers are allocated and freed by their NvCudaContextManager. */ virtual ~NvGpuDispatcher() {} }; NV_POP_PACK } } // end nvidia namespace #endif
12,162
C
44.048148
120
0.703009
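The startGroup()/submitTask()/finishGroup() protocol documented above is explicitly sessioned: the dispatcher only drains its incoming queue once every startGroup() has been matched by a finishGroup(). The sketch below shows how a caller might batch a set of ready GpuTasks as one group; it assumes only the NvGpuDispatcher interface declared above and is not taken from the SDK's task manager implementation.

// Illustrative only: submit a batch of ready GPU tasks as a single group so the
// dispatcher can interleave them. Assumes the NvGpuDispatcher.h and NvTask.h headers above.
#include "NvGpuDispatcher.h"
#include "NvTask.h"

void submitReadyGpuTasks(nvidia::task::NvGpuDispatcher& dispatcher,
                         nvidia::task::NvTask* const* readyTasks, uint32_t count)
{
    dispatcher.startGroup();
    for (uint32_t i = 0; i < count; ++i)
    {
        dispatcher.submitTask(*readyTasks[i]);   // pushed onto the incoming queue
    }
    dispatcher.finishGroup();                    // queue is only drained once all open groups are finished
}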
NVIDIA-Omniverse/PhysX/blast/source/shared/NvTask/include/NvTaskManager.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_TASK_MANAGER_H #define NV_TASK_MANAGER_H #include "NvTaskDefine.h" #include "NvSimpleTypes.h" #include "NvErrorCallback.h" namespace nvidia { namespace task { NV_PUSH_PACK_DEFAULT class NvBaseTask; class NvTask; class NvLightCpuTask; typedef unsigned int NvTaskID; /** \brief Identifies the type of each heavyweight NvTask object \note This enum type is only used by NvTask and GpuTask objects, LightCpuTasks do not use this enum. @see NvTask @see NvLightCpuTask */ struct NvTaskType { /** * \brief Identifies the type of each heavyweight NvTask object */ enum Enum { TT_CPU, //!< NvTask will be run on the CPU TT_GPU, //!< NvTask will be run on the GPU TT_NOT_PRESENT, //!< Return code when attempting to find a task that does not exist TT_COMPLETED //!< NvTask execution has been completed }; }; class NvCpuDispatcher; class NvGpuDispatcher; /** \brief The NvTaskManager interface A NvTaskManager instance holds references to user-provided dispatcher objects, when tasks are submitted the NvTaskManager routes them to the appropriate dispatcher and handles task profiling if enabled. @see CpuDispatcher @see NvGpuDispatcher */ class NvTaskManager { public: /** \brief Set the user-provided dispatcher object for CPU tasks \param[in] ref The dispatcher object. @see CpuDispatcher */ virtual void setCpuDispatcher(NvCpuDispatcher& ref) = 0; /** \brief Set the user-provided dispatcher object for GPU tasks \param[in] ref The dispatcher object. @see NvGpuDispatcher */ virtual void setGpuDispatcher(NvGpuDispatcher& ref) = 0; /** \brief Get the user-provided dispatcher object for CPU tasks \return The CPU dispatcher object. @see CpuDispatcher */ virtual NvCpuDispatcher* getCpuDispatcher() const = 0; /** \brief Get the user-provided dispatcher object for GPU tasks \return The GPU dispatcher object. 
@see NvGpuDispatcher */ virtual NvGpuDispatcher* getGpuDispatcher() const = 0; /** \brief Reset any dependencies between Tasks \note Will be called at the start of every frame before tasks are submitted. @see NvTask */ virtual void resetDependencies() = 0; /** \brief Called by the owning scene to start the task graph. \note All tasks with with ref count of 1 will be dispatched. @see NvTask */ virtual void startSimulation() = 0; /** \brief Called by the owning scene at the end of a simulation step to synchronize the NvGpuDispatcher @see NvGpuDispatcher */ virtual void stopSimulation() = 0; /** \brief Called by the worker threads to inform the NvTaskManager that a task has completed processing \param[in] task The task which has been completed */ virtual void taskCompleted(NvTask& task) = 0; /** \brief Retrieve a task by name \param[in] name The unique name of a task \return The ID of the task with that name, or TT_NOT_PRESENT if not found */ virtual NvTaskID getNamedTask(const char* name) = 0; /** \brief Submit a task with a unique name. \param[in] task The task to be executed \param[in] name The unique name of a task \param[in] type The type of the task (default TT_CPU) \return The ID of the task with that name, or TT_NOT_PRESENT if not found */ virtual NvTaskID submitNamedTask(NvTask* task, const char* name, NvTaskType::Enum type = NvTaskType::TT_CPU) = 0; /** \brief Submit an unnamed task. \param[in] task The task to be executed \param[in] type The type of the task (default TT_CPU) \return The ID of the task with that name, or TT_NOT_PRESENT if not found */ virtual NvTaskID submitUnnamedTask(NvTask& task, NvTaskType::Enum type = NvTaskType::TT_CPU) = 0; /** \brief Retrieve a task given a task ID \param[in] id The ID of the task to return, a valid ID must be passed or results are undefined \return The task associated with the ID */ virtual NvTask* getTaskFromID(NvTaskID id) = 0; /** \brief Release the NvTaskManager object, referenced dispatchers will not be released */ virtual void release() = 0; /** \brief Construct a new NvTaskManager instance with the given [optional] dispatchers */ static NvTaskManager* createTaskManager(NvErrorCallback& errorCallback, NvCpuDispatcher* = 0, NvGpuDispatcher* = 0); protected: virtual ~NvTaskManager() {} /*! \cond PRIVATE */ virtual void finishBefore(NvTask& task, NvTaskID taskID) = 0; virtual void startAfter(NvTask& task, NvTaskID taskID) = 0; virtual void addReference(NvTaskID taskID) = 0; virtual void decrReference(NvTaskID taskID) = 0; virtual int32_t getReference(NvTaskID taskID) const = 0; virtual void decrReference(NvLightCpuTask&) = 0; virtual void addReference(NvLightCpuTask&) = 0; virtual void emitStartEvent(NvBaseTask&, uint32_t threadId=0) = 0; virtual void emitStopEvent(NvBaseTask&, uint32_t threadId=0) = 0; /*! \endcond */ friend class NvBaseTask; friend class NvTask; friend class NvLightCpuTask; friend class NvGpuWorkerThread; }; NV_POP_PACK } } // end nvidia namespace #endif
7,112
C
29.13983
120
0.6991
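Putting the NvTaskManager interface above together, a typical per-frame flow is: reset dependencies, submit the frame's tasks, then bracket execution with startSimulation()/stopSimulation(). The sketch below is illustrative only; the task object and the application's own wait-for-completion step are assumptions, not SDK code.

// Illustrative per-frame use of NvTaskManager, assuming the headers above.
// 'myTask' is a hypothetical NvTask subclass owned by the caller.
#include "NvTaskManager.h"

using namespace nvidia::task;

void runFrame(NvTaskManager& tm, NvTask& myTask)
{
    tm.resetDependencies();                                                     // called before tasks are submitted
    const NvTaskID id = tm.submitNamedTask(&myTask, "MyTask", NvTaskType::TT_CPU);
    (void)id;                                                                   // the ID can be used to look the task up by name later
    tm.startSimulation();                                                       // dispatches all tasks with a ref count of 1
    // ... wait for completion using the application's own synchronization ...
    tm.stopSimulation();                                                        // synchronizes the GPU dispatcher, if one is set
}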
NVIDIA-Omniverse/PhysX/blast/source/shared/NvTask/include/NvCpuDispatcher.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. // Copyright (c) 2004-2023 AGEIA Technologies, Inc. All rights reserved. // Copyright (c) 2001-2023 NovodeX AG. All rights reserved. #ifndef NV_CPU_DISPATCHER_H #define NV_CPU_DISPATCHER_H #include "NvTaskDefine.h" #include "NvSimpleTypes.h" namespace nvidia { namespace task { class NvBaseTask; /** \brief A CpuDispatcher is responsible for scheduling the execution of tasks passed to it by the SDK. A typical implementation would for example use a thread pool with the dispatcher pushing tasks onto worker thread queues or a global queue. @see NvBaseTask @see NvTask @see NvTaskManager */ class NvCpuDispatcher { public: /** \brief Called by the TaskManager when a task is to be queued for execution. Upon receiving a task, the dispatcher should schedule the task to run when resources are available. After the task has been run, it should call the release() method and discard its pointer. \param[in] task The task to be run. @see NvBaseTask */ virtual void submitTask( NvBaseTask& task ) = 0; /** \brief Returns the number of available worker threads for this dispatcher. The SDK will use this count to control how many tasks are submitted. By matching the number of tasks with the number of execution units, task overhead can be reduced. */ virtual uint32_t getWorkerCount() const = 0; virtual ~NvCpuDispatcher() {} }; } } // end nvidia namespace #endif
3,026
C
35.469879
101
0.743556
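The contract above (schedule the task, run it, then call release() and drop the pointer) can be satisfied trivially by an "inline" dispatcher that runs tasks on the calling thread. The sketch below is an assumption-laden illustration: it presumes NvBaseTask exposes run() and release() (release() is named in the comment above; run() is assumed, consistent with the analogous PhysX task interface), and it is not an SDK-provided class.

// Hypothetical single-threaded dispatcher; not part of the SDK.
#include "NvCpuDispatcher.h"
#include "NvTask.h"

class InlineCpuDispatcher : public nvidia::task::NvCpuDispatcher
{
public:
    virtual void submitTask(nvidia::task::NvBaseTask& task) override
    {
        task.run();       // execute immediately on the calling thread (run() assumed to exist)
        task.release();   // then release, per the submitTask() contract above
    }

    virtual uint32_t getWorkerCount() const override
    {
        return 0;         // no dedicated worker threads
    }
};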
NVIDIA-Omniverse/PhysX/blast/source/shared/stress_solver/buffer.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "NvPreprocessor.h" #include <assert.h> #include <stdio.h> #include <vector> #if NV_WINDOWS_FAMILY #define POD_Buffer std::vector #else template<typename T, int Alignment = sizeof(T)> class POD_Buffer { public: POD_Buffer() : _size(0), _capacity(0), _data(nullptr) {} ~POD_Buffer() { deallocate(); } size_t size() const { return _size; } void resize(size_t new_size) { if (new_size > _capacity) { reserve(new_size); } _size = new_size; } void reserve(size_t min_capacity) { if (min_capacity > _capacity) { void* new_data = allocate(min_capacity); if (!!_size) { memcpy(new_data, _data, _size*sizeof(T)); } deallocate(); _capacity = min_capacity; _data = reinterpret_cast<T*>(new_data); } } void push_back(const T& e) { if (_size >= _capacity) { reserve(!!_size ? 2*_size : (size_t)16); } _data[_size++] = e; } void pop_back() { if (!!_size) --_size; } T* data() { return _data; } const T* data() const { return _data; } T& operator [] (size_t index) { assert(_size > index); return _data[index]; } const T& operator [] (size_t index) const { assert(_size > index); return _data[index]; } T& back() { return (*this)[_size-1]; } const T& back() const { return (*this)[_size-1]; } private: void* allocate(size_t buffer_size) { const size_t mem_size = sizeof(T)*buffer_size; unsigned char* mem = (unsigned char*)malloc(mem_size + Alignment); const unsigned char offset = (unsigned char)((uintptr_t)Alignment - (uintptr_t)mem % Alignment - 1); mem += offset; *mem++ = offset; return mem; } void deallocate() { if (!!_data) { unsigned char* cmem = (unsigned char*)_data; const unsigned char offset = *--cmem; ::free(cmem - offset); } _size = 0; _capacity = 0; _data = nullptr; } size_t _size; size_t _capacity; T* _data; }; #endif
3,827
C
28.674418
108
0.610138
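The non-Windows POD_Buffer above gets aligned storage from plain malloc by over-allocating Alignment extra bytes, sliding the pointer up to the next boundary, and stashing the slide distance in the byte immediately before the returned address so deallocate() can recover the original allocation. A standalone sketch of the same trick follows; the helper names are hypothetical, but the arithmetic mirrors allocate()/deallocate() above.

// Standalone illustration of the offset-byte alignment trick used by POD_Buffer.
// aligned_alloc_bytes/aligned_free_bytes are made-up helpers, not SDK functions.
#include <cstdint>
#include <cstdlib>

void* aligned_alloc_bytes(size_t size, size_t alignment)   // alignment must be in (0, 256]
{
    unsigned char* mem = (unsigned char*)malloc(size + alignment);
    if (!mem)
        return nullptr;
    // Slide so that (mem + offset + 1) lands on an alignment boundary; offset fits in one byte.
    const unsigned char offset = (unsigned char)(alignment - (uintptr_t)mem % alignment - 1);
    mem += offset;
    *mem++ = offset;        // remember the slide in the byte just before the returned pointer
    return mem;
}

void aligned_free_bytes(void* ptr)
{
    if (ptr)
    {
        unsigned char* mem = (unsigned char*)ptr;
        const unsigned char offset = *--mem;    // read back the slide distance
        free(mem - offset);                     // recover the original malloc address
    }
}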
NVIDIA-Omniverse/PhysX/blast/source/shared/stress_solver/coupling.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "solver_types.h" #include "anglin6.h" #include "NvCMath.h" /** * Bond coupling data used as a representation of a block column of a "coupling matrix" C, * which has exactly two non-zero blocks. The non-zero blocks are of the form * * / 1 ~r_ij \ * C_ij = s_ij | |. * \ 0 1 / * * This represents the coupling of node i by bond j. The scalar s_ij is +/-1, and for each * bond (column j of C) s_ij must take on both signs. The matrix factor is again composed * of blocks, each element a 3x3 matrix. The 0 and 1's are just multiples of the unit matrix, * and ~r_ij is the 3x3 antisymmetric matrix representing "crossing with the vector r_ij on the * left" (i.e. (~u)*v = (u) x (v)). The vector r_ij represents the displacement from node i's * CoM to bond j's centroid. */ SIMD_ALIGN_32 ( struct Coupling { NvcVec3 offset0; uint32_t node0; NvcVec3 offset1; uint32_t node1; } ); template <typename Elem, typename Scalar = Float_Scalar> struct CouplingMatrixOps { /** * Sparse matrix-vector multiply y = C*x, where C is a "coupling matrix" represented by columns * of type Coupling (see the comments for Coupling). * * \param[out] y Resulting column Elem vector of length M. * \param[in] C Input M x N coupling matrix. * \param[in] x Input column Elem vector of length N. * \param[in] M The number of rows in y and C. * \param[in] N The number of rows in x and columns in C. */ inline void rmul(Elem* y, const Coupling* C, const Elem* x, uint32_t M, uint32_t N) { memset(y, 0, sizeof(AngLin6)*M); for (uint32_t j = 0 ; j < N; ++j) { const Coupling& c = C[j]; const AngLin6& x_j = x[j]; AngLin6& y0 = y[c.node0]; AngLin6& y1 = y[c.node1]; y0.ang += x_j.ang - (c.offset0^x_j.lin); y0.lin += x_j.lin; y1.ang -= x_j.ang - (c.offset1^x_j.lin); y1.lin -= x_j.lin; } } /** * Sparse matrix-vector multiply y = x*C, where C is a "coupling matrix" represented by columns * of type Coupling (see the comments for Coupling). * * \param[out] y Resulting row Elem vector of length N. 
* \param[in] x Input row Elem vector, must be long enough to be indexed by all values in C's representation. * \param[in] C Input M x N coupling matrix. * \param[in] M The number of columns in x and rows in C. * \param[in] N The number of columns in y and C. */ inline void lmul(Elem* y, const Elem* x, const Coupling* C, uint32_t M, uint32_t N) { NV_UNUSED(M); for (uint32_t j = 0; j < N; ++j) { const Coupling& c = C[j]; const AngLin6& x0 = x[c.node0]; const AngLin6& x1 = x[c.node1]; AngLin6& y_j = y[j]; y_j.ang = x0.ang - x1.ang; y_j.lin = x0.lin - x1.lin + (c.offset0^x0.ang) - (c.offset1^x1.ang); } } }; template <typename Elem> struct CouplingMatrixOps<Elem, SIMD_Scalar> { /** * Sparse matrix-vector multiply y = C*x, where C is a "coupling matrix" represented by columns * of type Coupling (see the comments for Coupling). * * \param[out] y Resulting column Elem vector of length M. * \param[in] C Input M x N coupling matrix. * \param[in] x Input column Elem vector of length N. * \param[in] M The number of rows in y and C. * \param[in] N The number of rows in x and columns in C. */ inline void rmul(Elem* y, const Coupling* C, const Elem* x, uint32_t M, uint32_t N) { memset(y, 0, sizeof(AngLin6)*M); for (uint32_t j = 0 ; j < N; ++j) { const Coupling& c = C[j]; const AngLin6& x_j = x[j]; AngLin6& y0 = y[c.node0]; AngLin6& y1 = y[c.node1]; __m256 _x = _mm256_load_ps(&x_j.ang.x); __m256 _y0 = _mm256_load_ps(&y0.ang.x); __m256 _y1 = _mm256_load_ps(&y1.ang.x); __m256 _c = _mm256_load_ps(&c.offset0.x); _y0 = _mm256_add_ps(_y0, _x); _y1 = _mm256_sub_ps(_y1, _x); __m128 _xl = _mm256_extractf128_ps(_x, 1); __m256 _a = pair_cross3(_mm256_set_m128(_xl, _xl), _c); _y0 = _mm256_add_ps(_y0, _mm256_set_m128(_mm_setzero_ps(), _mm256_castps256_ps128(_a))); _y1 = _mm256_sub_ps(_y1, _mm256_set_m128(_mm_setzero_ps(), _mm256_extractf128_ps(_a, 1))); _mm256_store_ps(&y0.ang.x, _y0); _mm256_store_ps(&y1.ang.x, _y1); } } /** * Sparse matrix-vector multiply y = x*C, where C is a "coupling matrix" represented by columns * of type Coupling (see the comments for Coupling). * * \param[out] y Resulting row Elem vector of length N. * \param[in] x Input row Elem vector, must be long enough to be indexed by all values in C's representation. * \param[in] C Input M x N coupling matrix. * \param[in] M The number of columns in x and rows in C. * \param[in] N The number of columns in y and C. */ inline void lmul(Elem* y, const Elem* x, const Coupling* C, uint32_t M, uint32_t N) { NV_UNUSED(M); for (uint32_t j = 0; j < N; ++j) { const Coupling& c = C[j]; const AngLin6& x0 = x[c.node0]; const AngLin6& x1 = x[c.node1]; AngLin6& y_j = y[j]; __m256 _x0 = _mm256_load_ps(&x0.ang.x); __m256 _x1 = _mm256_load_ps(&x1.ang.x); __m256 _c = _mm256_load_ps(&c.offset0.x); __m256 _y = _mm256_sub_ps(_x0, _x1); __m256 _a = pair_cross3(_c, _mm256_set_m128(_mm256_castps256_ps128(_x1), _mm256_castps256_ps128(_x0))); _y = _mm256_add_ps(_y, _mm256_set_m128(_mm_sub_ps(_mm256_castps256_ps128(_a), _mm256_extractf128_ps(_a, 1)), _mm_setzero_ps())); _mm256_store_ps(&y_j.ang.x, _y); } } private: inline __m256 pair_cross3(const __m256& v0, const __m256& v1) { __m256 prep0 = _mm256_shuffle_ps(v0, v0, 0xc9); __m256 prep1 = _mm256_shuffle_ps(v1, v1, 0xc9); __m256 res_shuffled = _mm256_fmsub_ps(v0, prep1, _mm256_mul_ps(prep0, v1)); return _mm256_shuffle_ps(res_shuffled, res_shuffled, 0xc9); } };
8,039
C
38.219512
140
0.591243
NVIDIA-Omniverse/PhysX/blast/source/shared/stress_solver/bond.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "coupling.h" #include "inertia.h" #include "anglin6.h" /** * BondMatrix * * Given a BondMatrix B, when (B^T)*B is applied to a vector of bond impulses, the result * is a vector of the differences between the the resulting accelerations of the nodes * joined by each bond. * * This is done in block form, so a vector is composed of vector elements. Each element * is a 6-dimensional vector, composed of a linear part followed by an angular part. * Matrix blocks are likewise 6x6. * * This matrix is composed of two sparse matrices: * An M x M block diagonal matrix I, where the i^th diagonal block is the 6x6 matrix: * * / I_i 0 \ * I_ii = | | * \ 0 m_i / * * Except for possibly I_i, each "element" in I_ii is a multiple of the 3x3 unit matrix. I_i is a * 3x3 symmetric inertia tensor. See the definition of Inertia<TensorType> for its representation. * * The second component is the coupling matrix C, see documentation for Coupling. * * The matrix represented by this object is (I^-1/2)*C, an M x N matrix. * * NOTE: I and C are _not_ stored as described above, for efficiency. */ template <typename TensorType> struct BondMatrix { /** Constructor clears member data. */ BondMatrix() : C(nullptr), sqrt_I_inv(nullptr), scratch(nullptr), M(0), N(0) {} /** * Set fields (shallow pointer copy). * * \param[in] _C Coupling matrix, see the documentation for Coupling. * \param[in] _sqrt_I_inv The inverse of the square root of the diagonal mass and inertia tensor, represented by a * vector of _M Inertia structs for the diagonal values. The i^th element is the reciprocal * of the square root of the mass and inertia tensor of node i. * \param[in] _scratch Scratch memory required to carry out a multiply. Must be at least _M*sizeof(AngLin6) bytes. * \param[in] _M The number of nodes. * \param[in] _N The number of bonds. 
*/ void set(const Coupling* _C, const Inertia<TensorType>* _sqrt_I_inv, void* _scratch, uint32_t _M, uint32_t _N) { C = _C; sqrt_I_inv = _sqrt_I_inv; scratch = _scratch; M = _M; N = _N; } const Coupling* C; const Inertia<TensorType>* sqrt_I_inv; void* scratch; uint32_t M, N; }; typedef BondMatrix<float> BondMatrixS; typedef BondMatrix<NvcVec3> BondMatrixD; typedef BondMatrix<NvcMat33> BondMatrixG; template<typename TensorType, typename Scalar> struct BondMatrixOps { /** * Matrix-vector multiply y = B*x. * * \param[out] y Resulting column vector of length N. * \param[in] B Input MxN matrix representation. * \param[in] x Input column vector of length M. * \param[in] M Number of rows in B. * \param[in] N Number of columns in B. */ inline void rmul(AngLin6* y, const BondMatrix<TensorType>& B, const AngLin6* x, uint32_t M, uint32_t N) const { NV_UNUSED(M); // BondMatrix stores these NV_UNUSED(N); // Calculate y = C*x (apply C) CouplingMatrixOps<AngLin6, Scalar>().rmul(y, B.C, x, B.M, B.N); // Calculate y = (I^-1/2)*C*x (apply I^-1/2) InertiaMatrixOps<Scalar>().mul(y, B.sqrt_I_inv, y, B.M); } /** * Matrix-vector multiply y = x*B. * * \param[out] y Resulting row vector of length B.N. * \param[in] x Input row vector of length B.N. * \param[in] B Input matrix representation. * \param[in] M Number of rows in B. * \param[in] N Number of columns in B. */ inline void lmul(AngLin6* y, const AngLin6* x, const BondMatrix<TensorType>& B, uint32_t M, uint32_t N) const { NV_UNUSED(M); // BondMatrix stores these NV_UNUSED(N); AngLin6* s = (AngLin6*)B.scratch; // M-sized scratch s // Calculate s = (I^-1/2)*x (apply I^-1/2) InertiaMatrixOps<Scalar>().mul(s, B.sqrt_I_inv, x, B.M); // Calculate y = (C^T)*(I^-1/2)*x (apply C^T) CouplingMatrixOps<AngLin6, Scalar>().lmul(y, s, B.C, B.M, B.N); } }; template<typename Scalar> using BondMatrixOpsS = BondMatrixOps<float, Scalar>; template<typename Scalar> using BondMatrixOpsD = BondMatrixOps<float, NvcVec3>; template<typename Scalar> using BondMatrixOpsG = BondMatrixOps<float, NvcMat33>;
6,063
C
37.624204
124
0.652812
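As the BondMatrix comment above notes, the solver's interest is in (B^T)*B applied to a vector of bond impulses. With the ops structs here, that is one rmul into node space followed by one lmul back into bond space. The helper below is a sketch under that reading; the function name and the explicit node-sized temporary are assumptions, and it uses the scalar (non-SIMD) code path.

// Illustrative helper, not SDK code: y = (B^T)*(B*x) using BondMatrixOps.
// x and y have length B.N (bonds); tmp must have length B.M (nodes).
// B.scratch must already have been set via BondMatrix::set(), since lmul uses it internally.
#include "bond.h"

void applyNormalOperator(AngLin6* y, const AngLin6* x, AngLin6* tmp, const BondMatrixS& B)
{
    BondMatrixOps<float, Float_Scalar> ops;
    ops.rmul(tmp, B, x, B.M, B.N);   // tmp = B*x = (I^-1/2)*C*x, one element per node
    ops.lmul(y, tmp, B, B.M, B.N);   // y = (C^T)*(I^-1/2)*tmp, one element per bond
}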
NVIDIA-Omniverse/PhysX/blast/source/shared/stress_solver/solver_types.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "NvCTypes.h" #include "simd/simd.h" /** * Scalar types for SIMD and non-SIMD calculations. * Currently also used as a template argument to distinguish code paths. May need a different * scheme if two codepaths use the same scalar type. */ typedef __m128 SIMD_Scalar; typedef float Float_Scalar; /** * Holds the components of a rigid body description that are necessary for the stress solver. */ template<typename InertiaType> struct SolverNode { NvcVec3 CoM; float mass; InertiaType inertia; }; typedef SolverNode<float> SolverNodeS; typedef SolverNode<NvcVec3> SolverNodeD; typedef SolverNode<NvcMat33> SolverNodeG; /** * Holds the components of a rigid body bond description that are necessary for the stress solver. */ struct SolverBond { NvcVec3 centroid; uint32_t nodes[2]; // Index into accompanying SolverNode<InertiaType> array. };
2,498
C
36.298507
98
0.7494
NVIDIA-Omniverse/PhysX/blast/source/shared/stress_solver/stress.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "bond.h" #include "buffer.h" class StressProcessor { public: /** Constructor clears member data. */ StressProcessor() : m_mass_scale(0.0f), m_length_scale(0.0f), m_can_resume(false) {} /** Parameters controlling the data preparation. */ struct DataParams { bool equalizeMasses = false; // Use the geometric mean of the nodes' masses instead of the individual masses. bool centerBonds = false; // Place the bond position halfway between adjoining nodes' CoMs. }; /** Parameters controlling the solver behavior. */ struct SolverParams { uint32_t maxIter = 0; // The maximum number of iterations. If 0, use CGNR for default value. float tolerance = 1.e-6f; // The relative tolerance threshold for convergence. Iteration will stop when this is reached. bool warmStart = false; // Whether or not to use the solve function's 'impulses' parameter as a starting input vector. }; /** * Build the internal representation of the stress network from nodes and bonds. * This only needs to be called initially, and any time the nodes or bonds change. * * \param[in] nodes Array of SolverNodeS (scalar inertia). * \param[in] N_nodes Number of elements in the nodes array. * \param[in] bonds Array of SolverBond. The node indices in each bond entry correspond to the ordering of the nodes array. * \param[in] N_bonds Number of elements in the bonds array. * \param[in] params Parameters affecting the processing of the input data (see DataParams). */ void prepare(const SolverNodeS* nodes, uint32_t N_nodes, const SolverBond* bonds, uint32_t N_bonds, const DataParams& params); /** * Solve for the bond impulses given the velocities of each node. The function prepare(...) must be called * before this can be used, but then solve(...) may be called multiple times. * * The vector elements (impulses and velocities) hold linear and angular parts. * * \param[out] impulses Output array of impulses exerted by each bond. For a warm or hot start, this is also used as an input. 
* Must be of length N_bonds passed into the prepare(...) function. * \param[in] velocities Input array of external velocities on each node. Must be of length N_nodes passed into the prepare(...) function. * \param[in] params Parameters affecting the solver characteristics (see SolverParams). * \param[out] error_sq (Optional) If not NULL, *error_sq will be filled with the angular and linear square errors (solver residuals). Default = NULL. * \param[in] resume (Optional) Set to true if impulses and velocities have not changed since last call, to resume solving. Default = false. * * \return the number of iterations taken to converge, if it converges. Otherwise, returns minus the number of iterations before exiting. */ int solve(AngLin6* impulses, const AngLin6* velocities, const SolverParams& params, AngLin6ErrorSq* error_sq = nullptr, bool resume = false); /** * Removes the indexed bond from the solver. * * \param[in] bondIndex The index of the bond to remove. Must be less than getBondCount(). * * \return true iff successful. */ bool removeBond(uint32_t bondIndex); /** * \return the number of nodes in the stress network. (Set by prepare(...).) */ uint32_t getNodeCount() const { return (uint32_t)m_recip_sqrt_I.size(); } /** * \return the number of bonds in the stress network. (Set by prepare(...), possibly reduced by removeBond(...).) */ uint32_t getBondCount() const { return (uint32_t)m_couplings.size(); } /** * \return whether or not the solver uses SIMD. If the device and OS support SSE, AVX, and FMA instruction sets, SIMD is used. */ static bool usingSIMD() { return s_use_simd; } protected: float m_mass_scale; float m_length_scale; POD_Buffer<InertiaS> m_recip_sqrt_I; POD_Buffer<Coupling> m_couplings; BondMatrixS m_B; POD_Buffer<AngLin6> m_rhs; POD_Buffer<AngLin6> m_B_scratch; POD_Buffer<AngLin6> m_solver_cache; bool m_can_resume; static const bool s_use_simd; };
6,085
C
49.716666
159
0.672309
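A minimal end-to-end sketch of the prepare()/solve() flow declared above is shown below. The node and bond values are made up, and the snippet assumes the stress solver headers are available; it illustrates the calling sequence only and is not SDK sample code.

// Illustrative use of StressProcessor: two nodes joined by one bond.
#include "stress.h"

int stressExample()
{
    SolverNodeS nodes[2] = {
        { { 0.0f, 0.0f, 0.0f }, 1.0f, 1.0f },   // CoM, mass, scalar inertia (made-up values)
        { { 1.0f, 0.0f, 0.0f }, 1.0f, 1.0f }
    };
    SolverBond bond;
    bond.centroid = { 0.5f, 0.0f, 0.0f };
    bond.nodes[0] = 0;
    bond.nodes[1] = 1;

    StressProcessor processor;
    StressProcessor::DataParams dataParams;
    dataParams.centerBonds = true;
    processor.prepare(nodes, 2, &bond, 1, dataParams);

    AngLin6 velocities[2] = {};                  // external velocities, one per node
    velocities[1].lin = { 0.0f, -1.0f, 0.0f };   // pull the second node downward (arbitrary test input)
    AngLin6 impulses[1] = {};                    // solver output, one per bond

    StressProcessor::SolverParams solverParams;
    solverParams.maxIter = 100;
    const int iters = processor.solve(impulses, velocities, solverParams);
    return iters;                                // positive iff the solve converged
}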
NVIDIA-Omniverse/PhysX/blast/source/shared/stress_solver/inertia.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "solver_types.h" #include "NvCMath.h" /** * Holds an inertia component and a mass component. * The inertial component is represented by a TensorType, which may be a float (representing a multiple of * the unit matrix), an NvcVec3 (representing the non-zero components of a diagonal inertia tensor), or a * 3x3 symmetric matrix representing a general inertia tensor. * * This structure might also be used to store reciprocals, or powers (e.g. square roots) of these quantities. */ template <typename TensorType> struct Inertia { TensorType I; float m; }; typedef Inertia<float> InertiaS; typedef Inertia<NvcVec3> InertiaD; typedef Inertia<NvcMat33> InertiaG; template<typename Scalar = Float_Scalar> struct InertiaMatrixOps { /** * Matrix-vector multiply y = I*x. * * Apply a block-diagonal inertia matrix I to a vector of AngLin6 elements. * x and y may be the same vector. * * \param[out] y Resulting column vector of length N. * \param[in] I Input inertia matrix representation. * \param[in] x Input column vector of length N. * \param[in] N Number of columns in x and y, and the square size of I. * * x and y may be the same vector. */ inline void mul(AngLin6* y, const InertiaS* I, const AngLin6* x, uint32_t N) { for (uint32_t i = 0; i < N; ++i) { const InertiaS& I_i = I[i]; const AngLin6& x_i = x[i]; AngLin6& y_i = y[i]; y_i.ang = I_i.I*x_i.ang; y_i.lin = I_i.m*x_i.lin; } } }; template<> struct InertiaMatrixOps<SIMD_Scalar> { /** * Matrix-vector multiply y = I*x. * * Apply a block-diagonal inertia matrix I to a vector of AngLin6 elements. * * \param[out] y Resulting column vector of length N. * \param[in] I Input inertia matrix representation. * \param[in] x Input column vector of length N. * \param[in] N Number of columns in x and y, and the square size of I. * * x and y may be the same vector. 
*/ inline void mul(AngLin6* y, const InertiaS* I, const AngLin6* x, uint32_t N) { for (uint32_t i = 0; i < N; ++i) { const InertiaS& I_i = I[i]; const AngLin6& x_i = x[i]; AngLin6& y_i = y[i]; __m256 _x = _mm256_load_ps(&x_i.ang.x); __m128 _Il = _mm_load1_ps(&I_i.I); __m128 _Ih = _mm_load1_ps(&I_i.m); __m256 _I = _mm256_set_m128(_Ih,_Il); __m256 _y = _mm256_mul_ps(_I, _x); _mm256_store_ps(&y_i.ang.x, _y); } } };
4,263
C
35.444444
109
0.645555
NVIDIA-Omniverse/PhysX/blast/source/shared/stress_solver/anglin6.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "NvCMath.h" #include "simd/simd.h" /** * Holds an angular and linear component, for angular and linear velocities, accelerations, impulses, torques and forces, etc. */ SIMD_ALIGN_32( struct AngLin6 { SIMD_ALIGN_16(NvcVec3 ang); SIMD_ALIGN_16(NvcVec3 lin); } ); /** * Holds the angular and linear components of the calculated error. */ struct AngLin6ErrorSq { float ang, lin; }; /** * SISD AngLin6 operations. */ template<typename Scalar = float> struct AngLin6Ops { /** r = x + y */ inline void add(AngLin6& r, const AngLin6& x, const AngLin6& y) { r.ang = x.ang + y.ang; r.lin = x.lin + y.lin; } /** r = x - y */ inline void sub(AngLin6& r, const AngLin6& x, const AngLin6& y) { r.ang = x.ang - y.ang; r.lin = x.lin - y.lin; } /** r = c*x + y */ inline void madd(AngLin6& r, float c, const AngLin6& x, const AngLin6& y) { r.ang = c*x.ang + y.ang; r.lin = c*x.lin + y.lin; } /** r = -c*x + y */ inline void nmadd(AngLin6& r, float c, const AngLin6& x, const AngLin6& y) { r.ang = y.ang - c*x.ang; r.lin = y.lin - c*x.lin; } /** Vector add */ inline void vadd(AngLin6* r, const AngLin6* x, const AngLin6* y, uint32_t N) { while (N--) add(*r++, *x++, *y++); } /** Vector sub */ inline void vsub(AngLin6* r, const AngLin6* x, const AngLin6* y, uint32_t N) { while (N--) sub(*r++, *x++, *y++); } /** Vector madd */ inline void vmadd(AngLin6* r, float c, const AngLin6* x, const AngLin6* y, uint32_t N) { while (N--) madd(*r++, c, *x++, *y++); } /** Vector nmadd */ inline void vnmadd(AngLin6* r, float c, const AngLin6* x, const AngLin6* y, uint32_t N) { while (N--) nmadd(*r++, c, *x++, *y++); } /** * Vector-of-vectors dot product. * * \param[in] v Vector of AngLin6, of length N. * \param[in] w Vector of AngLin6, of length N. * \param[in] N Number of elements in v and w. * * return (v|w). 
*/ inline float dot(const AngLin6* v, const AngLin6* w, uint32_t N) { float result = 0.0f; for (uint32_t i = 0; i < N; ++i) { const AngLin6& v_i = v[i]; const AngLin6& w_i = w[i]; result += (v_i.ang|w_i.ang) + (v_i.lin|w_i.lin); } return result; } /** * Vector-of-vectors length squared. * * Equivalent to dot(v, v N), but could be faster in some cases * * \param[in] v Vector of AngLin6, of length N. * \param[in] N Number of elements in v. * * return |v|^2. */ inline float length_sq(const AngLin6* v, uint32_t N) { float result = 0.0f; for (uint32_t i = 0; i < N; ++i) { const AngLin6& v_i = v[i]; result += (v_i.ang|v_i.ang) + (v_i.lin|v_i.lin); } return result; } /** * Vector-of-vectors length squared, split into angular and linear contributions. * * \param[out] error_sq Sum of the squared angular and linear parts of v. * \param[in] v Vector of AngLin6, of length N. * \param[in] N Number of elements in v. * * \return the sum of the squared angular and linear errors, error.ang + error.lin. */ inline float calculate_error(AngLin6ErrorSq& error_sq, const AngLin6* v, uint32_t N) { error_sq.ang = error_sq.lin = 0.0f; for (uint32_t i = 0; i < N; ++i) { const AngLin6& v_i = v[i]; error_sq.ang += v_i.ang|v_i.ang; error_sq.lin += v_i.lin|v_i.lin; } return error_sq.ang + error_sq.lin; } }; /** * SIMD AngLin6 operations. */ template<> struct AngLin6Ops<__m128> { /** r = x + y */ inline void add(AngLin6& r, const AngLin6& x, const AngLin6& y) { __m256 _x = _mm256_load_ps(&x.ang.x); __m256 _y = _mm256_load_ps(&y.ang.x); __m256 _r = _mm256_add_ps(_x, _y); _mm256_store_ps(&r.ang.x, _r); } /** r = x - y */ inline void sub(AngLin6& r, const AngLin6& x, const AngLin6& y) { __m256 _x = _mm256_load_ps(&x.ang.x); __m256 _y = _mm256_load_ps(&y.ang.x); __m256 _r = _mm256_sub_ps(_x, _y); _mm256_store_ps(&r.ang.x, _r); } /** r = c*x + y */ inline void madd(AngLin6& r, __m128 c, const AngLin6& x, const AngLin6& y) { __m256 _c = _mm256_set_m128(c, c); __m256 _x = _mm256_load_ps(&x.ang.x); __m256 _y = _mm256_load_ps(&y.ang.x); __m256 _r = _mm256_fmadd_ps(_c, _x, _y); _mm256_store_ps(&r.ang.x, _r); } /** r = -c*x + y */ inline void nmadd(AngLin6& r, __m128 c, const AngLin6& x, const AngLin6& y) { __m256 _c = _mm256_set_m128(c, c); __m256 _x = _mm256_load_ps(&x.ang.x); __m256 _y = _mm256_load_ps(&y.ang.x); __m256 _r = _mm256_fnmadd_ps(_c, _x, _y); _mm256_store_ps(&r.ang.x, _r); } /** Vector add */ inline void vadd(AngLin6* r, const AngLin6* x, const AngLin6* y, uint32_t N) { while (N--) add(*r++, *x++, *y++); } /** Vector sub */ inline void vsub(AngLin6* r, const AngLin6* x, const AngLin6* y, uint32_t N) { while (N--) sub(*r++, *x++, *y++); } /** Vector madd */ inline void vmadd(AngLin6* r, __m128 c, const AngLin6* x, const AngLin6* y, uint32_t N) { while (N--) madd(*r++, c, *x++, *y++); } /** Vector nmadd */ inline void vnmadd(AngLin6* r, __m128 c, const AngLin6* x, const AngLin6* y, uint32_t N) { while (N--) nmadd(*r++, c, *x++, *y++); } /** * Vector-of-vectors dot product. * * \param[in] v Vector of AngLin6, of length N. * \param[in] w Vector of AngLin6, of length N. * \param[in] N Number of elements in v and w. * * return (v|w). 
*/ inline __m128 dot(const AngLin6* v, const AngLin6* w, uint32_t N) { __m256 _res = _mm256_setzero_ps(); for (uint32_t i = 0; i < N; ++i) { __m256 _v = _mm256_load_ps((const float*)(v+i)); __m256 _w = _mm256_load_ps((const float*)(w+i)); _res = _mm256_add_ps(_res, _mm256_dp_ps(_v, _w, 0x7f)); } return _mm_add_ps(_mm256_castps256_ps128(_res), _mm256_extractf128_ps(_res, 1)); } /** * Vector-of-vectors length squared. * * Equivalent to dot(v, v N), but could be faster in some cases * * \param[in] v Vector of AngLin6, of length N. * \param[in] N Number of elements in v. * * return |v|^2. */ inline __m128 length_sq(const AngLin6* v, uint32_t N) { __m256 _res = _mm256_setzero_ps(); for (uint32_t i = 0; i < N; ++i) { __m256 _v = _mm256_load_ps((const float*)(v+i)); _res = _mm256_add_ps(_res, _mm256_dp_ps(_v, _v, 0x7f)); } return _mm_add_ps(_mm256_castps256_ps128(_res), _mm256_extractf128_ps(_res, 1)); } /** * Vector-of-vectors length squared, split into angular and linear contributions. * * \param[out] error_sq Sum of the squared angular and linear parts of v. * \param[in] v Vector of AngLin6, of length N. * \param[in] N Number of elements in v. * * \return the sum of the squared angular and linear errors, error.ang + error.lin. */ inline __m128 calculate_error(AngLin6ErrorSq& error_sq, const AngLin6* v, uint32_t N) { __m256 _res = _mm256_setzero_ps(); for (uint32_t i = 0; i < N; ++i) { __m256 _v = _mm256_load_ps((const float*)(v+i)); _res = _mm256_add_ps(_res, _mm256_dp_ps(_v, _v, 0x7f)); } __m128 _ang_sq = _mm256_castps256_ps128(_res); __m128 _lin_sq = _mm256_extractf128_ps(_res, 1); _mm_store_ss(&error_sq.ang, _ang_sq); _mm_store_ss(&error_sq.lin, _lin_sq); return _mm_add_ps(_ang_sq, _lin_sq); } };
9,664
C
33.151943
139
0.552566
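A minimal usage sketch for the scalar AngLin6Ops specialization above, assuming anglin6.h and its NvCMath dependency are on the include path; the vectors and their values are illustrative. The SIMD specialization produces the same totals with _mm256_dp_ps and mask 0x7f, keeping the angular sum in the low 128-bit lane and the linear sum in the high lane.

#include "anglin6.h"
#include <cstdio>

int main()
{
    AngLin6 v[2] = {};
    v[0].ang = {1.0f, 0.0f, 0.0f};  v[0].lin = {0.0f, 2.0f, 0.0f};
    v[1].ang = {0.0f, 1.0f, 0.0f};  v[1].lin = {3.0f, 0.0f, 0.0f};

    AngLin6Ops<float> ops;
    AngLin6ErrorSq err;
    const float total = ops.calculate_error(err, v, 2);    // 1 + 4 + 1 + 9 = 15
    printf("|v|^2 = %g (ang %g, lin %g)\n", total, err.ang, err.lin);
    printf("dot(v, v) = %g\n", ops.dot(v, v, 2));           // same value as above
    return 0;
}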
NVIDIA-Omniverse/PhysX/blast/source/shared/stress_solver/stress.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "stress.h" #include "math/cgnr.h" #include "simd/simd_device_query.h" #include <algorithm> #include <cmath> #define MASS_AND_LENGTH_SCALING 1 typedef CGNR<AngLin6, AngLin6Ops<Float_Scalar>, BondMatrixS, BondMatrixOpsS<Float_Scalar>, Float_Scalar, AngLin6ErrorSq> CGNR_SISD; typedef CGNR<AngLin6, AngLin6Ops<SIMD_Scalar>, BondMatrixS, BondMatrixOpsS<SIMD_Scalar>, SIMD_Scalar, AngLin6ErrorSq> CGNR_SIMD; /** * StressProcessor static members */ // Check for SSE, FMA3, and AVX support const bool StressProcessor::s_use_simd = device_supports_instruction_set(InstructionSet::SSE) && // Basic SSE device_supports_instruction_set(InstructionSet::FMA3) && // Fused Multiply-Add instructions device_supports_instruction_set(InstructionSet::OSXSAVE) && // OS uses XSAVE and XRSTORE instructions allowing saving YMM registers on context switch device_supports_instruction_set(InstructionSet::AVX) && // Advanced Vector Extensions (256 bit operations) os_supports_avx_restore(); // OS has enabled the required extended state for AVX /** * StressProcessor methods */ void StressProcessor::prepare(const SolverNodeS* nodes, uint32_t N_nodes, const SolverBond* bonds, uint32_t N_bonds, const DataParams& params) { m_recip_sqrt_I.resize(N_nodes); m_couplings.resize(N_bonds); m_rhs.resize(N_nodes); m_B_scratch.resize(N_nodes); m_solver_cache.resize(s_use_simd ? CGNR_SIMD().required_cache_size(N_nodes, N_bonds) : CGNR_SISD().required_cache_size(N_nodes, N_bonds)); m_can_resume = false; // Calculate bond offsets and length scale uint32_t offsets_to_scale = 0; m_length_scale = 0.0f; for (uint32_t i = 0; i < N_bonds; ++i) { const SolverBond& bond = bonds[i]; const uint32_t b0 = bond.nodes[0]; const uint32_t b1 = bond.nodes[1]; Coupling& c = m_couplings[i]; NvcVec3 offset0, offset1; if (!params.centerBonds) { offset0 = nodes[b0].mass > 0 ? bond.centroid - nodes[b0].CoM : nodes[b1].CoM - bond.centroid; offset1 = nodes[b1].mass > 0 ? 
bond.centroid - nodes[b1].CoM : nodes[b0].CoM - bond.centroid; } else { if (nodes[b0].mass <= 0) { offset1 = bond.centroid - nodes[b1].CoM; offset0 = -offset1; } else if (nodes[b1].mass <= 0) { offset0 = bond.centroid - nodes[b0].CoM; offset1 = -offset0; } else { offset0 = 0.5f*(nodes[b1].CoM - nodes[b0].CoM); offset1 = -offset0; } } if (nodes[b0].mass > 0.0f) { ++offsets_to_scale; m_length_scale += std::sqrt(offset0|offset0); } if (nodes[b1].mass > 0.0f) { ++offsets_to_scale; m_length_scale += std::sqrt(offset1|offset1); } c.offset0 = offset0; c.node0 = bond.nodes[0]; c.offset1 = offset1; c.node1 = bond.nodes[1]; } #if MASS_AND_LENGTH_SCALING m_length_scale = offsets_to_scale ? m_length_scale / offsets_to_scale : 1.0f; #else m_length_scale = 1.0f; #endif // Scale offsets by length scale const float recip_length_scale = 1.0f/m_length_scale; for (uint32_t j = 0; j < N_bonds; ++j) { Coupling& coupling = m_couplings[j]; coupling.offset0 *= recip_length_scale; coupling.offset1 *= recip_length_scale; } // Set mass scale to geometric mean of the masses m_mass_scale = 0.0f; uint32_t nonzero_mass_count = 0; for (uint32_t i = 0; i < N_nodes; ++i) { if (nodes[i].mass > 0.0f) { m_mass_scale += std::log(nodes[i].mass); ++nonzero_mass_count; } } #if MASS_AND_LENGTH_SCALING m_mass_scale = nonzero_mass_count ? std::exp(m_mass_scale / nonzero_mass_count) : 1.0f; #else m_mass_scale = 1.0f; #endif // Generate I^-1/2 std::vector<InertiaS> invI(N_nodes); const float inertia_scale = m_mass_scale*m_length_scale*m_length_scale; if (!params.equalizeMasses) { for (uint32_t i = 0; i < N_nodes; ++i) { invI[i] = { nodes[i].inertia > 0.0f ? inertia_scale/nodes[i].inertia : 0.0f, nodes[i].mass > 0.0f ? m_mass_scale/nodes[i].mass : 0.0f }; m_recip_sqrt_I[i] = { std::sqrt(invI[i].I), std::sqrt(invI[i].m) }; } } else { for (uint32_t i = 0; i < N_nodes; ++i) { invI[i] = { nodes[i].inertia > 0.0f ? 1.0f : 0.0f, nodes[i].mass > 0.0f ? 1.0f : 0.0f }; m_recip_sqrt_I[i] = { std::sqrt(invI[i].I), std::sqrt(invI[i].m) }; } } // Create sparse matrix representation for B = (I^-1/2)*C m_B.set(m_couplings.data(), m_recip_sqrt_I.data(), m_B_scratch.data(), N_nodes, N_bonds); } int StressProcessor::solve(AngLin6* impulses, const AngLin6* velocities, const SolverParams& params, AngLin6ErrorSq* error_sq /* = nullptr */, bool resume /* = false */) { const InertiaS* sqrt_I_inv = m_recip_sqrt_I.data(); const uint32_t N_nodes = getNodeCount(); const uint32_t N_bonds = getBondCount(); void* cache = m_solver_cache.data(); const float recip_length_scale = 1.0f/m_length_scale; // Apply length and mass scaling to impulses if warm-starting if (params.warmStart) { const float recip_mass_scale = 1.0f/m_mass_scale; const float recip_linear_impulse_scale = recip_length_scale*recip_mass_scale; const float recip_angular_impulse_scale = recip_length_scale*recip_linear_impulse_scale; for (uint32_t j = 0; j < N_bonds; ++j) { impulses[j].ang *= recip_angular_impulse_scale; impulses[j].lin *= recip_linear_impulse_scale; } } // Calculate r.h.s. vector b = -(I^1/2)*velocities AngLin6* b = m_rhs.data(); for (uint32_t i = 0; i < N_nodes; ++i) { const InertiaS& I_i = sqrt_I_inv[i]; const AngLin6& v_i = velocities[i]; AngLin6& b_i = b[i]; b_i.ang = v_i.ang/(-(I_i.I > 0 ? I_i.I : 1.0f)); b_i.lin = (-recip_length_scale/(I_i.m > 0 ? I_i.m : 1.0f))*v_i.lin; } // Solve B*J = b for J, where B = (I^-1/2)*C and b = -(I^1/2)*v. 
// Since CGNR does this by solving (B^T)*B*J = (B^T)*b, this actually solves // (C^T)*(I^-1)*C*J = -(C^T)*v for J, which is the equation we really wanted to solve. const uint32_t maxIter = params.maxIter ? params.maxIter : 6*std::max(N_nodes, N_bonds); // Set solver warmth const unsigned warmth = params.warmStart ? (m_can_resume && resume ? 2 : 1) : 0; // Choose solver based on parameters const int result = s_use_simd ? CGNR_SIMD().solve(impulses, m_B, b, N_nodes, N_bonds, cache, error_sq, params.tolerance, maxIter, warmth) : CGNR_SISD().solve(impulses, m_B, b, N_nodes, N_bonds, cache, error_sq, params.tolerance, maxIter, warmth); // Undo length and mass scaling const float linear_impulse_scale = m_length_scale*m_mass_scale; const float angular_impulse_scale = m_length_scale*linear_impulse_scale; for (uint32_t j = 0; j < N_bonds; ++j) { impulses[j].ang *= angular_impulse_scale; impulses[j].lin *= linear_impulse_scale; } m_can_resume = true; return result; } bool StressProcessor::removeBond(uint32_t bondIndex) { if (bondIndex >= getBondCount()) return false; m_couplings[bondIndex] = m_couplings.back(); m_couplings.pop_back(); --m_B.N; m_can_resume = false; return true; }
9,381
C++
34.94636
165
0.609636
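A hedged usage sketch for the two entry points above, prepare() and solve(). It assumes stress.h declares SolverNodeS (CoM, mass, inertia), SolverBond (centroid, nodes) and free DataParams/SolverParams structs with the fields referenced in this file (centerBonds, equalizeMasses, warmStart, maxIter, tolerance); whether those parameter types are free or nested, and whether StressProcessor is default-constructible, depends on the header, which is not part of this record.

#include "stress.h"
#include <cstdio>

// Two nodes joined by one bond; solve for the bond impulse that opposes the
// relative velocity at the bond.  Field names follow their usage in
// StressProcessor::prepare()/solve(); the numeric values are illustrative.
int solve_single_bond()
{
    SolverNodeS nodes[2] = {};
    nodes[0].CoM = {0.0f, 0.0f, 0.0f};  nodes[0].mass = 1.0f;  nodes[0].inertia = 0.1f;
    nodes[1].CoM = {1.0f, 0.0f, 0.0f};  nodes[1].mass = 1.0f;  nodes[1].inertia = 0.1f;

    SolverBond bond = {};
    bond.centroid = {0.5f, 0.0f, 0.0f};
    bond.nodes[0] = 0;
    bond.nodes[1] = 1;

    StressProcessor processor;
    DataParams dataParams = {};               // centerBonds = false, equalizeMasses = false
    processor.prepare(nodes, 2, &bond, 1, dataParams);

    AngLin6 velocities[2] = {};
    velocities[1].lin = {1.0f, 0.0f, 0.0f};   // node 1 pulling away from node 0

    AngLin6 impulses[1] = {};                 // one entry per bond
    SolverParams solverParams = {};
    solverParams.tolerance = 1.0e-6f;
    solverParams.maxIter   = 0;               // 0 => 6*max(N_nodes, N_bonds) iterations
    solverParams.warmStart = false;

    const int iters = processor.solve(impulses, velocities, solverParams);
    printf("converged in %d iterations, lin impulse x = %g\n", iters, impulses[0].lin.x);
    return iters;
}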
NVIDIA-Omniverse/PhysX/blast/source/shared/stress_solver/math/cgnr.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include <stdint.h> #include <cstring> // for memcpy, memset #include "simd/simd.h" template<typename Elem, typename ElemOps, typename Mat, typename MatOps, typename Scalar = float, typename Error = float> struct CGNR { /** * Conjugate Gradient Normal Equation Residual (CGNR) solver for systems of M equations and N unknowns. * * Based on Matrix Computations (4th ed.) by Golub and Van Loan, section 11.3.9. * * Solves A*x = b. * * Template arguments: * Elem: the type of element used in the vectors x and b, and (implicitly) in the matrix A. * * ElemOps: a class which defines various functions on Elem type and vectors of Elem type. * * Mat: the explicit type used to represent the matrix, allowing e.g. for sparse representations. * * MatOps: a class which defines the functions rmul and lmul, which multiply a matrix of type Mat * by an Elem-typed vector on the right and left, respectively. The function signatures must be: * * void rmul(Elem* y, const Mat& A, const Elem* x, uint32_t M, uint32_t N); // y = A*x * void lmul(Elem* y, const Elem* x, const Mat& A, uint32_t M, uint32_t N); // y = x*A * * Scalar: set to float by default. May be used to keep all operations in a particular representation, e.g. SIMD registers. * * \param[out] x User-supplied Elem vector of length N, filled with the solution upon exit (if successful). * \param[in] A System M x N matrix of type Mat. * \param[in] b Right hand side of equation to be solved, an Elem vector of length M. * \param[in] M The number of rows in A and elements in b. * \param[in] N The number of columns in A and elements in x. * \param[in] cache Cache memory provided by the user, must be at least required_cache_size(M, N) bytes, and sizeof(Elem)-byte aligned. * \param[out] error_ptr If not null, returns the square magnitude error calculated from residual. * \param[in] tol (Optional) relative convergence threshold for |(A^T)*(A*x-b)|/|b|. Default value is 10^-6. 
* \param[in] max_it (Optional) the maximum number of internal iterations. If set to 0, the maximum is N. Default value is 0. * \param[in] warmth (Optional) valid values are 0, 1, and 2. 0 => cold, clears the x vector and ignores the cache. * 1 => warm, uses the x vector as a starting solution, but still ignores the cache. 2 => hot, uses the x * vector as a starting solution, and the cache must be valid. Default value is 0. * N.B. if warmth == 2, then this function must have been called previously, and the equation values * (x, A, b, M, and N) as well as the cache must not have been changed since the last call. * * return the number of iterations taken to converge, if it converges. Otherwise, returns minus the number of iterations before exiting. */ int solve ( Elem* x, const Mat& A, const Elem* b, uint32_t M, uint32_t N, void* cache, Error* error_ptr = nullptr, float tol = 1.e-6f, uint32_t max_it = 0, unsigned warmth = 0 ) { // Cache and temporary storage static_assert(sizeof(Elem) >= sizeof(Scalar), "sizeof(Elem) must be at least as great as sizeof(Scalar)."); float* z_last_sq_mem = (float*)cache; cache = (Elem*)z_last_sq_mem + 1; // Elem-sized storage float* delta_sq_mem = (float*)cache; cache = (Elem*)delta_sq_mem + 1; // Elem-sized storage Elem* z = (Elem*)cache; cache = z + N; // Array of length N Elem* p = (Elem*)cache; cache = p + N; // Array of length N Elem* r = (Elem*)cache; cache = r + M; // Array of length M Elem* s = (Elem*)cache; // Array of length M Scalar z_last_sq, delta_sq; load_float(z_last_sq, z_last_sq_mem); load_float(delta_sq, delta_sq_mem); if (warmth < 2) // Not hot { delta_sq = mul(tol*tol, ElemOps().length_sq(b, M)); // Calculate allowed residual length squared and cache it store_float(delta_sq_mem, delta_sq); memcpy(r, b, sizeof(Elem)*M); // Initialize residual r = b if (warmth) // Warm start, r = b - A*x { MatOps().rmul(s, A, x, M, N); ElemOps().vsub(r, r, s, M); } else memset(x, 0, sizeof(Elem)*N); // Cold start, x = 0 so r = b warmth = 0; // This lets p be initialized in the loop below } Error error; // Iterate if (!max_it) max_it = N; // Default to a maximum of N iterations uint32_t it = 0; do { MatOps().lmul(z, r, A, M, N); // Set z = (A^T)*r const Scalar z_sq = ElemOps().calculate_error(error, z, N); // Calculate residual (of modified equation) length squared if (le(z_sq, delta_sq)) break; // Terminate (convergence) if within tolerance if (warmth || warmth++) ElemOps().vmadd(p, div(z_sq, z_last_sq), p, z, N); // If not cold set p = z + (|z|^2/|z_last|^2)*p, and make warm hereafter else memcpy(p, z, sizeof(Elem)*N); // If cold set p = z z_last_sq = z_sq; MatOps().rmul(s, A, p, M, N); // Calculate s = A*p const Scalar mu = div(z_sq, ElemOps().length_sq(s, M)); // mu = |z|^2 / |A*p|^2 ElemOps().vmadd(x, mu, p, x, N); // x += mu*p ElemOps().vnmadd(r, mu, s, r, M); // r -= mu*s } while (++it < max_it); // Store off remainder of state (the rest was maintained in memory with array operations) store_float(z_last_sq_mem, z_last_sq); // Store off the error if requested if (error_ptr) *error_ptr = error; // Return the number of iterations used if successful. Otherwise return minus the number of iterations performed return it < max_it ? (int)it : -(int)it; } /** * \param[in] M See solve(...) for a description. * \param[in] N See solve(...) for a description. * * \return the required cache size (in bytes) for the given values of M and N. */ size_t required_cache_size(uint32_t M, uint32_t N) { return 2*(M+N+1)*sizeof(Elem); } };
8,613
C
54.217948
160
0.574132
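A minimal dense-matrix instantiation of the CGNR template above, solving a small 2x2 system with Elem = float and the default float Scalar/Error. DenseMat, DenseMatOps and DenseElemOps are hypothetical adapters written only for this sketch; they implement exactly the operations solve() calls (rmul, lmul, vsub, vmadd, vnmadd, length_sq, calculate_error). The scalar helpers mul, div, le, load_float and store_float come from simd/simd.h, which cgnr.h already includes, so the stress_solver directory is assumed to be on the include path.

#include "math/cgnr.h"
#include <vector>
#include <cstdio>

struct DenseMat { const float* a; };   // row-major M x N storage

struct DenseMatOps
{
    // y = A*x  (length M)
    void rmul(float* y, const DenseMat& A, const float* x, uint32_t M, uint32_t N)
    {
        for (uint32_t i = 0; i < M; ++i)
        {
            y[i] = 0.0f;
            for (uint32_t j = 0; j < N; ++j) y[i] += A.a[i*N + j]*x[j];
        }
    }
    // y = x*A, i.e. (A^T)*x  (length N)
    void lmul(float* y, const float* x, const DenseMat& A, uint32_t M, uint32_t N)
    {
        for (uint32_t j = 0; j < N; ++j)
        {
            y[j] = 0.0f;
            for (uint32_t i = 0; i < M; ++i) y[j] += A.a[i*N + j]*x[i];
        }
    }
};

struct DenseElemOps
{
    void vsub(float* r, const float* x, const float* y, uint32_t N)           { for (uint32_t i = 0; i < N; ++i) r[i] = x[i] - y[i]; }
    void vmadd(float* r, float c, const float* x, const float* y, uint32_t N) { for (uint32_t i = 0; i < N; ++i) r[i] = c*x[i] + y[i]; }
    void vnmadd(float* r, float c, const float* x, const float* y, uint32_t N){ for (uint32_t i = 0; i < N; ++i) r[i] = y[i] - c*x[i]; }
    float length_sq(const float* v, uint32_t N) { float s = 0.0f; for (uint32_t i = 0; i < N; ++i) s += v[i]*v[i]; return s; }
    float calculate_error(float& err, const float* v, uint32_t N) { err = length_sq(v, N); return err; }
};

int main()
{
    const float a[4] = { 4.0f, 1.0f, 1.0f, 3.0f };   // SPD 2x2 matrix
    const float b[2] = { 1.0f, 2.0f };
    float x[2];

    CGNR<float, DenseElemOps, DenseMat, DenseMatOps> solver;
    std::vector<char> cache(solver.required_cache_size(2, 2));
    const int iters = solver.solve(x, DenseMat{a}, b, 2, 2, cache.data());
    printf("iterations: %d, x = (%g, %g)\n", iters, x[0], x[1]);  // expect roughly (0.0909, 0.6364)
    return iters >= 0 ? 0 : 1;
}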
NVIDIA-Omniverse/PhysX/blast/source/shared/stress_solver/simd/simd.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include <xmmintrin.h> #include <emmintrin.h> #include <immintrin.h> #if defined(__GNUC__) // missing with gcc #define _mm256_set_m128(vh, vl) _mm256_insertf128_ps(_mm256_castps128_ps256(vl), (vh), 1) #endif #define SIMD_ALIGN_16(code) NV_ALIGN_PREFIX(16) code NV_ALIGN_SUFFIX(16) #define SIMD_ALIGN_32(code) NV_ALIGN_PREFIX(32) code NV_ALIGN_SUFFIX(32) inline __m128 add(const __m128& a, const __m128& b) { return _mm_add_ps(a, b); } inline __m128 add(float a, const __m128& b) { return _mm_add_ps(_mm_load1_ps(&a), b); } inline __m128 add(const __m128& a, float b) { return _mm_add_ps(a, _mm_load1_ps(&b)); } inline float add(float a, float b) { return a + b; } inline __m128 sub(const __m128& a, const __m128& b) { return _mm_sub_ps(a, b); } inline __m128 sub(float a, const __m128& b) { return _mm_sub_ps(_mm_load1_ps(&a), b); } inline __m128 sub(const __m128& a, float b) { return _mm_sub_ps(a, _mm_load1_ps(&b)); } inline float sub(float a, float b) { return a - b; } inline __m128 mul(const __m128& a, const __m128& b) { return _mm_mul_ps(a, b); } inline __m128 mul(float a, const __m128& b) { return _mm_mul_ps(_mm_load1_ps(&a), b); } inline __m128 mul(const __m128& a, float b) { return _mm_mul_ps(a, _mm_load1_ps(&b)); } inline float mul(float a, float b) { return a * b; } inline __m128 div(const __m128& a, const __m128& b) { return _mm_div_ps(a, b); } inline __m128 div(float a, const __m128& b) { return _mm_div_ps(_mm_load1_ps(&a), b); } inline __m128 div(const __m128& a, float b) { return _mm_div_ps(a, _mm_load1_ps(&b)); } inline float div(float a, float b) { return a / b; } inline bool lt(const __m128& a, const __m128& b) { return !!_mm_comilt_ss(a, b); } inline bool gt(const __m128& a, const __m128& b) { return !!_mm_comigt_ss(a, b); } inline bool le(const __m128& a, const __m128& b) { return !!_mm_comile_ss(a, b); } inline bool ge(const __m128& a, const __m128& b) { return !!_mm_comige_ss(a, b); } inline bool eq(const __m128& a, const __m128& b) { return 
!!_mm_comieq_ss(a, b); } inline bool ne(const __m128& a, const __m128& b) { return !!_mm_comineq_ss(a, b); } inline bool lt(const float a, const float b) { return a < b; } inline bool gt(const float a, const float b) { return a > b; } inline bool le(const float a, const float b) { return a <= b; } inline bool ge(const float a, const float b) { return a >= b; } inline bool eq(const float a, const float b) { return a == b; } inline bool ne(const float a, const float b) { return a != b; } inline float to_float(const __m128& x) { float f; _mm_store_ss(&f, x); return f; } inline float to_float(float x) { return x; } inline void from_float(__m128& x, float y) { x = _mm_load1_ps(&y); } inline void from_float(float& x, float y) { x = y; } inline void set_zero(__m128& x) { x = _mm_setzero_ps(); } inline void set_zero(float& x) { x = 0.0f; } inline void store_float(float* mem, const __m128& f) { _mm_store_ps(mem, f); } inline void store_float(float* mem, float f) { *mem = f; } inline void load_float(__m128& f, const float* mem) { f = _mm_load_ps(mem); } inline void load_float(float& f, const float* mem) { f = *mem; } inline __m128 prep_cross3(const __m128& v) { return _mm_shuffle_ps(v, v, 0xc9); } // w z y x -> w x z y inline __m128 cross3(const __m128& v0, const __m128& v1) { __m128 prep0 = prep_cross3(v0); __m128 prep1 = prep_cross3(v1); __m128 res_shuffled = _mm_sub_ps(_mm_mul_ps(v0, prep1), _mm_mul_ps(prep0, v1)); return _mm_shuffle_ps(res_shuffled, res_shuffled, 0xc9); } inline __m128 cross3_prep0(const __m128& v0, const __m128& prep0, const __m128& v1) { __m128 prep1 = prep_cross3(v1); __m128 res_shuffled = _mm_sub_ps(_mm_mul_ps(v0, prep1), _mm_mul_ps(prep0, v1)); return _mm_shuffle_ps(res_shuffled, res_shuffled, 0xc9); } inline __m128 cross3_prep1(const __m128& v0, const __m128& v1, const __m128& prep1) { __m128 prep0 = prep_cross3(v0); __m128 res_shuffled = _mm_sub_ps(_mm_mul_ps(v0, prep1), _mm_mul_ps(prep0, v1)); return _mm_shuffle_ps(res_shuffled, res_shuffled, 0xc9); }
5,930
C
50.12931
103
0.631872
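A small check of the cross3 helper above, assuming simd.h is reachable via the stress_solver include path. Vectors are packed as (x, y, z, unused) from the low lane upward; the example computes (1,0,0) x (0,1,0) = (0,0,1) and notes that the comparison helpers only look at the lowest lane.

#include "simd/simd.h"
#include <cstdio>

int main()
{
    // _mm_set_ps takes lanes high-to-low, so the arguments read (w, z, y, x).
    __m128 a = _mm_set_ps(0.0f, 0.0f, 0.0f, 1.0f);   // (1, 0, 0)
    __m128 b = _mm_set_ps(0.0f, 0.0f, 1.0f, 0.0f);   // (0, 1, 0)

    float c[4];
    _mm_storeu_ps(c, cross3(a, b));
    printf("a x b = (%g, %g, %g)\n", c[0], c[1], c[2]);    // expected (0, 0, 1)

    // The comparison helpers use scalar (lowest-lane) semantics.
    printf("lt(a, b) on lane 0: %d\n", lt(a, b) ? 1 : 0);  // 1.0 < 0.0 -> 0
    return 0;
}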
NVIDIA-Omniverse/PhysX/blast/source/shared/stress_solver/simd/simd_device_query.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include <utility> #include <stdio.h> inline static constexpr uint32_t instSetCode(uint8_t fn, uint8_t bitset, uint8_t bit) { return (uint32_t)fn << 16 | (uint32_t)bitset << 8 | (uint32_t)bit; } inline static void extractInstSetBitsetAndBit(int& fn, int& bitset, int& bit, uint32_t code) { fn = (int)(code >> 16); bitset = (int)(code >> 8)&0xff; bit = (int)(code & 0xff); } struct InstructionSet { enum Enum { MMX = instSetCode(1, 3, 23), SSE = instSetCode(1, 3, 25), SSE2 = instSetCode(1, 3, 26), SSE3 = instSetCode(1, 2, 0), SSSE3 = instSetCode(1, 2, 9), SSE4_1 = instSetCode(1, 2, 19), SSE4_2 = instSetCode(1, 2, 20), OSXSAVE = instSetCode(1, 2, 27), AVX = instSetCode(1, 2, 28), AVX2 = instSetCode(7, 1, 5), FMA3 = instSetCode(1, 2, 12), AVX512F = instSetCode(7, 1, 16), AVX512PF = instSetCode(7, 1, 26), AVX512ER = instSetCode(7, 1, 27), AVX512CD = instSetCode(7, 1, 28) }; }; #define InstructionSetEntry(_name) { #_name, InstructionSet::_name } constexpr std::pair<const char*, uint32_t> sInstructionSetLookup[] = { InstructionSetEntry(MMX), InstructionSetEntry(SSE), InstructionSetEntry(SSE2), InstructionSetEntry(SSE3), InstructionSetEntry(SSSE3), InstructionSetEntry(SSE4_1), InstructionSetEntry(SSE4_2), InstructionSetEntry(OSXSAVE), InstructionSetEntry(AVX), InstructionSetEntry(AVX2), InstructionSetEntry(FMA3), InstructionSetEntry(AVX512F), InstructionSetEntry(AVX512PF), InstructionSetEntry(AVX512ER), InstructionSetEntry(AVX512CD), }; #if NV_WINDOWS_FAMILY #include <intrin.h> // for __cpuidex inline void cpuid(int cpui[4], int fn) { __cpuidex(cpui, fn, 0); } inline bool os_supports_avx_restore() { return ((uint32_t)_xgetbv(0) & 6) == 6; } #else #include <cpuid.h> // for __cpuid_count inline void cpuid(int cpui[4], int fn) { __cpuid_count(fn, 0, cpui[0], cpui[1], cpui[2], cpui[3]); } inline bool os_supports_avx_restore() { uint32_t xcr0; __asm__("xgetbv" : "=a" (xcr0) : "c" (0) : "%edx"); return (xcr0 & 6) == 6; } #endif static bool device_supports_instruction_set(uint32_t 
inst_set)
{
    int fn, bitset, bit;
    extractInstSetBitsetAndBit(fn, bitset, bit, inst_set);
    int cpui[4];
    cpuid(cpui, 0);
    if (cpui[0] < fn) return false;
    cpuid(cpui, fn);
    return !!((cpui[bitset] >> bit) & 1);
}

static void print_supported_instruction_sets()
{
    printf("Supported instruction sets:\n");
    for (std::pair<const char*, uint32_t> entry : sInstructionSetLookup)
    {
        printf("%s: %s\n", entry.first, device_supports_instruction_set(entry.second) ? "yes" : "no");
    }
}
4,326
C
32.284615
102
0.676375
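For context, a small driver exercising the query helpers above; the chain of checks mirrors the gate stress.cpp uses for its SIMD path. The extra <cstdint> include is defensive, since the header uses uint32_t.

#include <cstdint>
#include "simd/simd_device_query.h"
#include <cstdio>

int main()
{
    print_supported_instruction_sets();

    // Same sequence of checks as StressProcessor::s_use_simd in stress.cpp.
    const bool use_simd =
        device_supports_instruction_set(InstructionSet::SSE) &&
        device_supports_instruction_set(InstructionSet::FMA3) &&
        device_supports_instruction_set(InstructionSet::OSXSAVE) &&
        device_supports_instruction_set(InstructionSet::AVX) &&
        os_supports_avx_restore();

    printf("AVX/FMA stress-solver path available: %s\n", use_simd ? "yes" : "no");
    return 0;
}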
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFileBuffer/include/NsMemoryBuffer.h
/* * Copyright 2009-2011 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. Users and possessors of this source code * are hereby granted a nonexclusive, royalty-free license to use this code * in individual and commercial software. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. * * Any use of this source code in individual and commercial software must * include, in the user documentation and internal comments to the code, * the above Disclaimer and U.S. Government End Users Notice. 
*/ #ifndef NS_MEMORY_BUFFER_H #define NS_MEMORY_BUFFER_H #include "Ns.h" #include "NsUserAllocated.h" #include "NsAlignedMalloc.h" #include "NvFileBuf.h" #include "NvAssert.h" namespace nvidia { namespace general_NvIOStream2 { using namespace shdfnd; const uint32_t BUFFER_SIZE_DEFAULT = 4096; //Use this class if you want to use your own allocator template<class Allocator> class NvMemoryBufferBase : public NvFileBuf, public Allocator { NV_NOCOPY(NvMemoryBufferBase) void init(const void *readMem, uint32_t readLen) { mAllocator = this; mReadBuffer = mReadLoc = static_cast<const uint8_t *>(readMem); mReadStop = &mReadLoc[readLen]; mWriteBuffer = mWriteLoc = mWriteStop = NULL; mWriteBufferSize = 0; mDefaultWriteBufferSize = BUFFER_SIZE_DEFAULT; mOpenMode = OPEN_READ_ONLY; mSeekType = SEEKABLE_READ; } void init(uint32_t defaultWriteBufferSize) { mAllocator = this; mReadBuffer = mReadLoc = mReadStop = NULL; mWriteBuffer = mWriteLoc = mWriteStop = NULL; mWriteBufferSize = 0; mDefaultWriteBufferSize = defaultWriteBufferSize; mOpenMode = OPEN_READ_WRITE_NEW; mSeekType = SEEKABLE_READWRITE; } public: NvMemoryBufferBase(const void *readMem,uint32_t readLen) { init(readMem, readLen); } NvMemoryBufferBase(const void *readMem,uint32_t readLen, const Allocator &alloc): Allocator(alloc) { init(readMem, readLen); } NvMemoryBufferBase(uint32_t defaultWriteBufferSize = BUFFER_SIZE_DEFAULT) { init(defaultWriteBufferSize); } NvMemoryBufferBase(uint32_t defaultWriteBufferSize, const Allocator &alloc): Allocator(alloc) { init(defaultWriteBufferSize); } virtual ~NvMemoryBufferBase(void) { reset(); } void setAllocator(Allocator *allocator) { mAllocator = allocator; } void initWriteBuffer(uint32_t size) { if ( mWriteBuffer == NULL ) { if ( size < mDefaultWriteBufferSize ) size = mDefaultWriteBufferSize; mWriteBuffer = static_cast<uint8_t *>(mAllocator->allocate(size)); NV_ASSERT( mWriteBuffer ); mWriteLoc = mWriteBuffer; mWriteStop = &mWriteBuffer[size]; mWriteBufferSize = size; mReadBuffer = mWriteBuffer; mReadStop = &mWriteBuffer[size]; mReadLoc = mWriteBuffer; } } void reset(void) { mAllocator->deallocate(mWriteBuffer); mWriteBuffer = NULL; mWriteBufferSize = 0; mWriteLoc = NULL; mWriteStop = NULL; mReadBuffer = NULL; mReadStop = NULL; mReadLoc = NULL; } virtual OpenMode getOpenMode(void) const { return mOpenMode; } SeekType isSeekable(void) const { return mSeekType; } virtual uint32_t read(void* buffer, uint32_t size) { if ( (mReadLoc+size) > mReadStop ) { size = uint32_t(mReadStop - mReadLoc); } if ( size != 0 ) { memmove(buffer,mReadLoc,size); mReadLoc+=size; } return size; } virtual uint32_t peek(void* buffer, uint32_t size) { if ( (mReadLoc+size) > mReadStop ) { size = uint32_t(mReadStop - mReadLoc); } if ( size != 0 ) { memmove(buffer,mReadLoc,size); } return size; } virtual uint32_t write(const void* buffer, uint32_t size) { NV_ASSERT( mOpenMode == OPEN_READ_WRITE_NEW ); if ( mOpenMode == OPEN_READ_WRITE_NEW ) { if ( (mWriteLoc+size) > mWriteStop ) growWriteBuffer(size); memmove(mWriteLoc,buffer,size); mWriteLoc+=size; mReadStop = mWriteLoc; } else { size = 0; } return size; } NV_INLINE const uint8_t * getReadLoc(void) const { return mReadLoc; } NV_INLINE void advanceReadLoc(uint32_t len) { NV_ASSERT(mReadBuffer); if ( mReadBuffer ) { mReadLoc+=len; if ( mReadLoc >= mReadStop ) { mReadLoc = mReadStop; } } } virtual uint32_t tellRead(void) const { uint32_t ret=0; if ( mReadBuffer ) { ret = uint32_t(mReadLoc-mReadBuffer); } return ret; } virtual uint32_t tellWrite(void) const { return 
uint32_t(mWriteLoc-mWriteBuffer); } virtual uint32_t seekRead(uint32_t loc) { uint32_t ret = 0; NV_ASSERT(mReadBuffer); if ( mReadBuffer ) { mReadLoc = &mReadBuffer[loc]; if ( mReadLoc >= mReadStop ) { mReadLoc = mReadStop; } ret = uint32_t(mReadLoc-mReadBuffer); } return ret; } virtual uint32_t seekWrite(uint32_t loc) { uint32_t ret = 0; NV_ASSERT( mOpenMode == OPEN_READ_WRITE_NEW ); if ( mWriteBuffer ) { if ( loc > mWriteBufferSize ) { mWriteLoc = mWriteStop; growWriteBuffer(loc - mWriteBufferSize); } mWriteLoc = &mWriteBuffer[loc]; ret = uint32_t(mWriteLoc-mWriteBuffer); } return ret; } virtual void flush(void) { } virtual uint32_t getFileLength(void) const { uint32_t ret = 0; if ( mReadBuffer ) { ret = uint32_t(mReadStop-mReadBuffer); } else if ( mWriteBuffer ) { ret = uint32_t(mWriteLoc-mWriteBuffer); } return ret; } uint32_t getWriteBufferSize(void) const { return uint32_t(mWriteLoc-mWriteBuffer); } void setWriteLoc(uint8_t *writeLoc) { NV_ASSERT(writeLoc >= mWriteBuffer && writeLoc < mWriteStop ); mWriteLoc = writeLoc; mReadStop = mWriteLoc; } const uint8_t * getWriteBuffer(void) const { return mWriteBuffer; } /** * Attention: if you use aligned allocator you cannot free memory with NV_FREE macros instead use deallocate method from base */ uint8_t * getWriteBufferOwnership(uint32_t &dataLen) // return the write buffer, and zero it out, the caller is taking ownership of the memory { uint8_t *ret = mWriteBuffer; dataLen = uint32_t(mWriteLoc-mWriteBuffer); mWriteBuffer = NULL; mWriteLoc = NULL; mWriteStop = NULL; mWriteBufferSize = 0; return ret; } void alignRead(uint32_t a) { uint32_t loc = tellRead(); uint32_t aloc = ((loc+(a-1))/a)*a; if ( aloc != loc ) { seekRead(aloc); } } void alignWrite(uint32_t a) { uint32_t loc = tellWrite(); uint32_t aloc = ((loc+(a-1))/a)*a; if ( aloc != loc ) { seekWrite(aloc); } } private: // double the size of the write buffer or at least as large as the 'size' value passed in. 
void growWriteBuffer(uint32_t size) { if ( mWriteBuffer == NULL ) { if ( size < mDefaultWriteBufferSize ) size = mDefaultWriteBufferSize; initWriteBuffer(size); } else { uint32_t oldWriteIndex = uint32_t(mWriteLoc - mWriteBuffer); uint32_t newSize = mWriteBufferSize*2; uint32_t avail = newSize-oldWriteIndex; if ( size >= avail ) newSize = newSize+size; uint8_t *writeBuffer = static_cast<uint8_t *>(mAllocator->allocate(newSize)); NV_ASSERT( writeBuffer ); memmove(writeBuffer,mWriteBuffer,mWriteBufferSize); mAllocator->deallocate(mWriteBuffer); mWriteBuffer = writeBuffer; mWriteBufferSize = newSize; mWriteLoc = &mWriteBuffer[oldWriteIndex]; mWriteStop = &mWriteBuffer[mWriteBufferSize]; uint32_t oldReadLoc = uint32_t(mReadLoc-mReadBuffer); mReadBuffer = mWriteBuffer; mReadStop = mWriteLoc; mReadLoc = &mReadBuffer[oldReadLoc]; } } const uint8_t *mReadBuffer; const uint8_t *mReadLoc; const uint8_t *mReadStop; uint8_t *mWriteBuffer; uint8_t *mWriteLoc; uint8_t *mWriteStop; uint32_t mWriteBufferSize; uint32_t mDefaultWriteBufferSize; Allocator *mAllocator; OpenMode mOpenMode; SeekType mSeekType; }; class NvMemoryBufferAllocator { public: NvMemoryBufferAllocator(uint32_t a = 0) : alignment(a) {} virtual void * allocate(uint32_t size) { switch(alignment) { case 0: return NV_ALLOC(size, NV_DEBUG_EXP("NvMemoryBufferAllocator")); case 16 : return nvidia::AlignedAllocator<16>().allocate(size, __FILE__, __LINE__); case 32 : return nvidia::AlignedAllocator<32>().allocate(size, __FILE__, __LINE__); case 64 : return nvidia::AlignedAllocator<64>().allocate(size, __FILE__, __LINE__); case 128 : return nvidia::AlignedAllocator<128>().allocate(size, __FILE__, __LINE__); default : NV_ASSERT(0); } return NULL; } virtual void deallocate(void *mem) { switch(alignment) { case 0: NV_FREE(mem); break; case 16 : nvidia::AlignedAllocator<16>().deallocate(mem); break; case 32 : nvidia::AlignedAllocator<32>().deallocate(mem); break; case 64 : nvidia::AlignedAllocator<64>().deallocate(mem); break; case 128 : nvidia::AlignedAllocator<128>().deallocate(mem); break; default : NV_ASSERT(0); } } virtual ~NvMemoryBufferAllocator(void) {} private: NvMemoryBufferAllocator& operator=(const NvMemoryBufferAllocator&); const uint32_t alignment; }; //Use this class if you want to use PhysX memory allocator class NsMemoryBuffer: public NvMemoryBufferBase<NvMemoryBufferAllocator>, public UserAllocated { NV_NOCOPY(NsMemoryBuffer) typedef NvMemoryBufferBase<NvMemoryBufferAllocator> BaseClass; public: NsMemoryBuffer(const void *readMem,uint32_t readLen): BaseClass(readMem, readLen) {} NsMemoryBuffer(const void *readMem,uint32_t readLen, uint32_t alignment): BaseClass(readMem, readLen, NvMemoryBufferAllocator(alignment)) {} NsMemoryBuffer(uint32_t defaultWriteBufferSize=BUFFER_SIZE_DEFAULT): BaseClass(defaultWriteBufferSize) {} NsMemoryBuffer(uint32_t defaultWriteBufferSize,uint32_t alignment): BaseClass(defaultWriteBufferSize, NvMemoryBufferAllocator(alignment)) {} }; } using namespace general_NvIOStream2; } #endif // NV_MEMORY_BUFFER_H
13,123
C
27.655022
146
0.592776
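A short round-trip sketch for NsMemoryBuffer, assuming the NvBlast shared headers it pulls in (Ns.h, NsUserAllocated.h, NsAlignedMalloc.h, NvFileBuf.h) are on the include path. The store*/read* helpers come from the NvFileBuf base class; the write buffer is grown on demand and the same bytes are exposed for reading.

#include "NsMemoryBuffer.h"
#include <cstdio>

void memory_buffer_roundtrip()
{
    nvidia::NsMemoryBuffer buffer;          // default 4096-byte write buffer
    buffer.storeDword(42);
    buffer.storeFloat(3.5f);

    buffer.seekRead(0);                     // rewind the read cursor
    const uint32_t i = buffer.readDword();  // 42
    const float    f = buffer.readFloat();  // 3.5f
    printf("read back %u and %g (stream length %u bytes)\n", i, f, buffer.getFileLength());
}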
NVIDIA-Omniverse/PhysX/blast/source/shared/NsFileBuffer/include/NvFileBuf.h
/* * Copyright 2009-2011 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. Users and possessors of this source code * are hereby granted a nonexclusive, royalty-free license to use this code * in individual and commercial software. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. * * Any use of this source code in individual and commercial software must * include, in the user documentation and internal comments to the code, * the above Disclaimer and U.S. Government End Users Notice. */ #ifndef NV_FILE_BUF_H #define NV_FILE_BUF_H #include "NvCTypes.h" /** \addtogroup foundation @{ */ namespace nvidia { namespace general_NvIOStream2 { NV_PUSH_PACK_DEFAULT /** \brief Callback class for data serialization. The user needs to supply an NvFileBuf implementation to a number of methods to allow the SDK to read or write chunks of binary data. This allows flexibility for the source/destination of the data. For example the NvFileBuf could store data in a file, memory buffer or custom file format. \note It is the users responsibility to ensure that the data is written to the appropriate offset. */ class NvFileBuf { public: enum EndianMode { ENDIAN_NONE = 0, // do no conversion for endian mode ENDIAN_BIG = 1, // always read/write data as natively big endian (Power PC, etc.) ENDIAN_LITTLE = 2 // always read/write data as natively little endian (Intel, etc.) Default Behavior! }; NvFileBuf(EndianMode mode=ENDIAN_LITTLE) { setEndianMode(mode); } virtual ~NvFileBuf(void) { } /** \brief Declares a constant to seek to the end of the stream. 
* * Does not support streams longer than 32 bits */ static const uint32_t STREAM_SEEK_END=0xFFFFFFFF; enum OpenMode { OPEN_FILE_NOT_FOUND, OPEN_READ_ONLY, // open file buffer stream for read only access OPEN_WRITE_ONLY, // open file buffer stream for write only access OPEN_READ_WRITE_NEW, // open a new file for both read/write access OPEN_READ_WRITE_EXISTING // open an existing file for both read/write access }; virtual OpenMode getOpenMode(void) const = 0; bool isOpen(void) const { return getOpenMode()!=OPEN_FILE_NOT_FOUND; } enum SeekType { SEEKABLE_NO = 0, SEEKABLE_READ = 0x1, SEEKABLE_WRITE = 0x2, SEEKABLE_READWRITE = 0x3 }; virtual SeekType isSeekable(void) const = 0; void setEndianMode(EndianMode e) { mEndianMode = e; if ( (e==ENDIAN_BIG && !isBigEndian() ) || (e==ENDIAN_LITTLE && isBigEndian() ) ) { mEndianSwap = true; } else { mEndianSwap = false; } } EndianMode getEndianMode(void) const { return mEndianMode; } virtual uint32_t getFileLength(void) const = 0; /** \brief Seeks the stream to a particular location for reading * * If the location passed exceeds the length of the stream, then it will seek to the end. * Returns the location it ended up at (useful if you seek to the end) to get the file position */ virtual uint32_t seekRead(uint32_t loc) = 0; /** \brief Seeks the stream to a particular location for writing * * If the location passed exceeds the length of the stream, then it will seek to the end. * Returns the location it ended up at (useful if you seek to the end) to get the file position */ virtual uint32_t seekWrite(uint32_t loc) = 0; /** \brief Reads from the stream into a buffer. \param[out] mem The buffer to read the stream into. \param[in] len The number of bytes to stream into the buffer \return Returns the actual number of bytes read. If not equal to the length requested, then reached end of stream. */ virtual uint32_t read(void *mem,uint32_t len) = 0; /** \brief Reads from the stream into a buffer but does not advance the read location. \param[out] mem The buffer to read the stream into. \param[in] len The number of bytes to stream into the buffer \return Returns the actual number of bytes read. If not equal to the length requested, then reached end of stream. */ virtual uint32_t peek(void *mem,uint32_t len) = 0; /** \brief Writes a buffer of memory to the stream \param[in] mem The address of a buffer of memory to send to the stream. \param[in] len The number of bytes to send to the stream. \return Returns the actual number of bytes sent to the stream. If not equal to the length specific, then the stream is full or unable to write for some reason. */ virtual uint32_t write(const void *mem,uint32_t len) = 0; /** \brief Reports the current stream location read aqccess. \return Returns the current stream read location. */ virtual uint32_t tellRead(void) const = 0; /** \brief Reports the current stream location for write access. \return Returns the current stream write location. */ virtual uint32_t tellWrite(void) const = 0; /** \brief Causes any temporarily cached data to be flushed to the stream. */ virtual void flush(void) = 0; /** \brief Close the stream. 
*/ virtual void close(void) {} void release(void) { delete this; } static NV_INLINE bool isBigEndian() { int32_t i = 1; return *(reinterpret_cast<char*>(&i))==0; } NV_INLINE void swap2Bytes(void* _data) const { char *data = static_cast<char *>(_data); char one_byte; one_byte = data[0]; data[0] = data[1]; data[1] = one_byte; } NV_INLINE void swap4Bytes(void* _data) const { char *data = static_cast<char *>(_data); char one_byte; one_byte = data[0]; data[0] = data[3]; data[3] = one_byte; one_byte = data[1]; data[1] = data[2]; data[2] = one_byte; } NV_INLINE void swap8Bytes(void *_data) const { char *data = static_cast<char *>(_data); char one_byte; one_byte = data[0]; data[0] = data[7]; data[7] = one_byte; one_byte = data[1]; data[1] = data[6]; data[6] = one_byte; one_byte = data[2]; data[2] = data[5]; data[5] = one_byte; one_byte = data[3]; data[3] = data[4]; data[4] = one_byte; } NV_INLINE void storeDword(uint32_t v) { if ( mEndianSwap ) swap4Bytes(&v); write(&v,sizeof(v)); } NV_INLINE void storeFloat(float v) { if ( mEndianSwap ) swap4Bytes(&v); write(&v,sizeof(v)); } NV_INLINE void storeDouble(double v) { if ( mEndianSwap ) swap8Bytes(&v); write(&v,sizeof(v)); } NV_INLINE void storeByte(uint8_t b) { write(&b,sizeof(b)); } NV_INLINE void storeWord(uint16_t w) { if ( mEndianSwap ) swap2Bytes(&w); write(&w,sizeof(w)); } uint8_t readByte(void) { uint8_t v=0; read(&v,sizeof(v)); return v; } uint16_t readWord(void) { uint16_t v=0; read(&v,sizeof(v)); if ( mEndianSwap ) swap2Bytes(&v); return v; } uint32_t readDword(void) { uint32_t v=0; read(&v,sizeof(v)); if ( mEndianSwap ) swap4Bytes(&v); return v; } float readFloat(void) { float v=0; read(&v,sizeof(v)); if ( mEndianSwap ) swap4Bytes(&v); return v; } double readDouble(void) { double v=0; read(&v,sizeof(v)); if ( mEndianSwap ) swap8Bytes(&v); return v; } private: bool mEndianSwap; // whether or not the endian should be swapped on the current platform EndianMode mEndianMode; // the current endian mode behavior for the stream }; NV_POP_PACK } // end of namespace using namespace general_NvIOStream2; namespace general_NvIOStream = general_NvIOStream2; } // end of namespace #endif // NV_FILE_BUF_H
9,603
C
27.330383
164
0.622514
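Finally, a sketch of the endian handling above, again using NsMemoryBuffer as the concrete NvFileBuf. With ENDIAN_BIG selected on a little-endian host, setEndianMode() sets mEndianSwap, so storeDword() byte-swaps before writing and readDword() swaps back on the way in.

#include "NsMemoryBuffer.h"
#include <cstdio>

void endian_demo()
{
    nvidia::NsMemoryBuffer buffer;
    buffer.setEndianMode(nvidia::NvFileBuf::ENDIAN_BIG);
    buffer.storeDword(0x01020304u);

    uint8_t raw[4] = {};
    buffer.seekRead(0);
    buffer.read(raw, 4);
    // On a little-endian host this prints 01 02 03 04 (big-endian byte order).
    printf("bytes on the wire: %02x %02x %02x %02x\n", raw[0], raw[1], raw[2], raw[3]);

    buffer.seekRead(0);
    printf("readDword() restores 0x%08x\n", buffer.readDword());   // 0x01020304
}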