file_path
stringlengths
21
207
content
stringlengths
5
1.02M
size
int64
5
1.02M
lang
stringclasses
9 values
avg_line_length
float64
1.33
100
max_line_length
int64
4
993
alphanum_fraction
float64
0.27
0.93
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/vhacdRaycastMesh.h
#ifndef RAYCAST_MESH_H #define RAYCAST_MESH_H #include <stdint.h> namespace VHACD { // Very simple brute force raycast against a triangle mesh. Tests every triangle; no hierachy. // Does a deep copy, always does calculations with full double float precision class RaycastMesh { public: static RaycastMesh * createRaycastMesh(uint32_t vcount, // The number of vertices in the source triangle mesh const double *vertices, // The array of vertex positions in the format x1,y1,z1..x2,y2,z2.. etc. uint32_t tcount, // The number of triangles in the source triangle mesh const uint32_t *indices); // The triangle indices in the format of i1,i2,i3 ... i4,i5,i6, ... static RaycastMesh * createRaycastMesh(uint32_t vcount, // The number of vertices in the source triangle mesh const float *vertices, // The array of vertex positions in the format x1,y1,z1..x2,y2,z2.. etc. uint32_t tcount, // The number of triangles in the source triangle mesh const uint32_t *indices); // The triangle indices in the format of i1,i2,i3 ... i4,i5,i6, ... virtual bool raycast(const double *from, // The starting point of the raycast const double *to, // The ending point of the raycast const double *closestToPoint, // The point to match the nearest hit location (can just be the 'from' location of no specific point) double *hitLocation, // The point where the ray hit nearest to the 'closestToPoint' location double *hitDistance) = 0; // The distance the ray traveled to the hit location virtual void release(void) = 0; protected: virtual ~RaycastMesh(void) { }; }; } // end of VHACD namespace #endif
1,853
C
45.349999
145
0.641662
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/vhacdMesh.h
/* Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #pragma once #ifndef VHACD_MESH_H #define VHACD_MESH_H #include "vhacdSArray.h" #include "vhacdVector.h" #define VHACD_DEBUG_MESH namespace VHACD { enum AXIS { AXIS_X = 0, AXIS_Y = 1, AXIS_Z = 2 }; struct Plane { double m_a; double m_b; double m_c; double m_d; AXIS m_axis; short m_index; }; #ifdef VHACD_DEBUG_MESH struct Material { Vec3<double> m_diffuseColor; double m_ambientIntensity; Vec3<double> m_specularColor; Vec3<double> m_emissiveColor; double m_shininess; double m_transparency; Material(void) { m_diffuseColor.X() = 0.5; m_diffuseColor.Y() = 0.5; m_diffuseColor.Z() = 0.5; m_specularColor.X() = 0.5; m_specularColor.Y() = 0.5; m_specularColor.Z() = 0.5; m_ambientIntensity = 0.4; m_emissiveColor.X() = 0.0; m_emissiveColor.Y() = 0.0; m_emissiveColor.Z() = 0.0; m_shininess = 0.4; m_transparency = 0.0; }; }; #endif // VHACD_DEBUG_MESH //! Triangular mesh data structure class Mesh { public: void AddPoint(const Vec3<double>& pt) { m_points.PushBack(pt); }; void SetPoint(size_t index, const Vec3<double>& pt) { m_points[index] = pt; }; const Vec3<double>& GetPoint(size_t index) const { return m_points[index]; }; Vec3<double>& GetPoint(size_t index) { return m_points[index]; }; size_t GetNPoints() const { return m_points.Size(); }; double* GetPoints() { return (double*)m_points.Data(); } // ugly const double* const GetPoints() const { return (double*)m_points.Data(); } // ugly const Vec3<double>* const GetPointsBuffer() const { return m_points.Data(); } // Vec3<double>* const GetPointsBuffer() { return m_points.Data(); } // void AddTriangle(const Vec3<int32_t>& tri) { m_triangles.PushBack(tri); }; void SetTriangle(size_t index, const Vec3<int32_t>& tri) { m_triangles[index] = tri; }; const Vec3<int32_t>& GetTriangle(size_t index) const { return m_triangles[index]; }; Vec3<int32_t>& GetTriangle(size_t index) { return m_triangles[index]; }; size_t GetNTriangles() const { return m_triangles.Size(); }; int32_t* GetTriangles() { return (int32_t*)m_triangles.Data(); } // ugly const int32_t* const GetTriangles() const { return (int32_t*)m_triangles.Data(); } // ugly const Vec3<int32_t>* const GetTrianglesBuffer() const { return m_triangles.Data(); } Vec3<int32_t>* const GetTrianglesBuffer() { return m_triangles.Data(); } const Vec3<double>& GetCenter() const { return m_center; } const Vec3<double>& GetMinBB() const { return m_minBB; } const Vec3<double>& GetMaxBB() const { return m_maxBB; } void ClearPoints() { m_points.Clear(); } void ClearTriangles() { m_triangles.Clear(); } void Clear() { ClearPoints(); ClearTriangles(); } void ResizePoints(size_t nPts) { m_points.Resize(nPts); } void ResizeTriangles(size_t nTri) { m_triangles.Resize(nTri); } void CopyPoints(SArray<Vec3<double> >& points) const { points = m_points; } double GetDiagBB() const { return m_diag; } double ComputeVolume() const; void ComputeConvexHull(const double* const pts, const size_t nPts); void Clip(const Plane& plane, SArray<Vec3<double> >& positivePart, SArray<Vec3<double> >& negativePart) const; bool IsInside(const Vec3<double>& pt) const; double ComputeDiagBB(); Vec3<double> &ComputeCenter(void); #ifdef VHACD_DEBUG_MESH bool LoadOFF(const std::string& fileName, bool invert); bool SaveVRML2(const std::string& fileName) const; bool SaveVRML2(std::ofstream& fout, const Material& material) const; bool SaveOFF(const std::string& fileName) const; #endif // VHACD_DEBUG_MESH //! Constructor. Mesh(); //! Destructor. ~Mesh(void); private: SArray<Vec3<double> > m_points; SArray<Vec3<int32_t> > m_triangles; Vec3<double> m_minBB; Vec3<double> m_maxBB; Vec3<double> m_center; double m_diag; }; } #endif
5,520
C
41.46923
756
0.683152
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/btVector3.h
/* Copyright (c) 2003-2006 Gino van den Bergen / Erwin Coumans http://continuousphysics.com/Bullet/ This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. */ #ifndef BT_VECTOR3_H #define BT_VECTOR3_H #include "btMinMax.h" #include "btScalar.h" #ifdef BT_USE_DOUBLE_PRECISION #define btVector3Data btVector3DoubleData #define btVector3DataName "btVector3DoubleData" #else #define btVector3Data btVector3FloatData #define btVector3DataName "btVector3FloatData" #endif //BT_USE_DOUBLE_PRECISION /**@brief btVector3 can be used to represent 3D points and vectors. * It has an un-used w component to suit 16-byte alignment when btVector3 is stored in containers. This extra component can be used by derived classes (Quaternion?) or by user * Ideally, this class should be replaced by a platform optimized SIMD version that keeps the data in registers */ ATTRIBUTE_ALIGNED16(class) btVector3 { public: #if defined(__SPU__) && defined(__CELLOS_LV2__) btScalar m_floats[4]; public: SIMD_FORCE_INLINE const vec_float4& get128() const { return *((const vec_float4*)&m_floats[0]); } public: #else //__CELLOS_LV2__ __SPU__ #ifdef BT_USE_SSE // _WIN32 union { __m128 mVec128; btScalar m_floats[4]; }; SIMD_FORCE_INLINE __m128 get128() const { return mVec128; } SIMD_FORCE_INLINE void set128(__m128 v128) { mVec128 = v128; } #else btScalar m_floats[4]; #endif #endif //__CELLOS_LV2__ __SPU__ public: /**@brief No initialization constructor */ SIMD_FORCE_INLINE btVector3() {} /**@brief Constructor from scalars * @param x X value * @param y Y value * @param z Z value */ SIMD_FORCE_INLINE btVector3(const btScalar& x, const btScalar& y, const btScalar& z) { m_floats[0] = x; m_floats[1] = y; m_floats[2] = z; m_floats[3] = btScalar(0.); } /**@brief Add a vector to this one * @param The vector to add to this one */ SIMD_FORCE_INLINE btVector3& operator+=(const btVector3& v) { m_floats[0] += v.m_floats[0]; m_floats[1] += v.m_floats[1]; m_floats[2] += v.m_floats[2]; return *this; } /**@brief Subtract a vector from this one * @param The vector to subtract */ SIMD_FORCE_INLINE btVector3& operator-=(const btVector3& v) { m_floats[0] -= v.m_floats[0]; m_floats[1] -= v.m_floats[1]; m_floats[2] -= v.m_floats[2]; return *this; } /**@brief Scale the vector * @param s Scale factor */ SIMD_FORCE_INLINE btVector3& operator*=(const btScalar& s) { m_floats[0] *= s; m_floats[1] *= s; m_floats[2] *= s; return *this; } /**@brief Inversely scale the vector * @param s Scale factor to divide by */ SIMD_FORCE_INLINE btVector3& operator/=(const btScalar& s) { btFullAssert(s != btScalar(0.0)); return * this *= btScalar(1.0) / s; } /**@brief Return the dot product * @param v The other vector in the dot product */ SIMD_FORCE_INLINE btScalar dot(const btVector3& v) const { return m_floats[0] * v.m_floats[0] + m_floats[1] * v.m_floats[1] + m_floats[2] * v.m_floats[2]; } /**@brief Return the length of the vector squared */ SIMD_FORCE_INLINE btScalar length2() const { return dot(*this); } /**@brief Return the length of the vector */ SIMD_FORCE_INLINE btScalar length() const { return btSqrt(length2()); } /**@brief Return the distance squared between the ends of this and another vector * This is symantically treating the vector like a point */ SIMD_FORCE_INLINE btScalar distance2(const btVector3& v) const; /**@brief Return the distance between the ends of this and another vector * This is symantically treating the vector like a point */ SIMD_FORCE_INLINE btScalar distance(const btVector3& v) const; SIMD_FORCE_INLINE btVector3& safeNormalize() { btVector3 absVec = this->absolute(); int32_t maxIndex = absVec.maxAxis(); if (absVec[maxIndex] > 0) { *this /= absVec[maxIndex]; return * this /= length(); } setValue(1, 0, 0); return *this; } /**@brief Normalize this vector * x^2 + y^2 + z^2 = 1 */ SIMD_FORCE_INLINE btVector3& normalize() { return * this /= length(); } /**@brief Return a normalized version of this vector */ SIMD_FORCE_INLINE btVector3 normalized() const; /**@brief Return a rotated version of this vector * @param wAxis The axis to rotate about * @param angle The angle to rotate by */ SIMD_FORCE_INLINE btVector3 rotate(const btVector3& wAxis, const btScalar angle) const; /**@brief Return the angle between this and another vector * @param v The other vector */ SIMD_FORCE_INLINE btScalar angle(const btVector3& v) const { btScalar s = btSqrt(length2() * v.length2()); btFullAssert(s != btScalar(0.0)); return btAcos(dot(v) / s); } /**@brief Return a vector will the absolute values of each element */ SIMD_FORCE_INLINE btVector3 absolute() const { return btVector3( btFabs(m_floats[0]), btFabs(m_floats[1]), btFabs(m_floats[2])); } /**@brief Return the cross product between this and another vector * @param v The other vector */ SIMD_FORCE_INLINE btVector3 cross(const btVector3& v) const { return btVector3( m_floats[1] * v.m_floats[2] - m_floats[2] * v.m_floats[1], m_floats[2] * v.m_floats[0] - m_floats[0] * v.m_floats[2], m_floats[0] * v.m_floats[1] - m_floats[1] * v.m_floats[0]); } SIMD_FORCE_INLINE btScalar triple(const btVector3& v1, const btVector3& v2) const { return m_floats[0] * (v1.m_floats[1] * v2.m_floats[2] - v1.m_floats[2] * v2.m_floats[1]) + m_floats[1] * (v1.m_floats[2] * v2.m_floats[0] - v1.m_floats[0] * v2.m_floats[2]) + m_floats[2] * (v1.m_floats[0] * v2.m_floats[1] - v1.m_floats[1] * v2.m_floats[0]); } /**@brief Return the axis with the smallest value * Note return values are 0,1,2 for x, y, or z */ SIMD_FORCE_INLINE int32_t minAxis() const { return m_floats[0] < m_floats[1] ? (m_floats[0] < m_floats[2] ? 0 : 2) : (m_floats[1] < m_floats[2] ? 1 : 2); } /**@brief Return the axis with the largest value * Note return values are 0,1,2 for x, y, or z */ SIMD_FORCE_INLINE int32_t maxAxis() const { return m_floats[0] < m_floats[1] ? (m_floats[1] < m_floats[2] ? 2 : 1) : (m_floats[0] < m_floats[2] ? 2 : 0); } SIMD_FORCE_INLINE int32_t furthestAxis() const { return absolute().minAxis(); } SIMD_FORCE_INLINE int32_t closestAxis() const { return absolute().maxAxis(); } SIMD_FORCE_INLINE void setInterpolate3(const btVector3& v0, const btVector3& v1, btScalar rt) { btScalar s = btScalar(1.0) - rt; m_floats[0] = s * v0.m_floats[0] + rt * v1.m_floats[0]; m_floats[1] = s * v0.m_floats[1] + rt * v1.m_floats[1]; m_floats[2] = s * v0.m_floats[2] + rt * v1.m_floats[2]; //don't do the unused w component // m_co[3] = s * v0[3] + rt * v1[3]; } /**@brief Return the linear interpolation between this and another vector * @param v The other vector * @param t The ration of this to v (t = 0 => return this, t=1 => return other) */ SIMD_FORCE_INLINE btVector3 lerp(const btVector3& v, const btScalar& t) const { return btVector3(m_floats[0] + (v.m_floats[0] - m_floats[0]) * t, m_floats[1] + (v.m_floats[1] - m_floats[1]) * t, m_floats[2] + (v.m_floats[2] - m_floats[2]) * t); } /**@brief Elementwise multiply this vector by the other * @param v The other vector */ SIMD_FORCE_INLINE btVector3& operator*=(const btVector3& v) { m_floats[0] *= v.m_floats[0]; m_floats[1] *= v.m_floats[1]; m_floats[2] *= v.m_floats[2]; return *this; } /**@brief Return the x value */ SIMD_FORCE_INLINE const btScalar& getX() const { return m_floats[0]; } /**@brief Return the y value */ SIMD_FORCE_INLINE const btScalar& getY() const { return m_floats[1]; } /**@brief Return the z value */ SIMD_FORCE_INLINE const btScalar& getZ() const { return m_floats[2]; } /**@brief Set the x value */ SIMD_FORCE_INLINE void setX(btScalar x) { m_floats[0] = x; }; /**@brief Set the y value */ SIMD_FORCE_INLINE void setY(btScalar y) { m_floats[1] = y; }; /**@brief Set the z value */ SIMD_FORCE_INLINE void setZ(btScalar z) { m_floats[2] = z; }; /**@brief Set the w value */ SIMD_FORCE_INLINE void setW(btScalar w) { m_floats[3] = w; }; /**@brief Return the x value */ SIMD_FORCE_INLINE const btScalar& x() const { return m_floats[0]; } /**@brief Return the y value */ SIMD_FORCE_INLINE const btScalar& y() const { return m_floats[1]; } /**@brief Return the z value */ SIMD_FORCE_INLINE const btScalar& z() const { return m_floats[2]; } /**@brief Return the w value */ SIMD_FORCE_INLINE const btScalar& w() const { return m_floats[3]; } //SIMD_FORCE_INLINE btScalar& operator[](int32_t i) { return (&m_floats[0])[i]; } //SIMD_FORCE_INLINE const btScalar& operator[](int32_t i) const { return (&m_floats[0])[i]; } ///operator btScalar*() replaces operator[], using implicit conversion. We added operator != and operator == to avoid pointer comparisons. SIMD_FORCE_INLINE operator btScalar*() { return &m_floats[0]; } SIMD_FORCE_INLINE operator const btScalar*() const { return &m_floats[0]; } SIMD_FORCE_INLINE bool operator==(const btVector3& other) const { return ((m_floats[3] == other.m_floats[3]) && (m_floats[2] == other.m_floats[2]) && (m_floats[1] == other.m_floats[1]) && (m_floats[0] == other.m_floats[0])); } SIMD_FORCE_INLINE bool operator!=(const btVector3& other) const { return !(*this == other); } /**@brief Set each element to the max of the current values and the values of another btVector3 * @param other The other btVector3 to compare with */ SIMD_FORCE_INLINE void setMax(const btVector3& other) { btSetMax(m_floats[0], other.m_floats[0]); btSetMax(m_floats[1], other.m_floats[1]); btSetMax(m_floats[2], other.m_floats[2]); btSetMax(m_floats[3], other.w()); } /**@brief Set each element to the min of the current values and the values of another btVector3 * @param other The other btVector3 to compare with */ SIMD_FORCE_INLINE void setMin(const btVector3& other) { btSetMin(m_floats[0], other.m_floats[0]); btSetMin(m_floats[1], other.m_floats[1]); btSetMin(m_floats[2], other.m_floats[2]); btSetMin(m_floats[3], other.w()); } SIMD_FORCE_INLINE void setValue(const btScalar& x, const btScalar& y, const btScalar& z) { m_floats[0] = x; m_floats[1] = y; m_floats[2] = z; m_floats[3] = btScalar(0.); } void getSkewSymmetricMatrix(btVector3 * v0, btVector3 * v1, btVector3 * v2) const { v0->setValue(0., -z(), y()); v1->setValue(z(), 0., -x()); v2->setValue(-y(), x(), 0.); } void setZero() { setValue(btScalar(0.), btScalar(0.), btScalar(0.)); } SIMD_FORCE_INLINE bool isZero() const { return m_floats[0] == btScalar(0) && m_floats[1] == btScalar(0) && m_floats[2] == btScalar(0); } SIMD_FORCE_INLINE bool fuzzyZero() const { return length2() < SIMD_EPSILON; } SIMD_FORCE_INLINE void serialize(struct btVector3Data & dataOut) const; SIMD_FORCE_INLINE void deSerialize(const struct btVector3Data& dataIn); SIMD_FORCE_INLINE void serializeFloat(struct btVector3FloatData & dataOut) const; SIMD_FORCE_INLINE void deSerializeFloat(const struct btVector3FloatData& dataIn); SIMD_FORCE_INLINE void serializeDouble(struct btVector3DoubleData & dataOut) const; SIMD_FORCE_INLINE void deSerializeDouble(const struct btVector3DoubleData& dataIn); }; /**@brief Return the sum of two vectors (Point symantics)*/ SIMD_FORCE_INLINE btVector3 operator+(const btVector3& v1, const btVector3& v2) { return btVector3(v1.m_floats[0] + v2.m_floats[0], v1.m_floats[1] + v2.m_floats[1], v1.m_floats[2] + v2.m_floats[2]); } /**@brief Return the elementwise product of two vectors */ SIMD_FORCE_INLINE btVector3 operator*(const btVector3& v1, const btVector3& v2) { return btVector3(v1.m_floats[0] * v2.m_floats[0], v1.m_floats[1] * v2.m_floats[1], v1.m_floats[2] * v2.m_floats[2]); } /**@brief Return the difference between two vectors */ SIMD_FORCE_INLINE btVector3 operator-(const btVector3& v1, const btVector3& v2) { return btVector3(v1.m_floats[0] - v2.m_floats[0], v1.m_floats[1] - v2.m_floats[1], v1.m_floats[2] - v2.m_floats[2]); } /**@brief Return the negative of the vector */ SIMD_FORCE_INLINE btVector3 operator-(const btVector3& v) { return btVector3(-v.m_floats[0], -v.m_floats[1], -v.m_floats[2]); } /**@brief Return the vector scaled by s */ SIMD_FORCE_INLINE btVector3 operator*(const btVector3& v, const btScalar& s) { return btVector3(v.m_floats[0] * s, v.m_floats[1] * s, v.m_floats[2] * s); } /**@brief Return the vector scaled by s */ SIMD_FORCE_INLINE btVector3 operator*(const btScalar& s, const btVector3& v) { return v * s; } /**@brief Return the vector inversely scaled by s */ SIMD_FORCE_INLINE btVector3 operator/(const btVector3& v, const btScalar& s) { btFullAssert(s != btScalar(0.0)); return v * (btScalar(1.0) / s); } /**@brief Return the vector inversely scaled by s */ SIMD_FORCE_INLINE btVector3 operator/(const btVector3& v1, const btVector3& v2) { return btVector3(v1.m_floats[0] / v2.m_floats[0], v1.m_floats[1] / v2.m_floats[1], v1.m_floats[2] / v2.m_floats[2]); } /**@brief Return the dot product between two vectors */ SIMD_FORCE_INLINE btScalar btDot(const btVector3& v1, const btVector3& v2) { return v1.dot(v2); } /**@brief Return the distance squared between two vectors */ SIMD_FORCE_INLINE btScalar btDistance2(const btVector3& v1, const btVector3& v2) { return v1.distance2(v2); } /**@brief Return the distance between two vectors */ SIMD_FORCE_INLINE btScalar btDistance(const btVector3& v1, const btVector3& v2) { return v1.distance(v2); } /**@brief Return the angle between two vectors */ SIMD_FORCE_INLINE btScalar btAngle(const btVector3& v1, const btVector3& v2) { return v1.angle(v2); } /**@brief Return the cross product of two vectors */ SIMD_FORCE_INLINE btVector3 btCross(const btVector3& v1, const btVector3& v2) { return v1.cross(v2); } SIMD_FORCE_INLINE btScalar btTriple(const btVector3& v1, const btVector3& v2, const btVector3& v3) { return v1.triple(v2, v3); } /**@brief Return the linear interpolation between two vectors * @param v1 One vector * @param v2 The other vector * @param t The ration of this to v (t = 0 => return v1, t=1 => return v2) */ SIMD_FORCE_INLINE btVector3 lerp(const btVector3& v1, const btVector3& v2, const btScalar& t) { return v1.lerp(v2, t); } SIMD_FORCE_INLINE btScalar btVector3::distance2(const btVector3& v) const { return (v - *this).length2(); } SIMD_FORCE_INLINE btScalar btVector3::distance(const btVector3& v) const { return (v - *this).length(); } SIMD_FORCE_INLINE btVector3 btVector3::normalized() const { return *this / length(); } SIMD_FORCE_INLINE btVector3 btVector3::rotate(const btVector3& wAxis, const btScalar angle) const { // wAxis must be a unit lenght vector btVector3 o = wAxis * wAxis.dot(*this); btVector3 x = *this - o; btVector3 y; y = wAxis.cross(*this); return (o + x * btCos(angle) + y * btSin(angle)); } class btVector4 : public btVector3 { public: SIMD_FORCE_INLINE btVector4() {} SIMD_FORCE_INLINE btVector4(const btScalar& x, const btScalar& y, const btScalar& z, const btScalar& w) : btVector3(x, y, z) { m_floats[3] = w; } SIMD_FORCE_INLINE btVector4 absolute4() const { return btVector4( btFabs(m_floats[0]), btFabs(m_floats[1]), btFabs(m_floats[2]), btFabs(m_floats[3])); } btScalar getW() const { return m_floats[3]; } SIMD_FORCE_INLINE int32_t maxAxis4() const { int32_t maxIndex = -1; btScalar maxVal = btScalar(-BT_LARGE_FLOAT); if (m_floats[0] > maxVal) { maxIndex = 0; maxVal = m_floats[0]; } if (m_floats[1] > maxVal) { maxIndex = 1; maxVal = m_floats[1]; } if (m_floats[2] > maxVal) { maxIndex = 2; maxVal = m_floats[2]; } if (m_floats[3] > maxVal) { maxIndex = 3; } return maxIndex; } SIMD_FORCE_INLINE int32_t minAxis4() const { int32_t minIndex = -1; btScalar minVal = btScalar(BT_LARGE_FLOAT); if (m_floats[0] < minVal) { minIndex = 0; minVal = m_floats[0]; } if (m_floats[1] < minVal) { minIndex = 1; minVal = m_floats[1]; } if (m_floats[2] < minVal) { minIndex = 2; minVal = m_floats[2]; } if (m_floats[3] < minVal) { minIndex = 3; } return minIndex; } SIMD_FORCE_INLINE int32_t closestAxis4() const { return absolute4().maxAxis4(); } /**@brief Set x,y,z and zero w * @param x Value of x * @param y Value of y * @param z Value of z */ /* void getValue(btScalar *m) const { m[0] = m_floats[0]; m[1] = m_floats[1]; m[2] =m_floats[2]; } */ /**@brief Set the values * @param x Value of x * @param y Value of y * @param z Value of z * @param w Value of w */ SIMD_FORCE_INLINE void setValue(const btScalar& x, const btScalar& y, const btScalar& z, const btScalar& w) { m_floats[0] = x; m_floats[1] = y; m_floats[2] = z; m_floats[3] = w; } }; ///btSwapVector3Endian swaps vector endianness, useful for network and cross-platform serialization SIMD_FORCE_INLINE void btSwapScalarEndian(const btScalar& sourceVal, btScalar& destVal) { #ifdef BT_USE_DOUBLE_PRECISION unsigned char* dest = (unsigned char*)&destVal; unsigned char* src = (unsigned char*)&sourceVal; dest[0] = src[7]; dest[1] = src[6]; dest[2] = src[5]; dest[3] = src[4]; dest[4] = src[3]; dest[5] = src[2]; dest[6] = src[1]; dest[7] = src[0]; #else unsigned char* dest = (unsigned char*)&destVal; unsigned char* src = (unsigned char*)&sourceVal; dest[0] = src[3]; dest[1] = src[2]; dest[2] = src[1]; dest[3] = src[0]; #endif //BT_USE_DOUBLE_PRECISION } ///btSwapVector3Endian swaps vector endianness, useful for network and cross-platform serialization SIMD_FORCE_INLINE void btSwapVector3Endian(const btVector3& sourceVec, btVector3& destVec) { for (int32_t i = 0; i < 4; i++) { btSwapScalarEndian(sourceVec[i], destVec[i]); } } ///btUnSwapVector3Endian swaps vector endianness, useful for network and cross-platform serialization SIMD_FORCE_INLINE void btUnSwapVector3Endian(btVector3& vector) { btVector3 swappedVec; for (int32_t i = 0; i < 4; i++) { btSwapScalarEndian(vector[i], swappedVec[i]); } vector = swappedVec; } template <class T> SIMD_FORCE_INLINE void btPlaneSpace1(const T& n, T& p, T& q) { if (btFabs(n[2]) > SIMDSQRT12) { // choose p in y-z plane btScalar a = n[1] * n[1] + n[2] * n[2]; btScalar k = btRecipSqrt(a); p[0] = 0; p[1] = -n[2] * k; p[2] = n[1] * k; // set q = n x p q[0] = a * k; q[1] = -n[0] * p[2]; q[2] = n[0] * p[1]; } else { // choose p in x-y plane btScalar a = n[0] * n[0] + n[1] * n[1]; btScalar k = btRecipSqrt(a); p[0] = -n[1] * k; p[1] = n[0] * k; p[2] = 0; // set q = n x p q[0] = -n[2] * p[1]; q[1] = n[2] * p[0]; q[2] = a * k; } } struct btVector3FloatData { float m_floats[4]; }; struct btVector3DoubleData { double m_floats[4]; }; SIMD_FORCE_INLINE void btVector3::serializeFloat(struct btVector3FloatData& dataOut) const { ///could also do a memcpy, check if it is worth it for (int32_t i = 0; i < 4; i++) dataOut.m_floats[i] = float(m_floats[i]); } SIMD_FORCE_INLINE void btVector3::deSerializeFloat(const struct btVector3FloatData& dataIn) { for (int32_t i = 0; i < 4; i++) m_floats[i] = btScalar(dataIn.m_floats[i]); } SIMD_FORCE_INLINE void btVector3::serializeDouble(struct btVector3DoubleData& dataOut) const { ///could also do a memcpy, check if it is worth it for (int32_t i = 0; i < 4; i++) dataOut.m_floats[i] = double(m_floats[i]); } SIMD_FORCE_INLINE void btVector3::deSerializeDouble(const struct btVector3DoubleData& dataIn) { for (int32_t i = 0; i < 4; i++) m_floats[i] = btScalar(dataIn.m_floats[i]); } SIMD_FORCE_INLINE void btVector3::serialize(struct btVector3Data& dataOut) const { ///could also do a memcpy, check if it is worth it for (int32_t i = 0; i < 4; i++) dataOut.m_floats[i] = m_floats[i]; } SIMD_FORCE_INLINE void btVector3::deSerialize(const struct btVector3Data& dataIn) { for (int32_t i = 0; i < 4; i++) m_floats[i] = dataIn.m_floats[i]; } #endif //BT_VECTOR3_H
22,579
C
30.536313
265
0.613845
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/vhacdVHACD.h
/* Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #pragma once #ifndef VHACD_VHACD_H #define VHACD_VHACD_H #ifdef OPENCL_FOUND #ifdef __MACH__ #include <OpenCL/cl.h> #else #include <CL/cl.h> #endif #endif //OPENCL_FOUND #include "vhacdMutex.h" #include "vhacdVolume.h" #include "vhacdRaycastMesh.h" #include <vector> typedef std::vector< VHACD::IVHACD::Constraint > ConstraintVector; #define USE_THREAD 1 #define OCL_MIN_NUM_PRIMITIVES 4096 #define CH_APP_MIN_NUM_PRIMITIVES 64000 namespace VHACD { class VHACD : public IVHACD { public: //! Constructor. VHACD() { #if USE_THREAD == 1 && _OPENMP m_ompNumProcessors = 2 * omp_get_num_procs(); omp_set_num_threads(m_ompNumProcessors); #else //USE_THREAD == 1 && _OPENMP m_ompNumProcessors = 1; #endif //USE_THREAD == 1 && _OPENMP #ifdef CL_VERSION_1_1 m_oclWorkGroupSize = 0; m_oclDevice = 0; m_oclQueue = 0; m_oclKernelComputePartialVolumes = 0; m_oclKernelComputeSum = 0; #endif //CL_VERSION_1_1 Init(); } //! Destructor. ~VHACD(void) { } uint32_t GetNConvexHulls() const { return (uint32_t)m_convexHulls.Size(); } void Cancel() { SetCancel(true); } void GetConvexHull(const uint32_t index, ConvexHull& ch) const { Mesh* mesh = m_convexHulls[index]; ch.m_nPoints = (uint32_t)mesh->GetNPoints(); ch.m_nTriangles = (uint32_t)mesh->GetNTriangles(); ch.m_points = mesh->GetPoints(); ch.m_triangles = (uint32_t *)mesh->GetTriangles(); ch.m_volume = mesh->ComputeVolume(); Vec3<double> &center = mesh->ComputeCenter(); ch.m_center[0] = center.X(); ch.m_center[1] = center.Y(); ch.m_center[2] = center.Z(); } void Clean(void) { if (mRaycastMesh) { mRaycastMesh->release(); mRaycastMesh = nullptr; } delete m_volume; delete m_pset; size_t nCH = m_convexHulls.Size(); for (size_t p = 0; p < nCH; ++p) { delete m_convexHulls[p]; } m_convexHulls.Clear(); Init(); } void Release(void) { delete this; } bool Compute(const float* const points, const uint32_t nPoints, const uint32_t* const triangles, const uint32_t nTriangles, const Parameters& params); bool Compute(const double* const points, const uint32_t nPoints, const uint32_t* const triangles, const uint32_t nTriangles, const Parameters& params); bool OCLInit(void* const oclDevice, IUserLogger* const logger = 0); bool OCLRelease(IUserLogger* const logger = 0); virtual bool ComputeCenterOfMass(double centerOfMass[3]) const; // Will analyze the HACD results and compute the constraints solutions. // It will analyze the point at which any two convex hulls touch each other and // return the total number of constraint pairs found virtual uint32_t ComputeConstraints(void); // Returns a pointer to the constraint index; null if the index is not valid or // the user did not previously call 'ComputeConstraints' virtual const Constraint *GetConstraint(uint32_t index) const; private: void SetCancel(bool cancel) { m_cancelMutex.Lock(); m_cancel = cancel; m_cancelMutex.Unlock(); } bool GetCancel() { m_cancelMutex.Lock(); bool cancel = m_cancel; m_cancelMutex.Unlock(); return cancel; } void Update(const double stageProgress, const double operationProgress, const Parameters& params) { m_stageProgress = stageProgress; m_operationProgress = operationProgress; if (params.m_callback) { params.m_callback->Update(m_overallProgress, m_stageProgress, m_operationProgress, m_stage.c_str(), m_operation.c_str()); } } void Init() { if (mRaycastMesh) { mRaycastMesh->release(); mRaycastMesh = nullptr; } memset(m_rot, 0, sizeof(double) * 9); m_dim = 64; m_volume = 0; m_volumeCH0 = 0.0; m_pset = 0; m_overallProgress = 0.0; m_stageProgress = 0.0; m_operationProgress = 0.0; m_stage = ""; m_operation = ""; m_barycenter[0] = m_barycenter[1] = m_barycenter[2] = 0.0; m_rot[0][0] = m_rot[1][1] = m_rot[2][2] = 1.0; SetCancel(false); } void ComputePrimitiveSet(const Parameters& params); void ComputeACD(const Parameters& params); void MergeConvexHulls(const Parameters& params); void SimplifyConvexHull(Mesh* const ch, const size_t nvertices, const double minVolume); void SimplifyConvexHulls(const Parameters& params); void ComputeBestClippingPlane(const PrimitiveSet* inputPSet, const double volume, const SArray<Plane>& planes, const Vec3<double>& preferredCuttingDirection, const double w, const double alpha, const double beta, const int32_t convexhullDownsampling, const double progress0, const double progress1, Plane& bestPlane, double& minConcavity, const Parameters& params); template <class T> void AlignMesh(const T* const points, const uint32_t stridePoints, const uint32_t nPoints, const int32_t* const triangles, const uint32_t strideTriangles, const uint32_t nTriangles, const Parameters& params) { if (GetCancel() || !params.m_pca) { return; } m_timer.Tic(); m_stage = "Align mesh"; m_operation = "Voxelization"; std::ostringstream msg; if (params.m_logger) { msg << "+ " << m_stage << std::endl; params.m_logger->Log(msg.str().c_str()); } Update(0.0, 0.0, params); if (GetCancel()) { return; } m_dim = (size_t)(pow((double)params.m_resolution, 1.0 / 3.0) + 0.5); Volume volume; volume.Voxelize(points, stridePoints, nPoints, triangles, strideTriangles, nTriangles, m_dim, m_barycenter, m_rot); size_t n = volume.GetNPrimitivesOnSurf() + volume.GetNPrimitivesInsideSurf(); Update(50.0, 100.0, params); if (params.m_logger) { msg.str(""); msg << "\t dim = " << m_dim << "\t-> " << n << " voxels" << std::endl; params.m_logger->Log(msg.str().c_str()); } if (GetCancel()) { return; } m_operation = "PCA"; Update(50.0, 0.0, params); volume.AlignToPrincipalAxes(m_rot); m_overallProgress = 1.0; Update(100.0, 100.0, params); m_timer.Toc(); if (params.m_logger) { msg.str(""); msg << "\t time " << m_timer.GetElapsedTime() / 1000.0 << "s" << std::endl; params.m_logger->Log(msg.str().c_str()); } } template <class T> void VoxelizeMesh(const T* const points, const uint32_t stridePoints, const uint32_t nPoints, const int32_t* const triangles, const uint32_t strideTriangles, const uint32_t nTriangles, const Parameters& params) { if (GetCancel()) { return; } m_timer.Tic(); m_stage = "Voxelization"; std::ostringstream msg; if (params.m_logger) { msg << "+ " << m_stage << std::endl; params.m_logger->Log(msg.str().c_str()); } delete m_volume; m_volume = 0; int32_t iteration = 0; const int32_t maxIteration = 5; double progress = 0.0; while (iteration++ < maxIteration && !m_cancel) { msg.str(""); msg << "Iteration " << iteration; m_operation = msg.str(); progress = iteration * 100.0 / maxIteration; Update(progress, 0.0, params); m_volume = new Volume; m_volume->Voxelize(points, stridePoints, nPoints, triangles, strideTriangles, nTriangles, m_dim, m_barycenter, m_rot); Update(progress, 100.0, params); size_t n = m_volume->GetNPrimitivesOnSurf() + m_volume->GetNPrimitivesInsideSurf(); if (params.m_logger) { msg.str(""); msg << "\t dim = " << m_dim << "\t-> " << n << " voxels" << std::endl; params.m_logger->Log(msg.str().c_str()); } double a = pow((double)(params.m_resolution) / n, 0.33); size_t dim_next = (size_t)(m_dim * a + 0.5); if (n < params.m_resolution && iteration < maxIteration && m_volume->GetNPrimitivesOnSurf() < params.m_resolution / 8 && m_dim != dim_next) { delete m_volume; m_volume = 0; m_dim = dim_next; } else { break; } } m_overallProgress = 10.0; Update(100.0, 100.0, params); m_timer.Toc(); if (params.m_logger) { msg.str(""); msg << "\t time " << m_timer.GetElapsedTime() / 1000.0 << "s" << std::endl; params.m_logger->Log(msg.str().c_str()); } } template <class T> bool ComputeACD(const T* const points, const uint32_t nPoints, const uint32_t* const triangles, const uint32_t nTriangles, const Parameters& params) { Init(); if (params.m_projectHullVertices) { mRaycastMesh = RaycastMesh::createRaycastMesh(nPoints, points, nTriangles, (const uint32_t *)triangles); } if (params.m_oclAcceleration) { // build kernels } AlignMesh(points, 3, nPoints, (int32_t *)triangles, 3, nTriangles, params); VoxelizeMesh(points, 3, nPoints, (int32_t *)triangles, 3, nTriangles, params); ComputePrimitiveSet(params); ComputeACD(params); MergeConvexHulls(params); SimplifyConvexHulls(params); if (params.m_oclAcceleration) { // Release kernels } if (GetCancel()) { Clean(); return false; } return true; } private: RaycastMesh *mRaycastMesh{ nullptr }; SArray<Mesh*> m_convexHulls; std::string m_stage; std::string m_operation; double m_overallProgress; double m_stageProgress; double m_operationProgress; double m_rot[3][3]; double m_volumeCH0; Vec3<double> m_barycenter; Timer m_timer; size_t m_dim; Volume* m_volume; PrimitiveSet* m_pset; Mutex m_cancelMutex; bool m_cancel; int32_t m_ompNumProcessors; #ifdef CL_VERSION_1_1 cl_device_id* m_oclDevice; cl_context m_oclContext; cl_program m_oclProgram; cl_command_queue* m_oclQueue; cl_kernel* m_oclKernelComputePartialVolumes; cl_kernel* m_oclKernelComputeSum; size_t m_oclWorkGroupSize; #endif //CL_VERSION_1_1 ConstraintVector mConstraints; }; } #endif // VHACD_VHACD_H
12,732
C
32.158854
755
0.589695
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/vhacdVolume.h
/* Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #pragma once #ifndef VHACD_VOLUME_H #define VHACD_VOLUME_H #include "vhacdMesh.h" #include "vhacdVector.h" #include <assert.h> #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable:4456 4701) #endif namespace VHACD { enum VOXEL_VALUE { PRIMITIVE_UNDEFINED = 0, PRIMITIVE_OUTSIDE_SURFACE = 1, PRIMITIVE_INSIDE_SURFACE = 2, PRIMITIVE_ON_SURFACE = 3 }; struct Voxel { public: short m_coord[3]; short m_data; }; class PrimitiveSet { public: virtual ~PrimitiveSet(){}; virtual PrimitiveSet* Create() const = 0; virtual const size_t GetNPrimitives() const = 0; virtual const size_t GetNPrimitivesOnSurf() const = 0; virtual const size_t GetNPrimitivesInsideSurf() const = 0; virtual const double GetEigenValue(AXIS axis) const = 0; virtual const double ComputeMaxVolumeError() const = 0; virtual const double ComputeVolume() const = 0; virtual void Clip(const Plane& plane, PrimitiveSet* const positivePart, PrimitiveSet* const negativePart) const = 0; virtual void Intersect(const Plane& plane, SArray<Vec3<double> >* const positivePts, SArray<Vec3<double> >* const negativePts, const size_t sampling) const = 0; virtual void ComputeExteriorPoints(const Plane& plane, const Mesh& mesh, SArray<Vec3<double> >* const exteriorPts) const = 0; virtual void ComputeClippedVolumes(const Plane& plane, double& positiveVolume, double& negativeVolume) const = 0; virtual void SelectOnSurface(PrimitiveSet* const onSurfP) const = 0; virtual void ComputeConvexHull(Mesh& meshCH, const size_t sampling = 1) const = 0; virtual void ComputeBB() = 0; virtual void ComputePrincipalAxes() = 0; virtual void AlignToPrincipalAxes() = 0; virtual void RevertAlignToPrincipalAxes() = 0; virtual void Convert(Mesh& mesh, const VOXEL_VALUE value) const = 0; const Mesh& GetConvexHull() const { return m_convexHull; }; Mesh& GetConvexHull() { return m_convexHull; }; private: Mesh m_convexHull; }; //! class VoxelSet : public PrimitiveSet { friend class Volume; public: //! Destructor. ~VoxelSet(void); //! Constructor. VoxelSet(); const size_t GetNPrimitives() const { return m_voxels.Size(); } const size_t GetNPrimitivesOnSurf() const { return m_numVoxelsOnSurface; } const size_t GetNPrimitivesInsideSurf() const { return m_numVoxelsInsideSurface; } const double GetEigenValue(AXIS axis) const { return m_D[axis][axis]; } const double ComputeVolume() const { return m_unitVolume * m_voxels.Size(); } const double ComputeMaxVolumeError() const { return m_unitVolume * m_numVoxelsOnSurface; } const Vec3<short>& GetMinBBVoxels() const { return m_minBBVoxels; } const Vec3<short>& GetMaxBBVoxels() const { return m_maxBBVoxels; } const Vec3<double>& GetMinBB() const { return m_minBB; } const double& GetScale() const { return m_scale; } const double& GetUnitVolume() const { return m_unitVolume; } Vec3<double> GetPoint(Vec3<short> voxel) const { return Vec3<double>(voxel[0] * m_scale + m_minBB[0], voxel[1] * m_scale + m_minBB[1], voxel[2] * m_scale + m_minBB[2]); } Vec3<double> GetPoint(const Voxel& voxel) const { return Vec3<double>(voxel.m_coord[0] * m_scale + m_minBB[0], voxel.m_coord[1] * m_scale + m_minBB[1], voxel.m_coord[2] * m_scale + m_minBB[2]); } Vec3<double> GetPoint(Vec3<double> voxel) const { return Vec3<double>(voxel[0] * m_scale + m_minBB[0], voxel[1] * m_scale + m_minBB[1], voxel[2] * m_scale + m_minBB[2]); } void GetPoints(const Voxel& voxel, Vec3<double>* const pts) const; void ComputeConvexHull(Mesh& meshCH, const size_t sampling = 1) const; void Clip(const Plane& plane, PrimitiveSet* const positivePart, PrimitiveSet* const negativePart) const; void Intersect(const Plane& plane, SArray<Vec3<double> >* const positivePts, SArray<Vec3<double> >* const negativePts, const size_t sampling) const; void ComputeExteriorPoints(const Plane& plane, const Mesh& mesh, SArray<Vec3<double> >* const exteriorPts) const; void ComputeClippedVolumes(const Plane& plane, double& positiveVolume, double& negativeVolume) const; void SelectOnSurface(PrimitiveSet* const onSurfP) const; void ComputeBB(); void Convert(Mesh& mesh, const VOXEL_VALUE value) const; void ComputePrincipalAxes(); PrimitiveSet* Create() const { return new VoxelSet(); } void AlignToPrincipalAxes(){}; void RevertAlignToPrincipalAxes(){}; Voxel* const GetVoxels() { return m_voxels.Data(); } const Voxel* const GetVoxels() const { return m_voxels.Data(); } private: size_t m_numVoxelsOnSurface; size_t m_numVoxelsInsideSurface; Vec3<double> m_minBB; double m_scale; SArray<Voxel, 8> m_voxels; double m_unitVolume; Vec3<double> m_minBBPts; Vec3<double> m_maxBBPts; Vec3<short> m_minBBVoxels; Vec3<short> m_maxBBVoxels; Vec3<short> m_barycenter; double m_Q[3][3]; double m_D[3][3]; Vec3<double> m_barycenterPCA; }; struct Tetrahedron { public: Vec3<double> m_pts[4]; unsigned char m_data; }; //! class TetrahedronSet : public PrimitiveSet { friend class Volume; public: //! Destructor. ~TetrahedronSet(void); //! Constructor. TetrahedronSet(); const size_t GetNPrimitives() const { return m_tetrahedra.Size(); } const size_t GetNPrimitivesOnSurf() const { return m_numTetrahedraOnSurface; } const size_t GetNPrimitivesInsideSurf() const { return m_numTetrahedraInsideSurface; } const Vec3<double>& GetMinBB() const { return m_minBB; } const Vec3<double>& GetMaxBB() const { return m_maxBB; } const Vec3<double>& GetBarycenter() const { return m_barycenter; } const double GetEigenValue(AXIS axis) const { return m_D[axis][axis]; } const double GetSacle() const { return m_scale; } const double ComputeVolume() const; const double ComputeMaxVolumeError() const; void ComputeConvexHull(Mesh& meshCH, const size_t sampling = 1) const; void ComputePrincipalAxes(); void AlignToPrincipalAxes(); void RevertAlignToPrincipalAxes(); void Clip(const Plane& plane, PrimitiveSet* const positivePart, PrimitiveSet* const negativePart) const; void Intersect(const Plane& plane, SArray<Vec3<double> >* const positivePts, SArray<Vec3<double> >* const negativePts, const size_t sampling) const; void ComputeExteriorPoints(const Plane& plane, const Mesh& mesh, SArray<Vec3<double> >* const exteriorPts) const; void ComputeClippedVolumes(const Plane& plane, double& positiveVolume, double& negativeVolume) const; void SelectOnSurface(PrimitiveSet* const onSurfP) const; void ComputeBB(); void Convert(Mesh& mesh, const VOXEL_VALUE value) const; inline bool Add(Tetrahedron& tetrahedron); PrimitiveSet* Create() const { return new TetrahedronSet(); } static const double EPS; private: void AddClippedTetrahedra(const Vec3<double> (&pts)[10], const int32_t nPts); size_t m_numTetrahedraOnSurface; size_t m_numTetrahedraInsideSurface; double m_scale; Vec3<double> m_minBB; Vec3<double> m_maxBB; Vec3<double> m_barycenter; SArray<Tetrahedron, 8> m_tetrahedra; double m_Q[3][3]; double m_D[3][3]; }; //! class Volume { public: //! Destructor. ~Volume(void); //! Constructor. Volume(); //! Voxelize template <class T> void Voxelize(const T* const points, const uint32_t stridePoints, const uint32_t nPoints, const int32_t* const triangles, const uint32_t strideTriangles, const uint32_t nTriangles, const size_t dim, const Vec3<double>& barycenter, const double (&rot)[3][3]); unsigned char& GetVoxel(const size_t i, const size_t j, const size_t k) { assert(i < m_dim[0] || i >= 0); assert(j < m_dim[0] || j >= 0); assert(k < m_dim[0] || k >= 0); return m_data[i + j * m_dim[0] + k * m_dim[0] * m_dim[1]]; } const unsigned char& GetVoxel(const size_t i, const size_t j, const size_t k) const { assert(i < m_dim[0] || i >= 0); assert(j < m_dim[0] || j >= 0); assert(k < m_dim[0] || k >= 0); return m_data[i + j * m_dim[0] + k * m_dim[0] * m_dim[1]]; } const size_t GetNPrimitivesOnSurf() const { return m_numVoxelsOnSurface; } const size_t GetNPrimitivesInsideSurf() const { return m_numVoxelsInsideSurface; } void Convert(Mesh& mesh, const VOXEL_VALUE value) const; void Convert(VoxelSet& vset) const; void Convert(TetrahedronSet& tset) const; void AlignToPrincipalAxes(double (&rot)[3][3]) const; private: void FillOutsideSurface(const size_t i0, const size_t j0, const size_t k0, const size_t i1, const size_t j1, const size_t k1); void FillInsideSurface(); template <class T> void ComputeBB(const T* const points, const uint32_t stridePoints, const uint32_t nPoints, const Vec3<double>& barycenter, const double (&rot)[3][3]); void Allocate(); void Free(); Vec3<double> m_minBB; Vec3<double> m_maxBB; double m_scale; size_t m_dim[3]; //>! dim size_t m_numVoxelsOnSurface; size_t m_numVoxelsInsideSurface; size_t m_numVoxelsOutsideSurface; unsigned char* m_data; }; int32_t TriBoxOverlap(const Vec3<double>& boxcenter, const Vec3<double>& boxhalfsize, const Vec3<double>& triver0, const Vec3<double>& triver1, const Vec3<double>& triver2); template <class T> inline void ComputeAlignedPoint(const T* const points, const uint32_t idx, const Vec3<double>& barycenter, const double (&rot)[3][3], Vec3<double>& pt){}; template <> inline void ComputeAlignedPoint<float>(const float* const points, const uint32_t idx, const Vec3<double>& barycenter, const double (&rot)[3][3], Vec3<double>& pt) { double x = points[idx + 0] - barycenter[0]; double y = points[idx + 1] - barycenter[1]; double z = points[idx + 2] - barycenter[2]; pt[0] = rot[0][0] * x + rot[1][0] * y + rot[2][0] * z; pt[1] = rot[0][1] * x + rot[1][1] * y + rot[2][1] * z; pt[2] = rot[0][2] * x + rot[1][2] * y + rot[2][2] * z; } template <> inline void ComputeAlignedPoint<double>(const double* const points, const uint32_t idx, const Vec3<double>& barycenter, const double (&rot)[3][3], Vec3<double>& pt) { double x = points[idx + 0] - barycenter[0]; double y = points[idx + 1] - barycenter[1]; double z = points[idx + 2] - barycenter[2]; pt[0] = rot[0][0] * x + rot[1][0] * y + rot[2][0] * z; pt[1] = rot[0][1] * x + rot[1][1] * y + rot[2][1] * z; pt[2] = rot[0][2] * x + rot[1][2] * y + rot[2][2] * z; } template <class T> void Volume::ComputeBB(const T* const points, const uint32_t stridePoints, const uint32_t nPoints, const Vec3<double>& barycenter, const double (&rot)[3][3]) { Vec3<double> pt; ComputeAlignedPoint(points, 0, barycenter, rot, pt); m_maxBB = pt; m_minBB = pt; for (uint32_t v = 1; v < nPoints; ++v) { ComputeAlignedPoint(points, v * stridePoints, barycenter, rot, pt); for (int32_t i = 0; i < 3; ++i) { if (pt[i] < m_minBB[i]) m_minBB[i] = pt[i]; else if (pt[i] > m_maxBB[i]) m_maxBB[i] = pt[i]; } } } template <class T> void Volume::Voxelize(const T* const points, const uint32_t stridePoints, const uint32_t nPoints, const int32_t* const triangles, const uint32_t strideTriangles, const uint32_t nTriangles, const size_t dim, const Vec3<double>& barycenter, const double (&rot)[3][3]) { if (nPoints == 0) { return; } ComputeBB(points, stridePoints, nPoints, barycenter, rot); double d[3] = { m_maxBB[0] - m_minBB[0], m_maxBB[1] - m_minBB[1], m_maxBB[2] - m_minBB[2] }; double r; if (d[0] > d[1] && d[0] > d[2]) { r = d[0]; m_dim[0] = dim; m_dim[1] = 2 + static_cast<size_t>(dim * d[1] / d[0]); m_dim[2] = 2 + static_cast<size_t>(dim * d[2] / d[0]); } else if (d[1] > d[0] && d[1] > d[2]) { r = d[1]; m_dim[1] = dim; m_dim[0] = 2 + static_cast<size_t>(dim * d[0] / d[1]); m_dim[2] = 2 + static_cast<size_t>(dim * d[2] / d[1]); } else { r = d[2]; m_dim[2] = dim; m_dim[0] = 2 + static_cast<size_t>(dim * d[0] / d[2]); m_dim[1] = 2 + static_cast<size_t>(dim * d[1] / d[2]); } m_scale = r / (dim - 1); double invScale = (dim - 1) / r; Allocate(); m_numVoxelsOnSurface = 0; m_numVoxelsInsideSurface = 0; m_numVoxelsOutsideSurface = 0; Vec3<double> p[3]; size_t i, j, k; size_t i0, j0, k0; size_t i1, j1, k1; Vec3<double> boxcenter; Vec3<double> pt; const Vec3<double> boxhalfsize(0.5, 0.5, 0.5); for (size_t t = 0, ti = 0; t < nTriangles; ++t, ti += strideTriangles) { Vec3<int32_t> tri(triangles[ti + 0], triangles[ti + 1], triangles[ti + 2]); for (int32_t c = 0; c < 3; ++c) { ComputeAlignedPoint(points, tri[c] * stridePoints, barycenter, rot, pt); p[c][0] = (pt[0] - m_minBB[0]) * invScale; p[c][1] = (pt[1] - m_minBB[1]) * invScale; p[c][2] = (pt[2] - m_minBB[2]) * invScale; i = static_cast<size_t>(p[c][0] + 0.5); j = static_cast<size_t>(p[c][1] + 0.5); k = static_cast<size_t>(p[c][2] + 0.5); assert(i < m_dim[0] && i >= 0 && j < m_dim[1] && j >= 0 && k < m_dim[2] && k >= 0); if (c == 0) { i0 = i1 = i; j0 = j1 = j; k0 = k1 = k; } else { if (i < i0) i0 = i; if (j < j0) j0 = j; if (k < k0) k0 = k; if (i > i1) i1 = i; if (j > j1) j1 = j; if (k > k1) k1 = k; } } if (i0 > 0) --i0; if (j0 > 0) --j0; if (k0 > 0) --k0; if (i1 < m_dim[0]) ++i1; if (j1 < m_dim[1]) ++j1; if (k1 < m_dim[2]) ++k1; for (size_t i = i0; i < i1; ++i) { boxcenter[0] = (double)i; for (size_t j = j0; j < j1; ++j) { boxcenter[1] = (double)j; for (size_t k = k0; k < k1; ++k) { boxcenter[2] = (double)k; int32_t res = TriBoxOverlap(boxcenter, boxhalfsize, p[0], p[1], p[2]); unsigned char& value = GetVoxel(i, j, k); if (res == 1 && value == PRIMITIVE_UNDEFINED) { value = PRIMITIVE_ON_SURFACE; ++m_numVoxelsOnSurface; } } } } } FillOutsideSurface(0, 0, 0, m_dim[0], m_dim[1], 1); FillOutsideSurface(0, 0, m_dim[2] - 1, m_dim[0], m_dim[1], m_dim[2]); FillOutsideSurface(0, 0, 0, m_dim[0], 1, m_dim[2]); FillOutsideSurface(0, m_dim[1] - 1, 0, m_dim[0], m_dim[1], m_dim[2]); FillOutsideSurface(0, 0, 0, 1, m_dim[1], m_dim[2]); FillOutsideSurface(m_dim[0] - 1, 0, 0, m_dim[0], m_dim[1], m_dim[2]); FillInsideSurface(); } } #ifdef _MSC_VER #pragma warning(pop) #endif #endif // VHACD_VOLUME_H
17,055
C
38.573086
756
0.612899
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/vhacdTimer.h
/* Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #pragma once #ifndef VHACD_TIMER_H #define VHACD_TIMER_H #ifdef _WIN32 #ifndef WIN32_LEAN_AND_MEAN #define WIN32_LEAN_AND_MEAN // Exclude rarely-used stuff from Windows headers #endif #include <windows.h> #elif __MACH__ #include <mach/clock.h> #include <mach/mach.h> #else #include <sys/time.h> #include <time.h> #endif namespace VHACD { #ifdef _WIN32 class Timer { public: Timer(void) { m_start.QuadPart = 0; m_stop.QuadPart = 0; QueryPerformanceFrequency(&m_freq); }; ~Timer(void){}; void Tic() { QueryPerformanceCounter(&m_start); } void Toc() { QueryPerformanceCounter(&m_stop); } double GetElapsedTime() // in ms { LARGE_INTEGER delta; delta.QuadPart = m_stop.QuadPart - m_start.QuadPart; return (1000.0 * delta.QuadPart) / (double)m_freq.QuadPart; } private: LARGE_INTEGER m_start; LARGE_INTEGER m_stop; LARGE_INTEGER m_freq; }; #elif __MACH__ class Timer { public: Timer(void) { memset(this, 0, sizeof(Timer)); host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &m_cclock); }; ~Timer(void) { mach_port_deallocate(mach_task_self(), m_cclock); }; void Tic() { clock_get_time(m_cclock, &m_start); } void Toc() { clock_get_time(m_cclock, &m_stop); } double GetElapsedTime() // in ms { return 1000.0 * (m_stop.tv_sec - m_start.tv_sec + (1.0E-9) * (m_stop.tv_nsec - m_start.tv_nsec)); } private: clock_serv_t m_cclock; mach_timespec_t m_start; mach_timespec_t m_stop; }; #else class Timer { public: Timer(void) { memset(this, 0, sizeof(Timer)); }; ~Timer(void){}; void Tic() { clock_gettime(CLOCK_REALTIME, &m_start); } void Toc() { clock_gettime(CLOCK_REALTIME, &m_stop); } double GetElapsedTime() // in ms { return 1000.0 * (m_stop.tv_sec - m_start.tv_sec + (1.0E-9) * (m_stop.tv_nsec - m_start.tv_nsec)); } private: struct timespec m_start; struct timespec m_stop; }; #endif } #endif // VHACD_TIMER_H
3,644
C
28.877049
756
0.671515
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/vhacdManifoldMesh.h
/* Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #pragma once #ifndef VHACD_MANIFOLD_MESH_H #define VHACD_MANIFOLD_MESH_H #include "vhacdCircularList.h" #include "vhacdSArray.h" #include "vhacdVector.h" namespace VHACD { class TMMTriangle; class TMMEdge; class TMMesh; class ICHull; //! Vertex data structure used in a triangular manifold mesh (TMM). class TMMVertex { public: void Initialize(); TMMVertex(void); ~TMMVertex(void); private: Vec3<double> m_pos; int32_t m_name; size_t m_id; CircularListElement<TMMEdge>* m_duplicate; // pointer to incident cone edge (or NULL) bool m_onHull; bool m_tag; TMMVertex(const TMMVertex& rhs); friend class ICHull; friend class TMMesh; friend class TMMTriangle; friend class TMMEdge; }; //! Edge data structure used in a triangular manifold mesh (TMM). class TMMEdge { public: void Initialize(); TMMEdge(void); ~TMMEdge(void); private: size_t m_id; CircularListElement<TMMTriangle>* m_triangles[2]; CircularListElement<TMMVertex>* m_vertices[2]; CircularListElement<TMMTriangle>* m_newFace; TMMEdge(const TMMEdge& rhs); friend class ICHull; friend class TMMTriangle; friend class TMMVertex; friend class TMMesh; }; //! Triangle data structure used in a triangular manifold mesh (TMM). class TMMTriangle { public: void Initialize(); TMMTriangle(void); ~TMMTriangle(void); private: size_t m_id; CircularListElement<TMMEdge>* m_edges[3]; CircularListElement<TMMVertex>* m_vertices[3]; bool m_visible; TMMTriangle(const TMMTriangle& rhs); friend class ICHull; friend class TMMesh; friend class TMMVertex; friend class TMMEdge; }; //! triangular manifold mesh data structure. class TMMesh { public: //! Returns the number of vertices> inline size_t GetNVertices() const { return m_vertices.GetSize(); } //! Returns the number of edges inline size_t GetNEdges() const { return m_edges.GetSize(); } //! Returns the number of triangles inline size_t GetNTriangles() const { return m_triangles.GetSize(); } //! Returns the vertices circular list inline const CircularList<TMMVertex>& GetVertices() const { return m_vertices; } //! Returns the edges circular list inline const CircularList<TMMEdge>& GetEdges() const { return m_edges; } //! Returns the triangles circular list inline const CircularList<TMMTriangle>& GetTriangles() const { return m_triangles; } //! Returns the vertices circular list inline CircularList<TMMVertex>& GetVertices() { return m_vertices; } //! Returns the edges circular list inline CircularList<TMMEdge>& GetEdges() { return m_edges; } //! Returns the triangles circular list inline CircularList<TMMTriangle>& GetTriangles() { return m_triangles; } //! Add vertex to the mesh CircularListElement<TMMVertex>* AddVertex() { return m_vertices.Add(); } //! Add vertex to the mesh CircularListElement<TMMEdge>* AddEdge() { return m_edges.Add(); } //! Add vertex to the mesh CircularListElement<TMMTriangle>* AddTriangle() { return m_triangles.Add(); } //! Print mesh information void Print(); //! void GetIFS(Vec3<double>* const points, Vec3<int32_t>* const triangles); //! void Clear(); //! void Copy(TMMesh& mesh); //! bool CheckConsistancy(); //! bool Normalize(); //! bool Denormalize(); //! Constructor TMMesh(); //! Destructor virtual ~TMMesh(void); private: CircularList<TMMVertex> m_vertices; CircularList<TMMEdge> m_edges; CircularList<TMMTriangle> m_triangles; // not defined TMMesh(const TMMesh& rhs); friend class ICHull; }; } #endif // VHACD_MANIFOLD_MESH_H
5,225
C
35.802817
756
0.716938
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/btAlignedAllocator.h
/* Bullet Continuous Collision Detection and Physics Library Copyright (c) 2003-2006 Erwin Coumans http://continuousphysics.com/Bullet/ This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. */ #ifndef BT_ALIGNED_ALLOCATOR #define BT_ALIGNED_ALLOCATOR ///we probably replace this with our own aligned memory allocator ///so we replace _aligned_malloc and _aligned_free with our own ///that is better portable and more predictable #include "btScalar.h" //#define BT_DEBUG_MEMORY_ALLOCATIONS 1 #ifdef BT_DEBUG_MEMORY_ALLOCATIONS #define btAlignedAlloc(a, b) \ btAlignedAllocInternal(a, b, __LINE__, __FILE__) #define btAlignedFree(ptr) \ btAlignedFreeInternal(ptr, __LINE__, __FILE__) void* btAlignedAllocInternal(size_t size, int32_t alignment, int32_t line, char* filename); void btAlignedFreeInternal(void* ptr, int32_t line, char* filename); #else void* btAlignedAllocInternal(size_t size, int32_t alignment); void btAlignedFreeInternal(void* ptr); #define btAlignedAlloc(size, alignment) btAlignedAllocInternal(size, alignment) #define btAlignedFree(ptr) btAlignedFreeInternal(ptr) #endif typedef int32_t size_type; typedef void*(btAlignedAllocFunc)(size_t size, int32_t alignment); typedef void(btAlignedFreeFunc)(void* memblock); typedef void*(btAllocFunc)(size_t size); typedef void(btFreeFunc)(void* memblock); ///The developer can let all Bullet memory allocations go through a custom memory allocator, using btAlignedAllocSetCustom void btAlignedAllocSetCustom(btAllocFunc* allocFunc, btFreeFunc* freeFunc); ///If the developer has already an custom aligned allocator, then btAlignedAllocSetCustomAligned can be used. The default aligned allocator pre-allocates extra memory using the non-aligned allocator, and instruments it. void btAlignedAllocSetCustomAligned(btAlignedAllocFunc* allocFunc, btAlignedFreeFunc* freeFunc); ///The btAlignedAllocator is a portable class for aligned memory allocations. ///Default implementations for unaligned and aligned allocations can be overridden by a custom allocator using btAlignedAllocSetCustom and btAlignedAllocSetCustomAligned. template <typename T, unsigned Alignment> class btAlignedAllocator { typedef btAlignedAllocator<T, Alignment> self_type; public: //just going down a list: btAlignedAllocator() {} /* btAlignedAllocator( const self_type & ) {} */ template <typename Other> btAlignedAllocator(const btAlignedAllocator<Other, Alignment>&) {} typedef const T* const_pointer; typedef const T& const_reference; typedef T* pointer; typedef T& reference; typedef T value_type; pointer address(reference ref) const { return &ref; } const_pointer address(const_reference ref) const { return &ref; } pointer allocate(size_type n, const_pointer* hint = 0) { (void)hint; return reinterpret_cast<pointer>(btAlignedAlloc(sizeof(value_type) * n, Alignment)); } void construct(pointer ptr, const value_type& value) { new (ptr) value_type(value); } void deallocate(pointer ptr) { btAlignedFree(reinterpret_cast<void*>(ptr)); } void destroy(pointer ptr) { ptr->~value_type(); } template <typename O> struct rebind { typedef btAlignedAllocator<O, Alignment> other; }; template <typename O> self_type& operator=(const btAlignedAllocator<O, Alignment>&) { return *this; } friend bool operator==(const self_type&, const self_type&) { return true; } }; #endif //BT_ALIGNED_ALLOCATOR
4,253
C
39.514285
243
0.758288
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/btConvexHullComputer.h
/* Copyright (c) 2011 Ole Kniemeyer, MAXON, www.maxon.net This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. */ #ifndef BT_CONVEX_HULL_COMPUTER_H #define BT_CONVEX_HULL_COMPUTER_H #include "btAlignedObjectArray.h" #include "btVector3.h" /// Convex hull implementation based on Preparata and Hong /// See http://code.google.com/p/bullet/issues/detail?id=275 /// Ole Kniemeyer, MAXON Computer GmbH class btConvexHullComputer { private: btScalar compute(const void* coords, bool doubleCoords, int32_t stride, int32_t count, btScalar shrink, btScalar shrinkClamp); public: class Edge { private: int32_t next; int32_t reverse; int32_t targetVertex; friend class btConvexHullComputer; public: int32_t getSourceVertex() const { return (this + reverse)->targetVertex; } int32_t getTargetVertex() const { return targetVertex; } const Edge* getNextEdgeOfVertex() const // clockwise list of all edges of a vertex { return this + next; } const Edge* getNextEdgeOfFace() const // counter-clockwise list of all edges of a face { return (this + reverse)->getNextEdgeOfVertex(); } const Edge* getReverseEdge() const { return this + reverse; } }; // Vertices of the output hull btAlignedObjectArray<btVector3> vertices; // Edges of the output hull btAlignedObjectArray<Edge> edges; // Faces of the convex hull. Each entry is an index into the "edges" array pointing to an edge of the face. Faces are planar n-gons btAlignedObjectArray<int32_t> faces; /* Compute convex hull of "count" vertices stored in "coords". "stride" is the difference in bytes between the addresses of consecutive vertices. If "shrink" is positive, the convex hull is shrunken by that amount (each face is moved by "shrink" length units towards the center along its normal). If "shrinkClamp" is positive, "shrink" is clamped to not exceed "shrinkClamp * innerRadius", where "innerRadius" is the minimum distance of a face to the center of the convex hull. The returned value is the amount by which the hull has been shrunken. If it is negative, the amount was so large that the resulting convex hull is empty. The output convex hull can be found in the member variables "vertices", "edges", "faces". */ btScalar compute(const float* coords, int32_t stride, int32_t count, btScalar shrink, btScalar shrinkClamp) { return compute(coords, false, stride, count, shrink, shrinkClamp); } // same as above, but double precision btScalar compute(const double* coords, int32_t stride, int32_t count, btScalar shrink, btScalar shrinkClamp) { return compute(coords, true, stride, count, shrink, shrinkClamp); } }; #endif //BT_CONVEX_HULL_COMPUTER_H
3,745
C
37.224489
243
0.695861
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/vhacdSArray.h
/* Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #pragma once #ifndef VHACD_SARRAY_H #define VHACD_SARRAY_H #include <stdio.h> #include <stdlib.h> #include <string.h> #define SARRAY_DEFAULT_MIN_SIZE 16 namespace VHACD { //! SArray. template <typename T, size_t N = 64> class SArray { public: T& operator[](size_t i) { T* const data = Data(); return data[i]; } const T& operator[](size_t i) const { const T* const data = Data(); return data[i]; } size_t Size() const { return m_size; } T* const Data() { return (m_maxSize == N) ? m_data0 : m_data; } const T* const Data() const { return (m_maxSize == N) ? m_data0 : m_data; } void Clear() { m_size = 0; delete[] m_data; m_data = 0; m_maxSize = N; } void PopBack() { --m_size; } void Allocate(size_t size) { if (size > m_maxSize) { T* temp = new T[size]; memcpy(temp, Data(), m_size * sizeof(T)); delete[] m_data; m_data = temp; m_maxSize = size; } } void Resize(size_t size) { Allocate(size); m_size = size; } void PushBack(const T& value) { if (m_size == m_maxSize) { size_t maxSize = (m_maxSize << 1); T* temp = new T[maxSize]; memcpy(temp, Data(), m_maxSize * sizeof(T)); delete[] m_data; m_data = temp; m_maxSize = maxSize; } T* const data = Data(); data[m_size++] = value; } bool Find(const T& value, size_t& pos) { T* const data = Data(); for (pos = 0; pos < m_size; ++pos) if (value == data[pos]) return true; return false; } bool Insert(const T& value) { size_t pos; if (Find(value, pos)) return false; PushBack(value); return true; } bool Erase(const T& value) { size_t pos; T* const data = Data(); if (Find(value, pos)) { for (size_t j = pos + 1; j < m_size; ++j) data[j - 1] = data[j]; --m_size; return true; } return false; } void operator=(const SArray& rhs) { if (m_maxSize < rhs.m_size) { delete[] m_data; m_maxSize = rhs.m_maxSize; m_data = new T[m_maxSize]; } m_size = rhs.m_size; memcpy(Data(), rhs.Data(), m_size * sizeof(T)); } void Initialize() { m_data = 0; m_size = 0; m_maxSize = N; } SArray(const SArray& rhs) { m_data = 0; m_size = 0; m_maxSize = N; *this = rhs; } SArray() { Initialize(); } ~SArray() { delete[] m_data; } private: T m_data0[N]; T* m_data; size_t m_size; size_t m_maxSize; }; } #endif
4,473
C
27.316456
756
0.565169
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/vhacdVector.h
/* Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #pragma once #ifndef VHACD_VECTOR_H #define VHACD_VECTOR_H #include <iostream> #include <math.h> namespace VHACD { //! Vector dim 3. template <typename T> class Vec3 { public: T& operator[](size_t i) { return m_data[i]; } const T& operator[](size_t i) const { return m_data[i]; } T& X(); T& Y(); T& Z(); const T& X() const; const T& Y() const; const T& Z() const; void Normalize(); T GetNorm() const; void operator=(const Vec3& rhs); void operator+=(const Vec3& rhs); void operator-=(const Vec3& rhs); void operator-=(T a); void operator+=(T a); void operator/=(T a); void operator*=(T a); Vec3 operator^(const Vec3& rhs) const; T operator*(const Vec3& rhs) const; Vec3 operator+(const Vec3& rhs) const; Vec3 operator-(const Vec3& rhs) const; Vec3 operator-() const; Vec3 operator*(T rhs) const; Vec3 operator/(T rhs) const; bool operator<(const Vec3& rhs) const; bool operator>(const Vec3& rhs) const; Vec3(); Vec3(T a); Vec3(T x, T y, T z); Vec3(const Vec3& rhs); /*virtual*/ ~Vec3(void); // Compute the center of this bounding box and return the diagonal length T GetCenter(const Vec3 &bmin, const Vec3 &bmax) { X() = (bmin.X() + bmax.X())*0.5; Y() = (bmin.Y() + bmax.Y())*0.5; Z() = (bmin.Z() + bmax.Z())*0.5; T dx = bmax.X() - bmin.X(); T dy = bmax.Y() - bmin.Y(); T dz = bmax.Z() - bmin.Z(); T diagonal = T(sqrt(dx*dx + dy*dy + dz*dz)); return diagonal; } // Update the min/max values relative to this point void UpdateMinMax(Vec3 &bmin,Vec3 &bmax) const { if (X() < bmin.X()) { bmin.X() = X(); } if (Y() < bmin.Y()) { bmin.Y() = Y(); } if (Z() < bmin.Z()) { bmin.Z() = Z(); } if (X() > bmax.X()) { bmax.X() = X(); } if (X() > bmax.X()) { bmax.X() = X(); } if (Y() > bmax.Y()) { bmax.Y() = Y(); } if (Z() > bmax.Z()) { bmax.Z() = Z(); } } // Returns the squared distance between these two points T GetDistanceSquared(const Vec3 &p) const { T dx = X() - p.X(); T dy = Y() - p.Y(); T dz = Z() - p.Z(); return dx*dx + dy*dy + dz*dz; } T GetDistance(const Vec3 &p) const { return sqrt(GetDistanceSquared(p)); } // Returns the raw vector data as a pointer T* GetData(void) { return m_data; } private: T m_data[3]; }; //! Vector dim 2. template <typename T> class Vec2 { public: T& operator[](size_t i) { return m_data[i]; } const T& operator[](size_t i) const { return m_data[i]; } T& X(); T& Y(); const T& X() const; const T& Y() const; void Normalize(); T GetNorm() const; void operator=(const Vec2& rhs); void operator+=(const Vec2& rhs); void operator-=(const Vec2& rhs); void operator-=(T a); void operator+=(T a); void operator/=(T a); void operator*=(T a); T operator^(const Vec2& rhs) const; T operator*(const Vec2& rhs) const; Vec2 operator+(const Vec2& rhs) const; Vec2 operator-(const Vec2& rhs) const; Vec2 operator-() const; Vec2 operator*(T rhs) const; Vec2 operator/(T rhs) const; Vec2(); Vec2(T a); Vec2(T x, T y); Vec2(const Vec2& rhs); /*virtual*/ ~Vec2(void); private: T m_data[2]; }; template <typename T> const bool Colinear(const Vec3<T>& a, const Vec3<T>& b, const Vec3<T>& c); template <typename T> const T ComputeVolume4(const Vec3<T>& a, const Vec3<T>& b, const Vec3<T>& c, const Vec3<T>& d); } #include "vhacdVector.inl" // template implementation #endif
5,364
C
30.934524
756
0.598993
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/FloatMath.h
#ifndef FLOAT_MATH_LIB_H #define FLOAT_MATH_LIB_H #include <float.h> #include <stdint.h> namespace FLOAT_MATH { enum FM_ClipState { FMCS_XMIN = (1<<0), FMCS_XMAX = (1<<1), FMCS_YMIN = (1<<2), FMCS_YMAX = (1<<3), FMCS_ZMIN = (1<<4), FMCS_ZMAX = (1<<5), }; enum FM_Axis { FM_XAXIS = (1<<0), FM_YAXIS = (1<<1), FM_ZAXIS = (1<<2) }; enum LineSegmentType { LS_START, LS_MIDDLE, LS_END }; const float FM_PI = 3.1415926535897932384626433832795028841971693993751f; const float FM_DEG_TO_RAD = ((2.0f * FM_PI) / 360.0f); const float FM_RAD_TO_DEG = (360.0f / (2.0f * FM_PI)); //***************** Float versions //*** //*** vectors are assumed to be 3 floats or 3 doubles representing X, Y, Z //*** quaternions are assumed to be 4 floats or 4 doubles representing X,Y,Z,W //*** matrices are assumed to be 16 floats or 16 doubles representing a standard D3D or OpenGL style 4x4 matrix //*** bounding volumes are expressed as two sets of 3 floats/double representing bmin(x,y,z) and bmax(x,y,z) //*** Plane equations are assumed to be 4 floats or 4 doubles representing Ax,By,Cz,D FM_Axis fm_getDominantAxis(const float normal[3]); FM_Axis fm_getDominantAxis(const double normal[3]); void fm_decomposeTransform(const float local_transform[16],float trans[3],float rot[4],float scale[3]); void fm_decomposeTransform(const double local_transform[16],double trans[3],double rot[4],double scale[3]); void fm_multiplyTransform(const float *pA,const float *pB,float *pM); void fm_multiplyTransform(const double *pA,const double *pB,double *pM); void fm_inverseTransform(const float matrix[16],float inverse_matrix[16]); void fm_inverseTransform(const double matrix[16],double inverse_matrix[16]); void fm_identity(float matrix[16]); // set 4x4 matrix to identity. void fm_identity(double matrix[16]); // set 4x4 matrix to identity. void fm_inverseRT(const float matrix[16], const float pos[3], float t[3]); // inverse rotate translate the point. void fm_inverseRT(const double matrix[16],const double pos[3],double t[3]); // inverse rotate translate the point. void fm_transform(const float matrix[16], const float pos[3], float t[3]); // rotate and translate this point. void fm_transform(const double matrix[16],const double pos[3],double t[3]); // rotate and translate this point. float fm_getDeterminant(const float matrix[16]); double fm_getDeterminant(const double matrix[16]); void fm_getSubMatrix(int32_t ki,int32_t kj,float pDst[16],const float matrix[16]); void fm_getSubMatrix(int32_t ki,int32_t kj,double pDst[16],const float matrix[16]); void fm_rotate(const float matrix[16],const float pos[3],float t[3]); // only rotate the point by a 4x4 matrix, don't translate. void fm_rotate(const double matri[16],const double pos[3],double t[3]); // only rotate the point by a 4x4 matrix, don't translate. void fm_eulerToMatrix(float ax,float ay,float az,float matrix[16]); // convert euler (in radians) to a dest 4x4 matrix (translation set to zero) void fm_eulerToMatrix(double ax,double ay,double az,double matrix[16]); // convert euler (in radians) to a dest 4x4 matrix (translation set to zero) void fm_getAABB(uint32_t vcount,const float *points,uint32_t pstride,float bmin[3],float bmax[3]); void fm_getAABB(uint32_t vcount,const double *points,uint32_t pstride,double bmin[3],double bmax[3]); void fm_getAABBCenter(const float bmin[3],const float bmax[3],float center[3]); void fm_getAABBCenter(const double bmin[3],const double bmax[3],double center[3]); void fm_transformAABB(const float bmin[3],const float bmax[3],const float matrix[16],float tbmin[3],float tbmax[3]); void fm_transformAABB(const double bmin[3],const double bmax[3],const double matrix[16],double tbmin[3],double tbmax[3]); void fm_eulerToQuat(float x,float y,float z,float quat[4]); // convert euler angles to quaternion. void fm_eulerToQuat(double x,double y,double z,double quat[4]); // convert euler angles to quaternion. void fm_quatToEuler(const float quat[4],float &ax,float &ay,float &az); void fm_quatToEuler(const double quat[4],double &ax,double &ay,double &az); void fm_eulerToQuat(const float euler[3],float quat[4]); // convert euler angles to quaternion. Angles must be radians not degrees! void fm_eulerToQuat(const double euler[3],double quat[4]); // convert euler angles to quaternion. void fm_scale(float x,float y,float z,float matrix[16]); // apply scale to the matrix. void fm_scale(double x,double y,double z,double matrix[16]); // apply scale to the matrix. void fm_eulerToQuatDX(float x,float y,float z,float quat[4]); // convert euler angles to quaternion using the fucked up DirectX method void fm_eulerToQuatDX(double x,double y,double z,double quat[4]); // convert euler angles to quaternion using the fucked up DirectX method void fm_eulerToMatrixDX(float x,float y,float z,float matrix[16]); // convert euler angles to quaternion using the fucked up DirectX method. void fm_eulerToMatrixDX(double x,double y,double z,double matrix[16]); // convert euler angles to quaternion using the fucked up DirectX method. void fm_quatToMatrix(const float quat[4],float matrix[16]); // convert quaterinion rotation to matrix, translation set to zero. void fm_quatToMatrix(const double quat[4],double matrix[16]); // convert quaterinion rotation to matrix, translation set to zero. void fm_quatRotate(const float quat[4],const float v[3],float r[3]); // rotate a vector directly by a quaternion. void fm_quatRotate(const double quat[4],const double v[3],double r[3]); // rotate a vector directly by a quaternion. void fm_getTranslation(const float matrix[16],float t[3]); void fm_getTranslation(const double matrix[16],double t[3]); void fm_setTranslation(const float *translation,float matrix[16]); void fm_setTranslation(const double *translation,double matrix[16]); void fm_multiplyQuat(const float *qa,const float *qb,float *quat); void fm_multiplyQuat(const double *qa,const double *qb,double *quat); void fm_matrixToQuat(const float matrix[16],float quat[4]); // convert the 3x3 portion of a 4x4 matrix into a quaterion as x,y,z,w void fm_matrixToQuat(const double matrix[16],double quat[4]); // convert the 3x3 portion of a 4x4 matrix into a quaterion as x,y,z,w float fm_sphereVolume(float radius); // return's the volume of a sphere of this radius (4/3 PI * R cubed ) double fm_sphereVolume(double radius); // return's the volume of a sphere of this radius (4/3 PI * R cubed ) float fm_cylinderVolume(float radius,float h); double fm_cylinderVolume(double radius,double h); float fm_capsuleVolume(float radius,float h); double fm_capsuleVolume(double radius,double h); float fm_distance(const float p1[3],const float p2[3]); double fm_distance(const double p1[3],const double p2[3]); float fm_distanceSquared(const float p1[3],const float p2[3]); double fm_distanceSquared(const double p1[3],const double p2[3]); float fm_distanceSquaredXZ(const float p1[3],const float p2[3]); double fm_distanceSquaredXZ(const double p1[3],const double p2[3]); float fm_computePlane(const float p1[3],const float p2[3],const float p3[3],float *n); // return D double fm_computePlane(const double p1[3],const double p2[3],const double p3[3],double *n); // return D float fm_distToPlane(const float plane[4],const float pos[3]); // computes the distance of this point from the plane. double fm_distToPlane(const double plane[4],const double pos[3]); // computes the distance of this point from the plane. float fm_dot(const float p1[3],const float p2[3]); double fm_dot(const double p1[3],const double p2[3]); void fm_cross(float cross[3],const float a[3],const float b[3]); void fm_cross(double cross[3],const double a[3],const double b[3]); void fm_computeNormalVector(float n[3],const float p1[3],const float p2[3]); // as P2-P1 normalized. void fm_computeNormalVector(double n[3],const double p1[3],const double p2[3]); // as P2-P1 normalized. bool fm_computeWindingOrder(const float p1[3],const float p2[3],const float p3[3]); // returns true if the triangle is clockwise. bool fm_computeWindingOrder(const double p1[3],const double p2[3],const double p3[3]); // returns true if the triangle is clockwise. float fm_normalize(float n[3]); // normalize this vector and return the distance double fm_normalize(double n[3]); // normalize this vector and return the distance float fm_normalizeQuat(float n[4]); // normalize this quat double fm_normalizeQuat(double n[4]); // normalize this quat void fm_matrixMultiply(const float A[16],const float B[16],float dest[16]); void fm_matrixMultiply(const double A[16],const double B[16],double dest[16]); void fm_composeTransform(const float position[3],const float quat[4],const float scale[3],float matrix[16]); void fm_composeTransform(const double position[3],const double quat[4],const double scale[3],double matrix[16]); float fm_computeArea(const float p1[3],const float p2[3],const float p3[3]); double fm_computeArea(const double p1[3],const double p2[3],const double p3[3]); void fm_lerp(const float p1[3],const float p2[3],float dest[3],float lerpValue); void fm_lerp(const double p1[3],const double p2[3],double dest[3],double lerpValue); bool fm_insideTriangleXZ(const float test[3],const float p1[3],const float p2[3],const float p3[3]); bool fm_insideTriangleXZ(const double test[3],const double p1[3],const double p2[3],const double p3[3]); bool fm_insideAABB(const float pos[3],const float bmin[3],const float bmax[3]); bool fm_insideAABB(const double pos[3],const double bmin[3],const double bmax[3]); bool fm_insideAABB(const float obmin[3],const float obmax[3],const float tbmin[3],const float tbmax[3]); // test if bounding box tbmin/tmbax is fully inside obmin/obmax bool fm_insideAABB(const double obmin[3],const double obmax[3],const double tbmin[3],const double tbmax[3]); // test if bounding box tbmin/tmbax is fully inside obmin/obmax uint32_t fm_clipTestPoint(const float bmin[3],const float bmax[3],const float pos[3]); uint32_t fm_clipTestPoint(const double bmin[3],const double bmax[3],const double pos[3]); uint32_t fm_clipTestPointXZ(const float bmin[3],const float bmax[3],const float pos[3]); // only tests X and Z, not Y uint32_t fm_clipTestPointXZ(const double bmin[3],const double bmax[3],const double pos[3]); // only tests X and Z, not Y uint32_t fm_clipTestAABB(const float bmin[3],const float bmax[3],const float p1[3],const float p2[3],const float p3[3],uint32_t &andCode); uint32_t fm_clipTestAABB(const double bmin[3],const double bmax[3],const double p1[3],const double p2[3],const double p3[3],uint32_t &andCode); bool fm_lineTestAABBXZ(const float p1[3],const float p2[3],const float bmin[3],const float bmax[3],float &time); bool fm_lineTestAABBXZ(const double p1[3],const double p2[3],const double bmin[3],const double bmax[3],double &time); bool fm_lineTestAABB(const float p1[3],const float p2[3],const float bmin[3],const float bmax[3],float &time); bool fm_lineTestAABB(const double p1[3],const double p2[3],const double bmin[3],const double bmax[3],double &time); void fm_initMinMax(const float p[3],float bmin[3],float bmax[3]); void fm_initMinMax(const double p[3],double bmin[3],double bmax[3]); void fm_initMinMax(float bmin[3],float bmax[3]); void fm_initMinMax(double bmin[3],double bmax[3]); void fm_minmax(const float p[3],float bmin[3],float bmax[3]); // accumulate to a min-max value void fm_minmax(const double p[3],double bmin[3],double bmax[3]); // accumulate to a min-max value // Computes the diagonal length of the bounding box and then inflates the bounding box on all sides // by the ratio provided. void fm_inflateMinMax(float bmin[3], float bmax[3], float ratio); void fm_inflateMinMax(double bmin[3], double bmax[3], double ratio); float fm_solveX(const float plane[4],float y,float z); // solve for X given this plane equation and the other two components. double fm_solveX(const double plane[4],double y,double z); // solve for X given this plane equation and the other two components. float fm_solveY(const float plane[4],float x,float z); // solve for Y given this plane equation and the other two components. double fm_solveY(const double plane[4],double x,double z); // solve for Y given this plane equation and the other two components. float fm_solveZ(const float plane[4],float x,float y); // solve for Z given this plane equation and the other two components. double fm_solveZ(const double plane[4],double x,double y); // solve for Z given this plane equation and the other two components. bool fm_computeBestFitPlane(uint32_t vcount, // number of input data points const float *points, // starting address of points array. uint32_t vstride, // stride between input points. const float *weights, // *optional point weighting values. uint32_t wstride, // weight stride for each vertex. float plane[4]); bool fm_computeBestFitPlane(uint32_t vcount, // number of input data points const double *points, // starting address of points array. uint32_t vstride, // stride between input points. const double *weights, // *optional point weighting values. uint32_t wstride, // weight stride for each vertex. double plane[4]); bool fm_computeCentroid(uint32_t vcount, // number of input data points const float *points, // starting address of points array. uint32_t vstride, // stride between input points. float *center); bool fm_computeCentroid(uint32_t vcount, // number of input data points const double *points, // starting address of points array. uint32_t vstride, // stride between input points. double *center); float fm_computeBestFitAABB(uint32_t vcount,const float *points,uint32_t pstride,float bmin[3],float bmax[3]); // returns the diagonal distance double fm_computeBestFitAABB(uint32_t vcount,const double *points,uint32_t pstride,double bmin[3],double bmax[3]); // returns the diagonal distance float fm_computeBestFitSphere(uint32_t vcount,const float *points,uint32_t pstride,float center[3]); double fm_computeBestFitSphere(uint32_t vcount,const double *points,uint32_t pstride,double center[3]); bool fm_lineSphereIntersect(const float center[3],float radius,const float p1[3],const float p2[3],float intersect[3]); bool fm_lineSphereIntersect(const double center[3],double radius,const double p1[3],const double p2[3],double intersect[3]); bool fm_intersectRayAABB(const float bmin[3],const float bmax[3],const float pos[3],const float dir[3],float intersect[3]); bool fm_intersectLineSegmentAABB(const float bmin[3],const float bmax[3],const float p1[3],const float p2[3],float intersect[3]); bool fm_lineIntersectsTriangle(const float rayStart[3],const float rayEnd[3],const float p1[3],const float p2[3],const float p3[3],float sect[3]); bool fm_lineIntersectsTriangle(const double rayStart[3],const double rayEnd[3],const double p1[3],const double p2[3],const double p3[3],double sect[3]); bool fm_rayIntersectsTriangle(const float origin[3],const float dir[3],const float v0[3],const float v1[3],const float v2[3],float &t); bool fm_rayIntersectsTriangle(const double origin[3],const double dir[3],const double v0[3],const double v1[3],const double v2[3],double &t); bool fm_raySphereIntersect(const float center[3],float radius,const float pos[3],const float dir[3],float distance,float intersect[3]); bool fm_raySphereIntersect(const double center[3],double radius,const double pos[3],const double dir[3],double distance,double intersect[3]); void fm_catmullRom(float out_vector[3],const float p1[3],const float p2[3],const float p3[3],const float *p4, const float s); void fm_catmullRom(double out_vector[3],const double p1[3],const double p2[3],const double p3[3],const double *p4, const double s); bool fm_intersectAABB(const float bmin1[3],const float bmax1[3],const float bmin2[3],const float bmax2[3]); bool fm_intersectAABB(const double bmin1[3],const double bmax1[3],const double bmin2[3],const double bmax2[3]); // computes the rotation quaternion to go from unit-vector v0 to unit-vector v1 void fm_rotationArc(const float v0[3],const float v1[3],float quat[4]); void fm_rotationArc(const double v0[3],const double v1[3],double quat[4]); float fm_distancePointLineSegment(const float Point[3],const float LineStart[3],const float LineEnd[3],float intersection[3],LineSegmentType &type,float epsilon); double fm_distancePointLineSegment(const double Point[3],const double LineStart[3],const double LineEnd[3],double intersection[3],LineSegmentType &type,double epsilon); bool fm_colinear(const double p1[3],const double p2[3],const double p3[3],double epsilon=0.999); // true if these three points in a row are co-linear bool fm_colinear(const float p1[3],const float p2[3],const float p3[3],float epsilon=0.999f); bool fm_colinear(const float a1[3],const float a2[3],const float b1[3],const float b2[3],float epsilon=0.999f); // true if these two line segments are co-linear. bool fm_colinear(const double a1[3],const double a2[3],const double b1[3],const double b2[3],double epsilon=0.999); // true if these two line segments are co-linear. enum IntersectResult { IR_DONT_INTERSECT, IR_DO_INTERSECT, IR_COINCIDENT, IR_PARALLEL, }; IntersectResult fm_intersectLineSegments2d(const float a1[3], const float a2[3], const float b1[3], const float b2[3], float intersectionPoint[3]); IntersectResult fm_intersectLineSegments2d(const double a1[3],const double a2[3],const double b1[3],const double b2[3],double intersectionPoint[3]); IntersectResult fm_intersectLineSegments2dTime(const float a1[3], const float a2[3], const float b1[3], const float b2[3],float &t1,float &t2); IntersectResult fm_intersectLineSegments2dTime(const double a1[3],const double a2[3],const double b1[3],const double b2[3],double &t1,double &t2); // Plane-Triangle splitting enum PlaneTriResult { PTR_ON_PLANE, PTR_FRONT, PTR_BACK, PTR_SPLIT, }; PlaneTriResult fm_planeTriIntersection(const float plane[4], // the plane equation in Ax+By+Cz+D format const float *triangle, // the source triangle. uint32_t tstride, // stride in bytes of the input and output *vertices* float epsilon, // the co-planer epsilon value. float *front, // the triangle in front of the uint32_t &fcount, // number of vertices in the 'front' triangle float *back, // the triangle in back of the plane uint32_t &bcount); // the number of vertices in the 'back' triangle. PlaneTriResult fm_planeTriIntersection(const double plane[4], // the plane equation in Ax+By+Cz+D format const double *triangle, // the source triangle. uint32_t tstride, // stride in bytes of the input and output *vertices* double epsilon, // the co-planer epsilon value. double *front, // the triangle in front of the uint32_t &fcount, // number of vertices in the 'front' triangle double *back, // the triangle in back of the plane uint32_t &bcount); // the number of vertices in the 'back' triangle. void fm_intersectPointPlane(const float p1[3],const float p2[3],float *split,const float plane[4]); void fm_intersectPointPlane(const double p1[3],const double p2[3],double *split,const double plane[4]); PlaneTriResult fm_getSidePlane(const float p[3],const float plane[4],float epsilon); PlaneTriResult fm_getSidePlane(const double p[3],const double plane[4],double epsilon); void fm_computeBestFitOBB(uint32_t vcount,const float *points,uint32_t pstride,float *sides,float matrix[16],bool bruteForce=true); void fm_computeBestFitOBB(uint32_t vcount,const double *points,uint32_t pstride,double *sides,double matrix[16],bool bruteForce=true); void fm_computeBestFitOBB(uint32_t vcount,const float *points,uint32_t pstride,float *sides,float pos[3],float quat[4],bool bruteForce=true); void fm_computeBestFitOBB(uint32_t vcount,const double *points,uint32_t pstride,double *sides,double pos[3],double quat[4],bool bruteForce=true); void fm_computeBestFitABB(uint32_t vcount,const float *points,uint32_t pstride,float *sides,float pos[3]); void fm_computeBestFitABB(uint32_t vcount,const double *points,uint32_t pstride,double *sides,double pos[3]); //** Note, if the returned capsule height is less than zero, then you must represent it is a sphere of size radius. void fm_computeBestFitCapsule(uint32_t vcount,const float *points,uint32_t pstride,float &radius,float &height,float matrix[16],bool bruteForce=true); void fm_computeBestFitCapsule(uint32_t vcount,const double *points,uint32_t pstride,float &radius,float &height,double matrix[16],bool bruteForce=true); void fm_planeToMatrix(const float plane[4],float matrix[16]); // convert a plane equation to a 4x4 rotation matrix. Reference vector is 0,1,0 void fm_planeToQuat(const float plane[4],float quat[4],float pos[3]); // convert a plane equation to a quaternion and translation void fm_planeToMatrix(const double plane[4],double matrix[16]); // convert a plane equation to a 4x4 rotation matrix void fm_planeToQuat(const double plane[4],double quat[4],double pos[3]); // convert a plane equation to a quaternion and translation inline void fm_doubleToFloat3(const double p[3],float t[3]) { t[0] = (float) p[0]; t[1] = (float)p[1]; t[2] = (float)p[2]; }; inline void fm_floatToDouble3(const float p[3],double t[3]) { t[0] = (double)p[0]; t[1] = (double)p[1]; t[2] = (double)p[2]; }; void fm_eulerMatrix(float ax,float ay,float az,float matrix[16]); // convert euler (in radians) to a dest 4x4 matrix (translation set to zero) void fm_eulerMatrix(double ax,double ay,double az,double matrix[16]); // convert euler (in radians) to a dest 4x4 matrix (translation set to zero) float fm_computeMeshVolume(const float *vertices,uint32_t tcount,const uint32_t *indices); double fm_computeMeshVolume(const double *vertices,uint32_t tcount,const uint32_t *indices); #define FM_DEFAULT_GRANULARITY 0.001f // 1 millimeter is the default granularity class fm_VertexIndex { public: virtual uint32_t getIndex(const float pos[3],bool &newPos) = 0; // get welded index for this float vector[3] virtual uint32_t getIndex(const double pos[3],bool &newPos) = 0; // get welded index for this double vector[3] virtual const float * getVerticesFloat(void) const = 0; virtual const double * getVerticesDouble(void) const = 0; virtual const float * getVertexFloat(uint32_t index) const = 0; virtual const double * getVertexDouble(uint32_t index) const = 0; virtual uint32_t getVcount(void) const = 0; virtual bool isDouble(void) const = 0; virtual bool saveAsObj(const char *fname,uint32_t tcount,uint32_t *indices) = 0; }; fm_VertexIndex * fm_createVertexIndex(double granularity,bool snapToGrid); // create an indexed vertex system for doubles fm_VertexIndex * fm_createVertexIndex(float granularity,bool snapToGrid); // create an indexed vertext system for floats void fm_releaseVertexIndex(fm_VertexIndex *vindex); class fm_Triangulate { public: virtual const double * triangulate3d(uint32_t pcount, const double *points, uint32_t vstride, uint32_t &tcount, bool consolidate, double epsilon) = 0; virtual const float * triangulate3d(uint32_t pcount, const float *points, uint32_t vstride, uint32_t &tcount, bool consolidate, float epsilon) = 0; }; fm_Triangulate * fm_createTriangulate(void); void fm_releaseTriangulate(fm_Triangulate *t); const float * fm_getPoint(const float *points,uint32_t pstride,uint32_t index); const double * fm_getPoint(const double *points,uint32_t pstride,uint32_t index); bool fm_insideTriangle(float Ax, float Ay,float Bx, float By,float Cx, float Cy,float Px, float Py); bool fm_insideTriangle(double Ax, double Ay,double Bx, double By,double Cx, double Cy,double Px, double Py); float fm_areaPolygon2d(uint32_t pcount,const float *points,uint32_t pstride); double fm_areaPolygon2d(uint32_t pcount,const double *points,uint32_t pstride); bool fm_pointInsidePolygon2d(uint32_t pcount,const float *points,uint32_t pstride,const float *point,uint32_t xindex=0,uint32_t yindex=1); bool fm_pointInsidePolygon2d(uint32_t pcount,const double *points,uint32_t pstride,const double *point,uint32_t xindex=0,uint32_t yindex=1); uint32_t fm_consolidatePolygon(uint32_t pcount,const float *points,uint32_t pstride,float *dest,float epsilon=0.999999f); // collapses co-linear edges. uint32_t fm_consolidatePolygon(uint32_t pcount,const double *points,uint32_t pstride,double *dest,double epsilon=0.999999); // collapses co-linear edges. bool fm_computeSplitPlane(uint32_t vcount,const double *vertices,uint32_t tcount,const uint32_t *indices,double *plane); bool fm_computeSplitPlane(uint32_t vcount,const float *vertices,uint32_t tcount,const uint32_t *indices,float *plane); void fm_nearestPointInTriangle(const float *pos,const float *p1,const float *p2,const float *p3,float *nearest); void fm_nearestPointInTriangle(const double *pos,const double *p1,const double *p2,const double *p3,double *nearest); float fm_areaTriangle(const float *p1,const float *p2,const float *p3); double fm_areaTriangle(const double *p1,const double *p2,const double *p3); void fm_subtract(const float *A,const float *B,float *diff); // compute A-B and store the result in 'diff' void fm_subtract(const double *A,const double *B,double *diff); // compute A-B and store the result in 'diff' void fm_multiply(float *A,float scaler); void fm_multiply(double *A,double scaler); void fm_add(const float *A,const float *B,float *sum); void fm_add(const double *A,const double *B,double *sum); void fm_copy3(const float *source,float *dest); void fm_copy3(const double *source,double *dest); // re-indexes an indexed triangle mesh but drops unused vertices. The output_indices can be the same pointer as the input indices. // the output_vertices can point to the input vertices if you desire. The output_vertices buffer should be at least the same size // is the input buffer. The routine returns the new vertex count after re-indexing. uint32_t fm_copyUniqueVertices(uint32_t vcount,const float *input_vertices,float *output_vertices,uint32_t tcount,const uint32_t *input_indices,uint32_t *output_indices); uint32_t fm_copyUniqueVertices(uint32_t vcount,const double *input_vertices,double *output_vertices,uint32_t tcount,const uint32_t *input_indices,uint32_t *output_indices); bool fm_isMeshCoplanar(uint32_t tcount,const uint32_t *indices,const float *vertices,bool doubleSided); // returns true if this collection of indexed triangles are co-planar! bool fm_isMeshCoplanar(uint32_t tcount,const uint32_t *indices,const double *vertices,bool doubleSided); // returns true if this collection of indexed triangles are co-planar! bool fm_samePlane(const float p1[4],const float p2[4],float normalEpsilon=0.01f,float dEpsilon=0.001f,bool doubleSided=false); // returns true if these two plane equations are identical within an epsilon bool fm_samePlane(const double p1[4],const double p2[4],double normalEpsilon=0.01,double dEpsilon=0.001,bool doubleSided=false); void fm_OBBtoAABB(const float obmin[3],const float obmax[3],const float matrix[16],float abmin[3],float abmax[3]); // a utility class that will tessellate a mesh. class fm_Tesselate { public: virtual const uint32_t * tesselate(fm_VertexIndex *vindex,uint32_t tcount,const uint32_t *indices,float longEdge,uint32_t maxDepth,uint32_t &outcount) = 0; }; fm_Tesselate * fm_createTesselate(void); void fm_releaseTesselate(fm_Tesselate *t); void fm_computeMeanNormals(uint32_t vcount, // the number of vertices const float *vertices, // the base address of the vertex position data. uint32_t vstride, // the stride between position data. float *normals, // the base address of the destination for mean vector normals uint32_t nstride, // the stride between normals uint32_t tcount, // the number of triangles const uint32_t *indices); // the triangle indices void fm_computeMeanNormals(uint32_t vcount, // the number of vertices const double *vertices, // the base address of the vertex position data. uint32_t vstride, // the stride between position data. double *normals, // the base address of the destination for mean vector normals uint32_t nstride, // the stride between normals uint32_t tcount, // the number of triangles const uint32_t *indices); // the triangle indices bool fm_isValidTriangle(const float *p1,const float *p2,const float *p3,float epsilon=0.00001f); bool fm_isValidTriangle(const double *p1,const double *p2,const double *p3,double epsilon=0.00001f); }; // end of namespace #endif
30,412
C
58.86811
206
0.705478
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/vhacdCircularList.h
/* Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #pragma once #ifndef VHACD_CIRCULAR_LIST_H #define VHACD_CIRCULAR_LIST_H #include <stdlib.h> namespace VHACD { //! CircularListElement class. template <typename T> class CircularListElement { public: T& GetData() { return m_data; } const T& GetData() const { return m_data; } CircularListElement<T>*& GetNext() { return m_next; } CircularListElement<T>*& GetPrev() { return m_prev; } const CircularListElement<T>*& GetNext() const { return m_next; } const CircularListElement<T>*& GetPrev() const { return m_prev; } //! Constructor CircularListElement(const T& data) { m_data = data; } CircularListElement(void) {} //! Destructor ~CircularListElement(void) {} private: T m_data; CircularListElement<T>* m_next; CircularListElement<T>* m_prev; CircularListElement(const CircularListElement& rhs); }; //! CircularList class. template <typename T> class CircularList { public: CircularListElement<T>*& GetHead() { return m_head; } const CircularListElement<T>* GetHead() const { return m_head; } bool IsEmpty() const { return (m_size == 0); } size_t GetSize() const { return m_size; } const T& GetData() const { return m_head->GetData(); } T& GetData() { return m_head->GetData(); } bool Delete(); bool Delete(CircularListElement<T>* element); CircularListElement<T>* Add(const T* data = 0); CircularListElement<T>* Add(const T& data); bool Next(); bool Prev(); void Clear() { while (Delete()) ; }; const CircularList& operator=(const CircularList& rhs); //! Constructor CircularList() { m_head = 0; m_size = 0; } CircularList(const CircularList& rhs); //! Destructor ~CircularList(void) { Clear(); }; private: CircularListElement<T>* m_head; //!< a pointer to the head of the circular list size_t m_size; //!< number of element in the circular list }; } #include "vhacdCircularList.inl" #endif // VHACD_CIRCULAR_LIST_H
3,512
C
43.468354
756
0.711845
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/btAlignedObjectArray.h
/* Bullet Continuous Collision Detection and Physics Library Copyright (c) 2003-2006 Erwin Coumans http://continuousphysics.com/Bullet/ This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. */ #ifndef BT_OBJECT_ARRAY__ #define BT_OBJECT_ARRAY__ #include "btAlignedAllocator.h" #include "btScalar.h" // has definitions like SIMD_FORCE_INLINE ///If the platform doesn't support placement new, you can disable BT_USE_PLACEMENT_NEW ///then the btAlignedObjectArray doesn't support objects with virtual methods, and non-trivial constructors/destructors ///You can enable BT_USE_MEMCPY, then swapping elements in the array will use memcpy instead of operator= ///see discussion here: http://continuousphysics.com/Bullet/phpBB2/viewtopic.php?t=1231 and ///http://www.continuousphysics.com/Bullet/phpBB2/viewtopic.php?t=1240 #define BT_USE_PLACEMENT_NEW 1 //#define BT_USE_MEMCPY 1 //disable, because it is cumbersome to find out for each platform where memcpy is defined. It can be in <memory.h> or <string.h> or otherwise... #define BT_ALLOW_ARRAY_COPY_OPERATOR // enabling this can accidently perform deep copies of data if you are not careful #ifdef BT_USE_MEMCPY #include <memory.h> #include <string.h> #endif //BT_USE_MEMCPY #ifdef BT_USE_PLACEMENT_NEW #include <new> //for placement new #endif //BT_USE_PLACEMENT_NEW ///The btAlignedObjectArray template class uses a subset of the stl::vector interface for its methods ///It is developed to replace stl::vector to avoid portability issues, including STL alignment issues to add SIMD/SSE data template <typename T> //template <class T> class btAlignedObjectArray { btAlignedAllocator<T, 16> m_allocator; int32_t m_size; int32_t m_capacity; T* m_data; //PCK: added this line bool m_ownsMemory; #ifdef BT_ALLOW_ARRAY_COPY_OPERATOR public: SIMD_FORCE_INLINE btAlignedObjectArray<T>& operator=(const btAlignedObjectArray<T>& other) { copyFromArray(other); return *this; } #else //BT_ALLOW_ARRAY_COPY_OPERATOR private: SIMD_FORCE_INLINE btAlignedObjectArray<T>& operator=(const btAlignedObjectArray<T>& other); #endif //BT_ALLOW_ARRAY_COPY_OPERATOR protected: SIMD_FORCE_INLINE int32_t allocSize(int32_t size) { return (size ? size * 2 : 1); } SIMD_FORCE_INLINE void copy(int32_t start, int32_t end, T* dest) const { int32_t i; for (i = start; i < end; ++i) #ifdef BT_USE_PLACEMENT_NEW new (&dest[i]) T(m_data[i]); #else dest[i] = m_data[i]; #endif //BT_USE_PLACEMENT_NEW } SIMD_FORCE_INLINE void init() { //PCK: added this line m_ownsMemory = true; m_data = 0; m_size = 0; m_capacity = 0; } SIMD_FORCE_INLINE void destroy(int32_t first, int32_t last) { int32_t i; for (i = first; i < last; i++) { m_data[i].~T(); } } SIMD_FORCE_INLINE void* allocate(int32_t size) { if (size) return m_allocator.allocate(size); return 0; } SIMD_FORCE_INLINE void deallocate() { if (m_data) { //PCK: enclosed the deallocation in this block if (m_ownsMemory) { m_allocator.deallocate(m_data); } m_data = 0; } } public: btAlignedObjectArray() { init(); } ~btAlignedObjectArray() { clear(); } ///Generally it is best to avoid using the copy constructor of an btAlignedObjectArray, and use a (const) reference to the array instead. btAlignedObjectArray(const btAlignedObjectArray& otherArray) { init(); int32_t otherSize = otherArray.size(); resize(otherSize); otherArray.copy(0, otherSize, m_data); } /// return the number of elements in the array SIMD_FORCE_INLINE int32_t size() const { return m_size; } SIMD_FORCE_INLINE const T& at(int32_t n) const { btAssert(n >= 0); btAssert(n < size()); return m_data[n]; } SIMD_FORCE_INLINE T& at(int32_t n) { btAssert(n >= 0); btAssert(n < size()); return m_data[n]; } SIMD_FORCE_INLINE const T& operator[](int32_t n) const { btAssert(n >= 0); btAssert(n < size()); return m_data[n]; } SIMD_FORCE_INLINE T& operator[](int32_t n) { btAssert(n >= 0); btAssert(n < size()); return m_data[n]; } ///clear the array, deallocated memory. Generally it is better to use array.resize(0), to reduce performance overhead of run-time memory (de)allocations. SIMD_FORCE_INLINE void clear() { destroy(0, size()); deallocate(); init(); } SIMD_FORCE_INLINE void pop_back() { btAssert(m_size > 0); m_size--; m_data[m_size].~T(); } ///resize changes the number of elements in the array. If the new size is larger, the new elements will be constructed using the optional second argument. ///when the new number of elements is smaller, the destructor will be called, but memory will not be freed, to reduce performance overhead of run-time memory (de)allocations. SIMD_FORCE_INLINE void resize(int32_t newsize, const T& fillData = T()) { int32_t curSize = size(); if (newsize < curSize) { for (int32_t i = newsize; i < curSize; i++) { m_data[i].~T(); } } else { if (newsize > size()) { reserve(newsize); } #ifdef BT_USE_PLACEMENT_NEW for (int32_t i = curSize; i < newsize; i++) { new (&m_data[i]) T(fillData); } #endif //BT_USE_PLACEMENT_NEW } m_size = newsize; } SIMD_FORCE_INLINE T& expandNonInitializing() { int32_t sz = size(); if (sz == capacity()) { reserve(allocSize(size())); } m_size++; return m_data[sz]; } SIMD_FORCE_INLINE T& expand(const T& fillValue = T()) { int32_t sz = size(); if (sz == capacity()) { reserve(allocSize(size())); } m_size++; #ifdef BT_USE_PLACEMENT_NEW new (&m_data[sz]) T(fillValue); //use the in-place new (not really allocating heap memory) #endif return m_data[sz]; } SIMD_FORCE_INLINE void push_back(const T& _Val) { int32_t sz = size(); if (sz == capacity()) { reserve(allocSize(size())); } #ifdef BT_USE_PLACEMENT_NEW new (&m_data[m_size]) T(_Val); #else m_data[size()] = _Val; #endif //BT_USE_PLACEMENT_NEW m_size++; } /// return the pre-allocated (reserved) elements, this is at least as large as the total number of elements,see size() and reserve() SIMD_FORCE_INLINE int32_t capacity() const { return m_capacity; } SIMD_FORCE_INLINE void reserve(int32_t _Count) { // determine new minimum length of allocated storage if (capacity() < _Count) { // not enough room, reallocate T* s = (T*)allocate(_Count); copy(0, size(), s); destroy(0, size()); deallocate(); //PCK: added this line m_ownsMemory = true; m_data = s; m_capacity = _Count; } } class less { public: bool operator()(const T& a, const T& b) { return (a < b); } }; template <typename L> void quickSortInternal(const L& CompareFunc, int32_t lo, int32_t hi) { // lo is the lower index, hi is the upper index // of the region of array a that is to be sorted int32_t i = lo, j = hi; T x = m_data[(lo + hi) / 2]; // partition do { while (CompareFunc(m_data[i], x)) i++; while (CompareFunc(x, m_data[j])) j--; if (i <= j) { swap(i, j); i++; j--; } } while (i <= j); // recursion if (lo < j) quickSortInternal(CompareFunc, lo, j); if (i < hi) quickSortInternal(CompareFunc, i, hi); } template <typename L> void quickSort(const L& CompareFunc) { //don't sort 0 or 1 elements if (size() > 1) { quickSortInternal(CompareFunc, 0, size() - 1); } } ///heap sort from http://www.csse.monash.edu.au/~lloyd/tildeAlgDS/Sort/Heap/ template <typename L> void downHeap(T* pArr, int32_t k, int32_t n, const L& CompareFunc) { /* PRE: a[k+1..N] is a heap */ /* POST: a[k..N] is a heap */ T temp = pArr[k - 1]; /* k has child(s) */ while (k <= n / 2) { int32_t child = 2 * k; if ((child < n) && CompareFunc(pArr[child - 1], pArr[child])) { child++; } /* pick larger child */ if (CompareFunc(temp, pArr[child - 1])) { /* move child up */ pArr[k - 1] = pArr[child - 1]; k = child; } else { break; } } pArr[k - 1] = temp; } /*downHeap*/ void swap(int32_t index0, int32_t index1) { #ifdef BT_USE_MEMCPY char temp[sizeof(T)]; memcpy(temp, &m_data[index0], sizeof(T)); memcpy(&m_data[index0], &m_data[index1], sizeof(T)); memcpy(&m_data[index1], temp, sizeof(T)); #else T temp = m_data[index0]; m_data[index0] = m_data[index1]; m_data[index1] = temp; #endif //BT_USE_PLACEMENT_NEW } template <typename L> void heapSort(const L& CompareFunc) { /* sort a[0..N-1], N.B. 0 to N-1 */ int32_t k; int32_t n = m_size; for (k = n / 2; k > 0; k--) { downHeap(m_data, k, n, CompareFunc); } /* a[1..N] is now a heap */ while (n >= 1) { swap(0, n - 1); /* largest of a[0..n-1] */ n = n - 1; /* restore a[1..i-1] heap */ downHeap(m_data, 1, n, CompareFunc); } } ///non-recursive binary search, assumes sorted array int32_t findBinarySearch(const T& key) const { int32_t first = 0; int32_t last = size() - 1; //assume sorted array while (first <= last) { int32_t mid = (first + last) / 2; // compute mid point. if (key > m_data[mid]) first = mid + 1; // repeat search in top half. else if (key < m_data[mid]) last = mid - 1; // repeat search in bottom half. else return mid; // found it. return position ///// } return size(); // failed to find key } int32_t findLinearSearch(const T& key) const { int32_t index = size(); int32_t i; for (i = 0; i < size(); i++) { if (m_data[i] == key) { index = i; break; } } return index; } void remove(const T& key) { int32_t findIndex = findLinearSearch(key); if (findIndex < size()) { swap(findIndex, size() - 1); pop_back(); } } //PCK: whole function void initializeFromBuffer(void* buffer, int32_t size, int32_t capacity) { clear(); m_ownsMemory = false; m_data = (T*)buffer; m_size = size; m_capacity = capacity; } void copyFromArray(const btAlignedObjectArray& otherArray) { int32_t otherSize = otherArray.size(); resize(otherSize); otherArray.copy(0, otherSize, m_data); } }; #endif //BT_OBJECT_ARRAY__
12,640
C
27.153675
243
0.55712
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/vhacdICHull.h
/* Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #pragma once #ifndef VHACD_ICHULL_H #define VHACD_ICHULL_H #include "vhacdManifoldMesh.h" #include "vhacdVector.h" namespace VHACD { //! Incremental Convex Hull algorithm (cf. http://cs.smith.edu/~orourke/books/ftp.html ). enum ICHullError { ICHullErrorOK = 0, ICHullErrorCoplanarPoints, ICHullErrorNoVolume, ICHullErrorInconsistent, ICHullErrorNotEnoughPoints }; class ICHull { public: static const double sc_eps; //! bool IsFlat() { return m_isFlat; } //! Returns the computed mesh TMMesh& GetMesh() { return m_mesh; } //! Add one point to the convex-hull bool AddPoint(const Vec3<double>& point) { return AddPoints(&point, 1); } //! Add one point to the convex-hull bool AddPoint(const Vec3<double>& point, int32_t id); //! Add points to the convex-hull bool AddPoints(const Vec3<double>* points, size_t nPoints); //! ICHullError Process(); //! ICHullError Process(const uint32_t nPointsCH, const double minVolume = 0.0); //! bool IsInside(const Vec3<double>& pt0, const double eps = 0.0); //! const ICHull& operator=(ICHull& rhs); //! Constructor ICHull(); //! Destructor ~ICHull(void){}; private: //! DoubleTriangle builds the initial double triangle. It first finds 3 noncollinear points and makes two faces out of them, in opposite order. It then finds a fourth point that is not coplanar with that face. The vertices are stored in the face structure in counterclockwise order so that the volume between the face and the point is negative. Lastly, the 3 newfaces to the fourth point are constructed and the data structures are cleaned up. ICHullError DoubleTriangle(); //! MakeFace creates a new face structure from three vertices (in ccw order). It returns a pointer to the face. CircularListElement<TMMTriangle>* MakeFace(CircularListElement<TMMVertex>* v0, CircularListElement<TMMVertex>* v1, CircularListElement<TMMVertex>* v2, CircularListElement<TMMTriangle>* fold); //! CircularListElement<TMMTriangle>* MakeConeFace(CircularListElement<TMMEdge>* e, CircularListElement<TMMVertex>* v); //! bool ProcessPoint(); //! bool ComputePointVolume(double& totalVolume, bool markVisibleFaces); //! bool FindMaxVolumePoint(const double minVolume = 0.0); //! bool CleanEdges(); //! bool CleanVertices(uint32_t& addedPoints); //! bool CleanTriangles(); //! bool CleanUp(uint32_t& addedPoints); //! bool MakeCCW(CircularListElement<TMMTriangle>* f, CircularListElement<TMMEdge>* e, CircularListElement<TMMVertex>* v); void Clear(); private: static const int32_t sc_dummyIndex; TMMesh m_mesh; SArray<CircularListElement<TMMEdge>*> m_edgesToDelete; SArray<CircularListElement<TMMEdge>*> m_edgesToUpdate; SArray<CircularListElement<TMMTriangle>*> m_trianglesToDelete; Vec3<double> m_normal; bool m_isFlat; ICHull(const ICHull& rhs); }; } #endif // VHACD_ICHULL_H
4,551
C
44.979798
756
0.728631
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/public/VHACD.h
/* Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #pragma once #ifndef VHACD_H #define VHACD_H #define VHACD_VERSION_MAJOR 2 #define VHACD_VERSION_MINOR 3 // Changes for version 2.3 // // m_gamma : Has been removed. This used to control the error metric to merge convex hulls. Now it uses the 'm_maxConvexHulls' value instead. // m_maxConvexHulls : This is the maximum number of convex hulls to produce from the merge operation; replaces 'm_gamma'. // // Note that decomposition depth is no longer a user provided value. It is now derived from the // maximum number of hulls requested. // // As a convenience to the user, each convex hull produced now includes the volume of the hull as well as it's center. // // This version supports a convenience method to automatically make V-HACD run asynchronously in a background thread. // To get a fully asynchronous version, call 'CreateVHACD_ASYNC' instead of 'CreateVHACD'. You get the same interface however, // now when computing convex hulls, it is no longer a blocking operation. All callback messages are still returned // in the application's thread so you don't need to worry about mutex locks or anything in that case. // To tell if the operation is complete, the application should call 'IsReady'. This will return true if // the last approximation operation is complete and will dispatch any pending messages. // If you call 'Compute' while a previous operation was still running, it will automatically cancel the last request // and begin a new one. To cancel a currently running approximation just call 'Cancel'. #include <stdint.h> namespace VHACD { class IVHACD { public: class IUserCallback { public: virtual ~IUserCallback(){}; virtual void Update(const double overallProgress, const double stageProgress, const double operationProgress, const char* const stage, const char* const operation) = 0; }; class IUserLogger { public: virtual ~IUserLogger(){}; virtual void Log(const char* const msg) = 0; }; class ConvexHull { public: double* m_points; uint32_t* m_triangles; uint32_t m_nPoints; uint32_t m_nTriangles; double m_volume; double m_center[3]; }; class Parameters { public: Parameters(void) { Init(); } void Init(void) { m_resolution = 100000; m_concavity = 0.001; m_planeDownsampling = 4; m_convexhullDownsampling = 4; m_alpha = 0.05; m_beta = 0.05; m_pca = 0; m_mode = 0; // 0: voxel-based (recommended), 1: tetrahedron-based m_maxNumVerticesPerCH = 64; m_minVolumePerCH = 0.0001; m_callback = 0; m_logger = 0; m_convexhullApproximation = true; m_oclAcceleration = true; m_maxConvexHulls = 1024; m_projectHullVertices = true; // This will project the output convex hull vertices onto the original source mesh to increase the floating point accuracy of the results } double m_concavity; double m_alpha; double m_beta; double m_minVolumePerCH; IUserCallback* m_callback; IUserLogger* m_logger; uint32_t m_resolution; uint32_t m_maxNumVerticesPerCH; uint32_t m_planeDownsampling; uint32_t m_convexhullDownsampling; uint32_t m_pca; uint32_t m_mode; uint32_t m_convexhullApproximation; uint32_t m_oclAcceleration; uint32_t m_maxConvexHulls; bool m_projectHullVertices; }; class Constraint { public: uint32_t mHullA; // Convex Hull A index uint32_t mHullB; // Convex Hull B index double mConstraintPoint[3]; // The point of intersection between the two convex hulls }; virtual void Cancel() = 0; virtual bool Compute(const float* const points, const uint32_t countPoints, const uint32_t* const triangles, const uint32_t countTriangles, const Parameters& params) = 0; virtual bool Compute(const double* const points, const uint32_t countPoints, const uint32_t* const triangles, const uint32_t countTriangles, const Parameters& params) = 0; virtual uint32_t GetNConvexHulls() const = 0; virtual void GetConvexHull(const uint32_t index, ConvexHull& ch) const = 0; virtual void Clean(void) = 0; // release internally allocated memory virtual void Release(void) = 0; // release IVHACD virtual bool OCLInit(void* const oclDevice, IUserLogger* const logger = 0) = 0; virtual bool OCLRelease(IUserLogger* const logger = 0) = 0; // Will compute the center of mass of the convex hull decomposition results and return it // in 'centerOfMass'. Returns false if the center of mass could not be computed. virtual bool ComputeCenterOfMass(double centerOfMass[3]) const = 0; // Will analyze the HACD results and compute the constraints solutions. // It will analyze the point at which any two convex hulls touch each other and // return the total number of constraint pairs found virtual uint32_t ComputeConstraints(void) = 0; // Returns a pointer to the constraint index; null if the index is not valid or // the user did not previously call 'ComputeConstraints' virtual const Constraint *GetConstraint(uint32_t index) const = 0; // In synchronous mode (non-multi-threaded) the state is always 'ready' // In asynchronous mode, this returns true if the background thread is not still actively computing // a new solution. In an asynchronous config the 'IsReady' call will report any update or log // messages in the caller's current thread. virtual bool IsReady(void) const { return true; } protected: virtual ~IVHACD(void) {} }; IVHACD* CreateVHACD(void); IVHACD* CreateVHACD_ASYNC(void); } #endif // VHACD_H
7,574
C
43.298245
756
0.686163
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/assetutils/NvBlastExtAssetUtils.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastExtAssetUtils.h" #include "NvBlast.h" #include "NvBlastIndexFns.h" #include "NvBlastMemory.h" #include "NvBlastGlobals.h" #include "math.h" using namespace Nv::Blast; /** Fill the chunk and bond descriptors from an asset. \param[out] chunkDescsWritten the number of chunk descriptors written to chunkDescs \param[out] bondDescsWritten the number of bond descriptors written to bondDescs \param[out] chunkDescs user-supplied buffer of NvBlastChunkDesc. Size must be at least NvBlastAssetGetChunkCount(asset, logFn) \param[out] bondDescs user-supplied buffer of NvBlastBondDesc. Size must be at least NvBlastAssetGetBondCount(asset, logFn) \param[in] asset asset from which to extract descriptors */ static void fillChunkAndBondDescriptorsFromAsset ( uint32_t& chunkDescsWritten, uint32_t& bondDescsWritten, NvBlastChunkDesc* chunkDescs, NvBlastBondDesc* bondDescs, const NvBlastAsset* asset ) { chunkDescsWritten = 0; bondDescsWritten = 0; // Chunk descs const uint32_t assetChunkCount = NvBlastAssetGetChunkCount(asset, logLL); const NvBlastChunk* assetChunk = NvBlastAssetGetChunks(asset, logLL); for (uint32_t i = 0; i < assetChunkCount; ++i, ++assetChunk) { NvBlastChunkDesc& chunkDesc = chunkDescs[chunkDescsWritten++]; memcpy(chunkDesc.centroid, assetChunk->centroid, sizeof(float) * 3); chunkDesc.volume = assetChunk->volume; chunkDesc.parentChunkDescIndex = assetChunk->parentChunkIndex; chunkDesc.flags = 0; // To be filled in below chunkDesc.userData = assetChunk->userData; } // Bond descs const uint32_t assetBondCount = NvBlastAssetGetBondCount(asset, logLL); const NvBlastBond* assetBond = NvBlastAssetGetBonds(asset, logLL); for (uint32_t i = 0; i < assetBondCount; ++i, ++assetBond) { NvBlastBondDesc& bondDesc = bondDescs[bondDescsWritten++]; memcpy(&bondDesc.bond, assetBond, sizeof(NvBlastBond)); } // Walk the graph and restore connection descriptors const NvBlastSupportGraph graph = NvBlastAssetGetSupportGraph(asset, logLL); for (uint32_t i = 0; i < graph.nodeCount; ++i) { const int32_t currentChunk = graph.chunkIndices[i]; if (isInvalidIndex(currentChunk)) { continue; } chunkDescs[currentChunk].flags |= NvBlastChunkDesc::SupportFlag; // Filling in chunk flags here for (uint32_t j = graph.adjacencyPartition[i]; j < graph.adjacencyPartition[i + 1]; ++j) { NvBlastBondDesc& bondDesc = bondDescs[graph.adjacentBondIndices[j]]; bondDesc.chunkIndices[0] = currentChunk; const uint32_t adjacentChunkIndex = graph.chunkIndices[graph.adjacentNodeIndices[j]]; bondDesc.chunkIndices[1] = adjacentChunkIndex; } } } /** Scale a 3-vector v in-place. \param[in,out] v The vector to scale. \param[in] s The scale. Represents the diagonal elements of a diagonal matrix. The result will be v <- s*v. */ static inline void scale(NvcVec3& v, const NvcVec3& s) { v.x *= s.x; v.y *= s.y; v.z *= s.z; } /** Rotate a 3-vector v in-place using a rotation represented by a quaternion q. \param[in,out] v The vector to rotate. \param[in] q The quaternion representation the rotation. The format of q is { x, y, z, w } where (x,y,z) is the vector part and w is the scalar part. The quaternion q MUST be normalized. */ static inline void rotate(NvcVec3& v, const NvcQuat& q) { const float vx = 2.0f * v.x; const float vy = 2.0f * v.y; const float vz = 2.0f * v.z; const float w2 = q.w * q.w - 0.5f; const float dot2 = (q.x * vx + q.y * vy + q.z * vz); v.x = vx * w2 + (q.y * vz - q.z * vy) * q.w + q.x * dot2; v.y = vy * w2 + (q.z * vx - q.x * vz) * q.w + q.y * dot2; v.z = vz * w2 + (q.x * vy - q.y * vx) * q.w + q.z * dot2; } /** Translate a 3-vector v in-place. \param[in,out] v The vector to translate. \param[in] t The translation. The result will be v <- v+t. */ static inline void translate(NvcVec3& v, const NvcVec3& t) { v.x += t.x; v.y += t.y; v.z += t.z; } NvBlastAsset* NvBlastExtAssetUtilsAddExternalBonds ( const NvBlastAsset* asset, const uint32_t* externalBoundChunks, uint32_t externalBoundChunkCount, const NvcVec3* bondDirections, const uint32_t* bondUserData ) { const uint32_t chunkCount = NvBlastAssetGetChunkCount(asset, logLL); const uint32_t oldBondCount = NvBlastAssetGetBondCount(asset, logLL); const uint32_t newBondCount = oldBondCount + externalBoundChunkCount; NvBlastChunkDesc* chunkDescs = static_cast<NvBlastChunkDesc*>(NVBLAST_ALLOC(chunkCount * sizeof(NvBlastChunkDesc))); NvBlastBondDesc* bondDescs = static_cast<NvBlastBondDesc*>(NVBLAST_ALLOC(newBondCount * sizeof(NvBlastBondDesc))); // Create chunk descs uint32_t chunkDescsWritten; uint32_t bondDescsWritten; fillChunkAndBondDescriptorsFromAsset(chunkDescsWritten, bondDescsWritten, chunkDescs, bondDescs, asset); // Add world bonds uint32_t bondCount = oldBondCount; for (uint32_t i = 0; i < externalBoundChunkCount; i++) { NvBlastBondDesc& bondDesc = bondDescs[bondCount++]; const uint32_t chunkIndex = externalBoundChunks[i]; bondDesc.chunkIndices[0] = chunkIndex; bondDesc.chunkIndices[1] = invalidIndex<uint32_t>(); memcpy(&bondDesc.bond.normal, bondDirections + i, sizeof(float) * 3); bondDesc.bond.area = 1.0f; // Should be set by user memcpy(&bondDesc.bond.centroid, chunkDescs[chunkIndex].centroid, sizeof(float) * 3); bondDesc.bond.userData = bondUserData != nullptr ? bondUserData[i] : 0; } // Create new asset NvBlastAssetDesc assetDesc; assetDesc.chunkCount = chunkCount; assetDesc.chunkDescs = chunkDescs; assetDesc.bondCount = bondCount; assetDesc.bondDescs = bondDescs; void* scratch = NVBLAST_ALLOC(NvBlastGetRequiredScratchForCreateAsset(&assetDesc, logLL)); NvBlastAsset* newAsset = NvBlastCreateAsset(NVBLAST_ALLOC(NvBlastGetAssetMemorySize(&assetDesc, logLL)), &assetDesc, scratch, logLL); // Free buffers NVBLAST_FREE(scratch); NVBLAST_FREE(bondDescs); NVBLAST_FREE(chunkDescs); return newAsset; } NvBlastAssetDesc NvBlastExtAssetUtilsCreateDesc(const NvBlastAsset* asset) { return NvBlastExtAssetUtilsMergeAssets(&asset, nullptr, nullptr, nullptr, 1, nullptr, 0, nullptr, nullptr, 0); } NvBlastAssetDesc NvBlastExtAssetUtilsMergeAssets ( const NvBlastAsset** components, const NvcVec3* scales, const NvcQuat* rotations, const NvcVec3* translations, uint32_t componentCount, const NvBlastExtAssetUtilsBondDesc* newBondDescs, uint32_t newBondCount, uint32_t* chunkIndexOffsets, uint32_t* chunkReorderMap, uint32_t chunkReorderMapSize ) { // Count the total number of chunks and bonds in the new asset uint32_t totalChunkCount = 0; uint32_t totalBondCount = newBondCount; for (uint32_t c = 0; c < componentCount; ++c) { totalChunkCount += NvBlastAssetGetChunkCount(components[c], logLL); totalBondCount += NvBlastAssetGetBondCount(components[c], logLL); } // Allocate space for chunk and bond descriptors NvBlastChunkDesc* chunkDescs = static_cast<NvBlastChunkDesc*>(NVBLAST_ALLOC(totalChunkCount * sizeof(NvBlastChunkDesc))); NvBlastBondDesc* bondDescs = static_cast<NvBlastBondDesc*>(NVBLAST_ALLOC(totalBondCount * sizeof(NvBlastBondDesc))); // Create a list of chunk index offsets per component uint32_t* offsetStackAlloc = static_cast<uint32_t*>(NvBlastAlloca(componentCount * sizeof(uint32_t))); if (chunkIndexOffsets == nullptr) { chunkIndexOffsets = offsetStackAlloc; // Use local stack alloc if no array is provided } // Fill the chunk and bond descriptors from the components uint32_t chunkCount = 0; uint32_t bondCount = 0; for (uint32_t c = 0; c < componentCount; ++c) { chunkIndexOffsets[c] = chunkCount; uint32_t componentChunkCount; uint32_t componentBondCount; fillChunkAndBondDescriptorsFromAsset(componentChunkCount, componentBondCount, chunkDescs + chunkCount, bondDescs + bondCount, components[c]); // Fix chunks' parent indices for (uint32_t i = 0; i < componentChunkCount; ++i) { if (!isInvalidIndex(chunkDescs[chunkCount + i].parentChunkDescIndex)) { chunkDescs[chunkCount + i].parentChunkDescIndex += chunkCount; } } // Fix bonds' chunk indices for (uint32_t i = 0; i < componentBondCount; ++i) { NvBlastBondDesc& bondDesc = bondDescs[bondCount + i]; for (int j = 0; j < 2; ++j) { if (!isInvalidIndex(bondDesc.chunkIndices[j])) { bondDesc.chunkIndices[j] += chunkCount; } } } // Transform geometric data if (scales != nullptr) { const NvcVec3& S = scales[c]; NvcVec3 cofS = { S.y * S.z, S.z * S.x, S.x * S.y }; float absDetS = S.x * S.y * S.z; const float sgnDetS = absDetS < 0.0f ? -1.0f : 1.0f; absDetS *= sgnDetS; for (uint32_t i = 0; i < componentChunkCount; ++i) { scale(reinterpret_cast<NvcVec3&>(chunkDescs[chunkCount + i].centroid), S); chunkDescs[chunkCount + i].volume *= absDetS; } for (uint32_t i = 0; i < componentBondCount; ++i) { NvBlastBond& bond = bondDescs[bondCount + i].bond; scale(reinterpret_cast<NvcVec3&>(bond.normal), cofS); float renorm = sqrtf(bond.normal[0] * bond.normal[0] + bond.normal[1] * bond.normal[1] + bond.normal[2] * bond.normal[2]); bond.area *= renorm; if (renorm != 0) { renorm = sgnDetS / renorm; bond.normal[0] *= renorm; bond.normal[1] *= renorm; bond.normal[2] *= renorm; } scale(reinterpret_cast<NvcVec3&>(bond.centroid), S); } } if (rotations != nullptr) { for (uint32_t i = 0; i < componentChunkCount; ++i) { rotate(reinterpret_cast<NvcVec3&>(chunkDescs[chunkCount + i].centroid), rotations[c]); } for (uint32_t i = 0; i < componentBondCount; ++i) { NvBlastBond& bond = bondDescs[bondCount + i].bond; rotate(reinterpret_cast<NvcVec3&>(bond.normal), rotations[c]); // Normal can be transformed this way since we aren't scaling rotate(reinterpret_cast<NvcVec3&>(bond.centroid), rotations[c]); } } if (translations != nullptr) { for (uint32_t i = 0; i < componentChunkCount; ++i) { translate(reinterpret_cast<NvcVec3&>(chunkDescs[chunkCount + i].centroid), translations[c]); } for (uint32_t i = 0; i < componentBondCount; ++i) { translate(reinterpret_cast<NvcVec3&>(bondDescs[bondCount + i].bond.centroid), translations[c]); } } chunkCount += componentChunkCount; bondCount += componentBondCount; } // Fill the bond descriptors from the new bond descs for (uint32_t b = 0; b < newBondCount; ++b) { const NvBlastExtAssetUtilsBondDesc& newBondDesc = newBondDescs[b]; NvBlastBondDesc& bondDesc = bondDescs[bondCount++]; memcpy(&bondDesc.bond, &newBondDesc.bond, sizeof(NvBlastBond)); bondDesc.chunkIndices[0] = !isInvalidIndex(newBondDesc.chunkIndices[0]) ? newBondDesc.chunkIndices[0] + chunkIndexOffsets[newBondDesc.componentIndices[0]] : invalidIndex<uint32_t>(); bondDesc.chunkIndices[1] = !isInvalidIndex(newBondDesc.chunkIndices[1]) ? newBondDesc.chunkIndices[1] + chunkIndexOffsets[newBondDesc.componentIndices[1]] : invalidIndex<uint32_t>(); } // Create new asset desriptor NvBlastAssetDesc assetDesc; assetDesc.chunkCount = chunkCount; assetDesc.chunkDescs = chunkDescs; assetDesc.bondCount = bondCount; assetDesc.bondDescs = bondDescs; // Massage the descriptors so that they are valid for scratch creation void* scratch = NVBLAST_ALLOC(chunkCount * sizeof(NvBlastChunkDesc)); // Enough for NvBlastEnsureAssetExactSupportCoverage and NvBlastReorderAssetDescChunks NvBlastEnsureAssetExactSupportCoverage(chunkDescs, chunkCount, scratch, logLL); if (chunkReorderMapSize < chunkCount) { if (chunkReorderMap != nullptr) { // Chunk reorder map is not large enough. Fill it with invalid indices and don't use it. memset(chunkReorderMap, 0xFF, chunkReorderMapSize * sizeof(uint32_t)); NVBLAST_LOG_WARNING("NvBlastExtAssetUtilsMergeAssets: insufficient chunkReorderMap array passed in. NvBlastReorderAssetDescChunks will not be used."); } chunkReorderMap = nullptr; // Don't use } if (chunkReorderMap != nullptr) { NvBlastReorderAssetDescChunks(chunkDescs, chunkCount, bondDescs, bondCount, chunkReorderMap, true, scratch, logLL); } NVBLAST_FREE(scratch); return assetDesc; } /** Multiply a 3-vector v in-place by value. \param[in,out] v The vector to multiply. \param[in] m The 3x3 matrix. */ static inline void multiply(NvcVec3& v, float value) { v.x *= value; v.y *= value; v.z *= value; } /** Get Vec3 length */ static inline float length(const NvcVec3& p) { return sqrtf(p.x * p.x + p.y * p.y + p.z * p.z); } /** Transform a point in-place: scale, rotate, then translate \param[in,out] p The point to transform. \param[in] S The diagonal elements of a diagonal scale matrix. \param[in] R A quaternion representing the rotation. Must be normalized. \param[in] T The translation vector. */ static inline void transform(NvcVec3& p, const NvcVec3& S, const NvcQuat& R, const NvcVec3& T) { scale(p, S); rotate(p, R); translate(p, T); } /** Transform a vector in-place: scale, then rotate \param[in,out] v The vector to transform. \param[in] S The diagonal elements of a diagonal scale matrix. \param[in] R A quaternion representing the rotation. Must be normalized. */ static inline void transform(NvcVec3& v, const NvcVec3& S, const NvcQuat& R) { scale(v, S); rotate(v, R); } void NvBlastExtAssetTransformInPlace(NvBlastAsset* asset, const NvcVec3* scaling, const NvcQuat* rotation, const NvcVec3* translation) { // Local copies of scaling (S), rotation (R), and translation (T) NvcVec3 S = { 1, 1, 1 }; NvcQuat R = { 0, 0, 0, 1 }; NvcVec3 T = { 0, 0, 0 }; NvcVec3 cofS = { 1, 1, 1 }; float absDetS = 1; float sgnDetS = 1; { if (rotation) { R = *rotation; } if (scaling) { S = *scaling; cofS.x = S.y * S.z; cofS.y = S.z * S.x; cofS.z = S.x * S.y; absDetS = S.x * S.y * S.z; sgnDetS = absDetS < 0.0f ? -1.0f : 1.0f; absDetS *= sgnDetS; } if (translation) { T = *translation; } } // Chunk descs const uint32_t assetChunkCount = NvBlastAssetGetChunkCount(asset, logLL); NvBlastChunk* assetChunk = const_cast<NvBlastChunk*>(NvBlastAssetGetChunks(asset, logLL)); for (uint32_t i = 0; i < assetChunkCount; ++i, ++assetChunk) { transform(reinterpret_cast<NvcVec3&>(assetChunk->centroid), S, R, T); assetChunk->volume *= absDetS; // Use |detS| to keep the volume positive } // Bond descs const uint32_t assetBondCount = NvBlastAssetGetBondCount(asset, logLL); NvBlastBond* assetBond = const_cast<NvBlastBond*>(NvBlastAssetGetBonds(asset, logLL)); for (uint32_t i = 0; i < assetBondCount; ++i, ++assetBond) { transform(reinterpret_cast<NvcVec3&>(assetBond->centroid), S, R, T); NvcVec3& normal = reinterpret_cast<NvcVec3&>(assetBond->normal); transform(normal, cofS, R); const float l = length(normal); assetBond->area *= l; multiply(normal, l > 0.f ? sgnDetS / l : 1.f); } }
18,148
C++
36.114519
190
0.648556
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/stress/NvBlastExtStressSolver.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastExtStressSolver.h" #include "NvBlast.h" #include "NvBlastGlobals.h" #include "NvBlastArray.h" #include "NvBlastHashMap.h" #include "NvBlastHashSet.h" #include "NvBlastAssert.h" #include "NvBlastIndexFns.h" #include "NsFPU.h" #include "NvBlastNvSharedHelpers.h" #include "NvCMath.h" #include "stress.h" #include "buffer.h" #include "simd/simd_device_query.h" #include <algorithm> #define USE_SCALAR_IMPL 0 #define WARM_START 1 #define GRAPH_INTERGRIRY_CHECK 0 #if GRAPH_INTERGRIRY_CHECK #include <set> #endif namespace Nv { namespace Blast { using namespace nvidia; static_assert(sizeof(NvVec3) == sizeof(NvcVec3), "sizeof(NvVec3) must equal sizeof(NvcVec3)."); static_assert(offsetof(NvVec3, x) == offsetof(NvcVec3, x) && offsetof(NvVec3, y) == offsetof(NvcVec3, y) && offsetof(NvVec3, z) == offsetof(NvcVec3, z), "Elements of NvVec3 and NvcVec3 must have the same struct offset."); /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Conjugate Gradient Solver /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// class ConjugateGradientImpulseSolver { public: ConjugateGradientImpulseSolver(uint32_t nodeCount, uint32_t maxBondCount) { m_bonds.reserve(maxBondCount); m_impulses.reserve(maxBondCount); reset(nodeCount); } void getBondImpulses(uint32_t bond, NvVec3& impulseLinear, NvVec3& impulseAngular) const { NVBLAST_ASSERT(bond < m_impulses.size()); const AngLin6& f = m_impulses[bond]; *(NvcVec3*)&impulseAngular = f.ang; *(NvcVec3*)&impulseLinear = f.lin; } void getBondNodes(uint32_t bond, uint32_t& node0, uint32_t& node1) const { NVBLAST_ASSERT(bond < m_bonds.size()); const SolverBond& b = m_bonds[bond]; node0 = b.nodes[0]; node1 = b.nodes[1]; } uint32_t getBondCount() const { return m_bonds.size(); } uint32_t getNodeCount() const { return m_nodes.size(); } void setNodeMassInfo(uint32_t node, const NvVec3& CoM, float mass, float inertia) { NVBLAST_ASSERT(node < m_nodes.size()); SolverNodeS& n = m_nodes[node]; n.CoM = { CoM.x, CoM.y, CoM.z }; n.mass = std::max(mass, 0.0f); // No negative masses, but 0 is meaningful (== infinite) n.inertia = std::max(inertia, 0.0f); // Ditto for inertia m_forceColdStart = true; } void initialize() { StressProcessor::DataParams params; params.centerBonds = true; params.equalizeMasses = true; m_stressProcessor.prepare(m_nodes.begin(), m_nodes.size(), m_bonds.begin(), m_bonds.size(), params); } void setNodeVelocities(uint32_t node, const NvVec3& velocityLinear, const NvVec3& velocityAngular) { NVBLAST_ASSERT(node < m_velocities.size()); AngLin6& v = m_velocities[node]; v.ang = { velocityAngular.x, velocityAngular.y, velocityAngular.z }; v.lin = { velocityLinear.x, velocityLinear.y, velocityLinear.z }; m_inputsChanged = true; } uint32_t addBond(uint32_t node0, uint32_t node1, const NvVec3& bondCentroid) { SolverBond b; b.nodes[0] = node0; b.nodes[1] = node1; b.centroid = { bondCentroid.x, bondCentroid.y, bondCentroid.z }; m_bonds.pushBack(b); m_impulses.push_back({{0,0,0},{0,0,0}}); m_forceColdStart = true; return m_bonds.size() - 1; } void replaceWithLast(uint32_t bondIndex) { m_bonds.replaceWithLast(bondIndex); if ((size_t)bondIndex + 2 < m_impulses.size()) { m_impulses[bondIndex] = m_impulses.back(); m_impulses.resize(m_impulses.size() - 1); } m_stressProcessor.removeBond(bondIndex); } void reset(uint32_t nodeCount) { m_nodes.resize(nodeCount); memset(m_nodes.begin(), 0, sizeof(SolverNodeS)*nodeCount); m_velocities.resize(nodeCount); memset(m_velocities.data(), 0, sizeof(AngLin6)*nodeCount); clearBonds(); m_error_sq = {FLT_MAX, FLT_MAX}; m_converged = false; m_forceColdStart = true; m_inputsChanged = true; } void clearBonds() { m_bonds.clear(); m_impulses.resize(0); m_forceColdStart = true; } void solve(uint32_t iterationCount, bool warmStart = true) { StressProcessor::SolverParams params; params.maxIter = iterationCount; params.tolerance = 0.001f; params.warmStart = warmStart && !m_forceColdStart; m_converged = (m_stressProcessor.solve(m_impulses.data(), m_velocities.data(), params, &m_error_sq) >= 0); m_forceColdStart = false; m_inputsChanged = false; } bool calcError(float& linear, float& angular) const { linear = sqrtf(m_error_sq.lin); angular = sqrtf(m_error_sq.ang); return m_converged; } private: Array<SolverNodeS>::type m_nodes; Array<SolverBond>::type m_bonds; StressProcessor m_stressProcessor; POD_Buffer<AngLin6> m_velocities; POD_Buffer<AngLin6> m_impulses; AngLin6ErrorSq m_error_sq; bool m_converged; bool m_forceColdStart; bool m_inputsChanged; }; /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Graph Processor /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// #if GRAPH_INTERGRIRY_CHECK #define CHECK_GRAPH_INTEGRITY checkGraphIntegrity() #else #define CHECK_GRAPH_INTEGRITY ((void)0) #endif class SupportGraphProcessor { public: struct BondData { uint32_t node0; uint32_t node1; uint32_t blastBondIndex; // linear stresses float stressNormal; // negative values represent compression pressure, positive represent tension float stressShear; // The normal used to compute stress values // Can be different than the bond normal if graph reduction is used // and multiple bonds are grouped together nvidia::NvVec3 normal; // Centroid used to compute node offsets, instead of assuming the bond is halfway between node positions. // This also allows the bonds to the world node to be drawn nvidia::NvVec3 centroid; }; struct NodeData { float mass; float volume; NvVec3 localPos; NvVec3 localVel; uint32_t solverNode; uint32_t neighborsCount; }; struct SolverNodeData { uint32_t supportNodesCount; NvVec3 localPos; union { float mass; int32_t indexShift; }; float volume; }; struct SolverBondData { InlineArray<uint32_t, 8>::type blastBondIndices; }; SupportGraphProcessor(uint32_t nodeCount, uint32_t maxBondCount) : m_solver(nodeCount, maxBondCount), m_nodesDirty(true), m_bondsDirty(true) { m_nodesData.resize(nodeCount); m_bondsData.reserve(maxBondCount); m_solverNodesData.resize(nodeCount); m_solverBondsData.reserve(maxBondCount); m_solverBondsMap.reserve(maxBondCount); m_blastBondIndexMap.resize(maxBondCount); memset(m_blastBondIndexMap.begin(), 0xFF, m_blastBondIndexMap.size() * sizeof(uint32_t)); resetVelocities(); } const NodeData& getNodeData(uint32_t node) const { return m_nodesData[node]; } const BondData& getBondData(uint32_t bond) const { return m_bondsData[bond]; } const SolverNodeData& getSolverNodeData(uint32_t node) const { return m_solverNodesData[node]; } const SolverBondData& getSolverBondData(uint32_t bond) const { return m_solverBondsData[bond]; } void getSolverInternalBondImpulses(uint32_t bond, NvVec3& impulseLinear, NvVec3& impulseAngular) const { m_solver.getBondImpulses(bond, impulseLinear, impulseAngular); } void getSolverInternalBondNodes(uint32_t bond, uint32_t& node0, uint32_t& node1) const { m_solver.getBondNodes(bond, node0, node1); } uint32_t getBondCount() const { return m_bondsData.size(); } uint32_t getNodeCount() const { return m_nodesData.size();; } uint32_t getSolverBondCount() const { return m_solverBondsData.size(); } uint32_t getSolverNodeCount() const { return m_solverNodesData.size();; } uint32_t getOverstressedBondCount() const { return m_overstressedBondCount; } void calcSolverBondStresses( uint32_t bondIdx, float bondArea, float nodeDist, const nvidia::NvVec3& bondNormal, float& stressNormal, float& stressShear) const { if (!canTakeDamage(bondArea)) { stressNormal = stressShear = 0.0f; return; } // impulseLinear in the direction of the bond normal is stressNormal, perpendicular is stressShear // ignore impulseAngular for now, not sure how to account for that // convert to pressure to factor out area NvVec3 impulseLinear, impulseAngular; getSolverInternalBondImpulses(bondIdx, impulseLinear, impulseAngular); const float normalComponentLinear = impulseLinear.dot(bondNormal); stressNormal = normalComponentLinear / bondArea; const float impulseLinearMagSqr = impulseLinear.magnitudeSquared(); stressShear = sqrtf(impulseLinearMagSqr - normalComponentLinear * normalComponentLinear) / bondArea; // impulseAngular in the direction of the bond normal is twist, perpendicular is bend // take abs() of the dot product because only the magnitude of the twist matters, not direction const float normalComponentAngular = abs(impulseAngular.dot(bondNormal)); const float twist = normalComponentAngular / bondArea; const float impulseAngularMagSqr = impulseAngular.magnitudeSquared(); const float bend = sqrtf(impulseAngularMagSqr - normalComponentAngular * normalComponentAngular) / bondArea; // interpret angular pressure as a composition of linear pressures // dividing by nodeDist for scaling const float twistContribution = twist * 2.0f / nodeDist; stressShear += twistContribution; const float bendContribution = bend * 2.0f / nodeDist; stressNormal += copysignf(bendContribution, stressNormal); } float mapStressToRange(float stress, float elasticLimit, float fatalLimit) const { if (stress < elasticLimit) { return 0.5f * stress / elasticLimit; } else { return fatalLimit > elasticLimit ? 0.5f + 0.5f * (stress - elasticLimit) / (fatalLimit - elasticLimit) : 1.0f; } } float getSolverBondStressPct(uint32_t bondIdx, const float* bondHealths, const ExtStressSolverSettings& settings, ExtStressSolver::DebugRenderMode mode) const { // sum up the stress of all underlying bonds involved in this stress solver bond float compressionStress, tensionStress, shearStress; float stress = -1.0f; const auto& blastBondIndices = m_solverBondsData[bondIdx].blastBondIndices; for (const auto blastBondIndex : blastBondIndices) { // only consider the stress values on bonds that are intact if (bondHealths[blastBondIndex] > 0.0f && getBondStress(blastBondIndex, compressionStress, tensionStress, shearStress)) { if (mode == ExtStressSolver::STRESS_PCT_COMPRESSION || mode == ExtStressSolver::STRESS_PCT_MAX) { compressionStress = mapStressToRange(compressionStress, settings.compressionElasticLimit, settings.compressionFatalLimit); stress = std::max(compressionStress, stress); } if (mode == ExtStressSolver::STRESS_PCT_TENSION || mode == ExtStressSolver::STRESS_PCT_MAX) { tensionStress = mapStressToRange(tensionStress, settings.tensionElasticLimit, settings.tensionFatalLimit); stress = std::max(tensionStress, stress); } if (mode == ExtStressSolver::STRESS_PCT_SHEAR || mode == ExtStressSolver::STRESS_PCT_MAX) { shearStress = mapStressToRange(shearStress, settings.shearElasticLimit, settings.shearFatalLimit); stress = std::max(shearStress, stress); } // all bonds in the group share the same stress values, no need to keep iterating break; } } // return a value < 0.0f if all bonds are broken return stress; } void setNodeInfo(uint32_t node, float mass, float volume, NvVec3 localPos) { m_nodesData[node].mass = mass; m_nodesData[node].volume = volume; m_nodesData[node].localPos = localPos; m_nodesDirty = true; } void setNodeNeighborsCount(uint32_t node, uint32_t neighborsCount) { // neighbors count is expected to be the number of nodes on 1 island/actor. m_nodesData[node].neighborsCount = neighborsCount; // check for too huge aggregates (happens after island's split) if (!m_nodesDirty) { m_nodesDirty |= (m_solverNodesData[m_nodesData[node].solverNode].supportNodesCount > neighborsCount / 2); } } void addNodeForce(uint32_t node, const NvVec3& force, ExtForceMode::Enum mode) { const float mass = m_nodesData[node].mass; if (mass > 0) { // NOTE - passing in acceleration as velocity. The impulse solver's output will be interpreted as force. m_nodesData[node].localVel += (mode == ExtForceMode::FORCE) ? force/mass : force; } } void addBond(uint32_t node0, uint32_t node1, uint32_t blastBondIndex) { if (isInvalidIndex(m_blastBondIndexMap[blastBondIndex])) { const BondData data = { node0, node1, blastBondIndex, 0.0f }; m_bondsData.pushBack(data); m_blastBondIndexMap[blastBondIndex] = m_bondsData.size() - 1; } } void removeBondIfExists(uint32_t blastBondIndex) { const uint32_t bondIndex = m_blastBondIndexMap[blastBondIndex]; if (!isInvalidIndex(bondIndex)) { const BondData& bond = m_bondsData[bondIndex]; const uint32_t solverNode0 = m_nodesData[bond.node0].solverNode; const uint32_t solverNode1 = m_nodesData[bond.node1].solverNode; bool isBondInternal = (solverNode0 == solverNode1); if (isBondInternal) { // internal bond sadly requires graph resync (it never happens on reduction level '0') m_nodesDirty = true; } else if (!m_nodesDirty) { // otherwise it's external bond, we can remove it manually and keep graph synced // we don't need to spend time there if (m_nodesDirty == true), graph will be resynced anyways BondKey solverBondKey(solverNode0, solverNode1); auto entry = m_solverBondsMap.find(solverBondKey); if (entry) { const uint32_t solverBondIndex = entry->second; auto& blastBondIndices = m_solverBondsData[solverBondIndex].blastBondIndices; blastBondIndices.findAndReplaceWithLast(blastBondIndex); if (blastBondIndices.empty()) { // all bonds associated with this solver bond were removed, so let's remove solver bond m_solverBondsData.replaceWithLast(solverBondIndex); m_solver.replaceWithLast(solverBondIndex); if (m_solver.getBondCount() > 0) { // update 'previously last' solver bond mapping uint32_t node0, node1; m_solver.getBondNodes(solverBondIndex, node0, node1); m_solverBondsMap[BondKey(node0, node1)] = solverBondIndex; } m_solverBondsMap.erase(solverBondKey); } } CHECK_GRAPH_INTEGRITY; } // remove bond from graph processor's list m_blastBondIndexMap[blastBondIndex] = invalidIndex<uint32_t>(); m_bondsData.replaceWithLast(bondIndex); m_blastBondIndexMap[m_bondsData[bondIndex].blastBondIndex] = m_bondsData.size() > bondIndex ? bondIndex : invalidIndex<uint32_t>(); } } void setGraphReductionLevel(uint32_t level) { m_graphReductionLevel = level; m_nodesDirty = true; } uint32_t getGraphReductionLevel() const { return m_graphReductionLevel; } void solve(const ExtStressSolverSettings& settings, const float* bondHealth, const NvBlastBond* bonds, bool warmStart = true) { sync(bonds); for (const NodeData& node : m_nodesData) { m_solver.setNodeVelocities(node.solverNode, node.localVel, NvVec3(NvZero)); } m_solver.solve(settings.maxSolverIterationsPerFrame, warmStart); resetVelocities(); updateBondStress(settings, bondHealth, bonds); } bool calcError(float& linear, float& angular) const { return m_solver.calcError(linear, angular); } bool getBondStress(uint32_t blastBondIndex, float& compression, float& tension, float& shear) const { const uint32_t bondIndex = m_blastBondIndexMap[blastBondIndex]; if (isInvalidIndex(bondIndex)) { return false; } // compression and tension are mutually exclusive since they operate in opposite directions // they both measure stress parallel to the bond normal direction // compression is the force resisting two nodes being pushed together (it pushes them apart) // tension is the force resisting two nodes being pulled apart (it pulls them together) if (m_bondsData[bondIndex].stressNormal <= 0.0f) { compression = -m_bondsData[bondIndex].stressNormal; tension = 0.0f; } else { compression = 0.0f; tension = m_bondsData[bondIndex].stressNormal; } // shear is independent and can co-exist with compression and tension shear = m_bondsData[bondIndex].stressShear; // the force perpendicular to the bond normal direction return true; } // Convert from Blast bond index to internal stress solver bond index // Will be InvalidIndex if the internal bond was removed from the stress solver uint32_t getInternalBondIndex(uint32_t blastBondIndex) { return m_blastBondIndexMap[blastBondIndex]; } private: void resetVelocities() { for (auto& node : m_nodesData) { node.localVel = NvVec3(NvZero); } } void updateBondStress(const ExtStressSolverSettings& settings, const float* bondHealth, const NvBlastBond* bonds) { m_overstressedBondCount = 0; Array<uint32_t>::type bondIndicesToRemove; bondIndicesToRemove.reserve(getBondCount()); for (uint32_t i = 0; i < m_solverBondsData.size(); ++i) { // calculate the total area of all bonds involved so pressure can be calculated float totalArea = 0.0f; // calculate an average normal and centroid for all bonds as well, weighted by their area nvidia::NvVec3 bondNormal(NvZero); nvidia::NvVec3 bondCentroid(NvZero); nvidia::NvVec3 averageNodeDisp(NvZero); const auto& blastBondIndices = m_solverBondsData[i].blastBondIndices; for (auto blastBondIndex : blastBondIndices) { if (bondHealth[blastBondIndex] > 0.0f) { const uint32_t bondIndex = m_blastBondIndexMap[blastBondIndex]; const BondData& bond = m_bondsData[bondIndex]; const nvidia::NvVec3 nodeDisp = m_nodesData[bond.node1].localPos - m_nodesData[bond.node0].localPos; // the current health of a bond is the effective area remaining const float remainingArea = bondHealth[blastBondIndex]; const NvBlastBond& blastBond = bonds[blastBondIndex]; // Align normal(s) with node displacement, so that compressive/tensile distinction is correct const nvidia::NvVec3 assetBondNormal(blastBond.normal[0], blastBond.normal[1], blastBond.normal[2]); const nvidia::NvVec3 blastBondNormal = std::copysignf(1.0f, assetBondNormal.dot(nodeDisp))*assetBondNormal; const nvidia::NvVec3 blastBondCentroid(blastBond.centroid[0], blastBond.centroid[1], blastBond.centroid[2]); if (!canTakeDamage(remainingArea)) // Check unbreakable limit { totalArea = kUnbreakableLimit; // Don't add this in, in case of overflow bondNormal = blastBondNormal; bondCentroid = blastBondCentroid; averageNodeDisp = nodeDisp; break; } bondNormal += blastBondNormal*remainingArea; bondCentroid += blastBondCentroid*remainingArea; averageNodeDisp += nodeDisp*remainingArea; totalArea += remainingArea; } else { // if the bond is broken, try to remove it after processing is complete bondIndicesToRemove.pushBack(blastBondIndex); } } if (totalArea == 0.0f) { continue; } // normalized the aggregate normal now that all contributing bonds have been combined bondNormal.normalizeSafe(); // divide by total area for the weighted position, if the area is valid if (canTakeDamage(totalArea)) { bondCentroid /= totalArea; averageNodeDisp /= totalArea; } // bonds are looked at as a whole group, // so regardless of the current health of an individual one they are either all over stressed or none are float stressNormal, stressShear; calcSolverBondStresses(i, totalArea, averageNodeDisp.magnitude(), bondNormal, stressNormal, stressShear); NVBLAST_ASSERT(!std::isnan(stressNormal) && !std::isnan(stressShear)); if ( -stressNormal > settings.compressionElasticLimit || stressNormal > settings.tensionElasticLimit || stressShear > settings.shearElasticLimit ) { m_overstressedBondCount += blastBondIndices.size(); } // store the stress values for all the bonds involved for (auto blastBondIndex : blastBondIndices) { const uint32_t bondIndex = m_blastBondIndexMap[blastBondIndex]; if (!isInvalidIndex(bondIndex) && bondHealth[blastBondIndex] > 0.0f) { BondData& bond = m_bondsData[bondIndex]; NVBLAST_ASSERT(getNodeData(bond.node0).solverNode != getNodeData(bond.node1).solverNode); NVBLAST_ASSERT(bond.blastBondIndex == blastBondIndex); bond.stressNormal = stressNormal; bond.stressShear = stressShear; // store the normal used to calc stresses so it can be used later to determine forces bond.normal = bondNormal; // store the bond centroid bond.centroid = bondCentroid; } } } // now that processing is done, remove any dead bonds for (uint32_t bondIndex : bondIndicesToRemove) { removeBondIfExists(bondIndex); } } void sync(const NvBlastBond* bonds) { if (m_nodesDirty) { syncNodes(bonds); m_solver.initialize(); } if (m_bondsDirty) { syncBonds(bonds); } CHECK_GRAPH_INTEGRITY; } void syncNodes(const NvBlastBond* bonds) { // init with 1<->1 blast nodes to solver nodes mapping m_solverNodesData.resize(m_nodesData.size()); for (uint32_t i = 0; i < m_nodesData.size(); ++i) { m_nodesData[i].solverNode = i; m_solverNodesData[i].supportNodesCount = 1; m_solverNodesData[i].indexShift = 0; } // for static nodes aggregate size per graph reduction level is lower, it // falls behind on few levels. (can be made as parameter) const uint32_t STATIC_NODES_COUNT_PENALTY = 2 << 2; // reducing graph by aggregating nodes level by level // NOTE (@anovoselov): Recently, I found a flow in the algorithm below. In very rare situations aggregate (solver node) // can contain more then one connected component. I didn't notice it to produce any visual artifacts and it's // unlikely to influence stress solvement a lot. Possible solution is to merge *whole* solver nodes, that // will raise complexity a bit (at least will add another loop on nodes for every reduction level. for (uint32_t k = 0; k < m_graphReductionLevel; k++) { const uint32_t maxAggregateSize = 1 << (k + 1); for (const BondData& bond : m_bondsData) { NodeData& node0 = m_nodesData[bond.node0]; NodeData& node1 = m_nodesData[bond.node1]; if (node0.solverNode == node1.solverNode) continue; SolverNodeData& solverNode0 = m_solverNodesData[node0.solverNode]; SolverNodeData& solverNode1 = m_solverNodesData[node1.solverNode]; const int countPenalty = 1; // This was being set to STATIC_NODES_COUNT_PENALTY for static nodes, may want to revisit const uint32_t aggregateSize = std::min<uint32_t>(maxAggregateSize, node0.neighborsCount / 2); if (solverNode0.supportNodesCount * countPenalty >= aggregateSize) continue; if (solverNode1.supportNodesCount * countPenalty >= aggregateSize) continue; if (solverNode0.supportNodesCount >= solverNode1.supportNodesCount) { solverNode1.supportNodesCount--; solverNode0.supportNodesCount++; node1.solverNode = node0.solverNode; } else if (solverNode1.supportNodesCount >= solverNode0.supportNodesCount) { solverNode1.supportNodesCount++; solverNode0.supportNodesCount--; node0.solverNode = node1.solverNode; } } } // Solver Nodes now sparse, a lot of empty ones. Rearrange them by moving all non-empty to the front // 2 passes used for that { uint32_t currentNode = 0; for (; currentNode < m_solverNodesData.size(); ++currentNode) { if (m_solverNodesData[currentNode].supportNodesCount > 0) continue; // 'currentNode' is free // search next occupied node uint32_t k = currentNode + 1; for (; k < m_solverNodesData.size(); ++k) { if (m_solverNodesData[k].supportNodesCount > 0) { // replace currentNode and keep indexShift m_solverNodesData[currentNode].supportNodesCount = m_solverNodesData[k].supportNodesCount; m_solverNodesData[k].indexShift = k - currentNode; m_solverNodesData[k].supportNodesCount = 0; break; } } if (k == m_solverNodesData.size()) { break; } } for (auto& node : m_nodesData) { node.solverNode -= m_solverNodesData[node.solverNode].indexShift; } // now, we know total solver nodes count and which nodes are aggregated into them m_solverNodesData.resize(currentNode); } // calculate all needed data for (SolverNodeData& solverNode : m_solverNodesData) { solverNode.supportNodesCount = 0; solverNode.localPos = NvVec3(NvZero); solverNode.mass = 0.0f; solverNode.volume = 0.0f; } for (NodeData& node : m_nodesData) { SolverNodeData& solverNode = m_solverNodesData[node.solverNode]; solverNode.supportNodesCount++; solverNode.localPos += node.localPos; solverNode.mass += node.mass; solverNode.volume += node.volume; } for (SolverNodeData& solverNode : m_solverNodesData) { solverNode.localPos /= (float)solverNode.supportNodesCount; } m_solver.reset(m_solverNodesData.size()); for (uint32_t nodeIndex = 0; nodeIndex < m_solverNodesData.size(); ++nodeIndex) { const SolverNodeData& solverNode = m_solverNodesData[nodeIndex]; const float R = NvPow(solverNode.volume * 3.0f * NvInvPi / 4.0f, 1.0f / 3.0f); // sphere volume approximation const float inertia = solverNode.mass * (R * R * 0.4f); // sphere inertia tensor approximation: I = 2/5 * M * R^2 ; invI = 1 / I; m_solver.setNodeMassInfo(nodeIndex, solverNode.localPos, solverNode.mass, inertia); } m_nodesDirty = false; syncBonds(bonds); } void syncBonds(const NvBlastBond* bonds) { // traverse all blast bonds and aggregate m_solver.clearBonds(); m_solverBondsMap.clear(); m_solverBondsData.clear(); for (BondData& bond : m_bondsData) { const NodeData& node0 = m_nodesData[bond.node0]; const NodeData& node1 = m_nodesData[bond.node1]; // reset stress, bond structure changed and internal bonds stress won't be updated during updateBondStress() bond.stressNormal = 0.0f; bond.stressShear = 0.0f; // initialize normal and centroid using blast values bond.normal = *(NvVec3*)bonds[bond.blastBondIndex].normal; bond.centroid = *(NvVec3*)bonds[bond.blastBondIndex].centroid; // fix normal direction to point from node0 to node1 bond.normal *= std::copysignf(1.0f, bond.normal.dot(node1.localPos - node1.localPos)); if (node0.solverNode == node1.solverNode) continue; // skip (internal) BondKey key(node0.solverNode, node1.solverNode); auto entry = m_solverBondsMap.find(key); SolverBondData* data; if (!entry) { m_solverBondsData.pushBack(SolverBondData()); data = &m_solverBondsData.back(); m_solverBondsMap[key] = m_solverBondsData.size() - 1; m_solver.addBond(node0.solverNode, node1.solverNode, bond.centroid); } else { data = &m_solverBondsData[entry->second]; } data->blastBondIndices.pushBack(bond.blastBondIndex); } m_bondsDirty = false; } #if GRAPH_INTERGRIRY_CHECK void checkGraphIntegrity() { NVBLAST_ASSERT(m_solver.getBondCount() == m_solverBondsData.size()); NVBLAST_ASSERT(m_solver.getNodeCount() == m_solverNodesData.size()); std::set<uint64_t> solverBonds; for (uint32_t i = 0; i < m_solverBondsData.size(); ++i) { const auto& bondData = m_solver.getBondData(i); BondKey key(bondData.node0, bondData.node1); NVBLAST_ASSERT(solverBonds.find(key) == solverBonds.end()); solverBonds.emplace(key); auto entry = m_solverBondsMap.find(key); NVBLAST_ASSERT(entry != nullptr); const auto& solverBond = m_solverBondsData[entry->second]; for (auto& blastBondIndex : solverBond.blastBondIndices) { if (!isInvalidIndex(m_blastBondIndexMap[blastBondIndex])) { auto& b = m_bondsData[m_blastBondIndexMap[blastBondIndex]]; BondKey key2(m_nodesData[b.node0].solverNode, m_nodesData[b.node1].solverNode); NVBLAST_ASSERT(key2 == key); } } } for (auto& solverBond : m_solverBondsData) { for (auto& blastBondIndex : solverBond.blastBondIndices) { if (!isInvalidIndex(m_blastBondIndexMap[blastBondIndex])) { auto& b = m_bondsData[m_blastBondIndexMap[blastBondIndex]]; NVBLAST_ASSERT(m_nodesData[b.node0].solverNode != m_nodesData[b.node1].solverNode); } } } uint32_t mappedBondCount = 0; for (uint32_t i = 0; i < m_blastBondIndexMap.size(); i++) { const auto& bondIndex = m_blastBondIndexMap[i]; if (!isInvalidIndex(bondIndex)) { mappedBondCount++; NVBLAST_ASSERT(m_bondsData[bondIndex].blastBondIndex == i); } } NVBLAST_ASSERT(m_bondsData.size() == mappedBondCount); } #endif struct BondKey { uint32_t node0; uint32_t node1; BondKey(uint32_t n0, uint32_t n1) : node0(n0), node1(n1) {} operator uint64_t() const { // Szudzik's function return node0 >= node1 ? (uint64_t)node0 * node0 + node0 + node1 : (uint64_t)node1 * node1 + node0; } }; ConjugateGradientImpulseSolver m_solver; Array<SolverNodeData>::type m_solverNodesData; Array<SolverBondData>::type m_solverBondsData; uint32_t m_graphReductionLevel; bool m_nodesDirty; bool m_bondsDirty; uint32_t m_overstressedBondCount; HashMap<BondKey, uint32_t>::type m_solverBondsMap; Array<uint32_t>::type m_blastBondIndexMap; Array<BondData>::type m_bondsData; Array<NodeData>::type m_nodesData; }; /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // ExtStressSolver /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /** */ class ExtStressSolverImpl final : public ExtStressSolver { NV_NOCOPY(ExtStressSolverImpl) public: ExtStressSolverImpl(const NvBlastFamily& family, const ExtStressSolverSettings& settings); virtual void release() override; //////// ExtStressSolverImpl interface //////// virtual void setAllNodesInfoFromLL(float density = 1.0f) override; virtual void setNodeInfo(uint32_t graphNode, float mass, float volume, NvcVec3 localPos) override; virtual void setSettings(const ExtStressSolverSettings& settings) override { m_settings = settings; inheritSettingsLimits(); } virtual const ExtStressSolverSettings& getSettings() const override { return m_settings; } virtual bool addForce(const NvBlastActor& actor, NvcVec3 localPosition, NvcVec3 localForce, ExtForceMode::Enum mode) override; virtual void addForce(uint32_t graphNode, NvcVec3 localForce, ExtForceMode::Enum mode) override; virtual bool addGravity(const NvBlastActor& actor, NvcVec3 localGravity) override; virtual bool addCentrifugalAcceleration(const NvBlastActor& actor, NvcVec3 localCenterMass, NvcVec3 localAngularVelocity) override; virtual void update() override; virtual uint32_t getOverstressedBondCount() const override { return m_graphProcessor->getOverstressedBondCount(); } virtual void generateFractureCommands(const NvBlastActor& actor, NvBlastFractureBuffers& commands) override; virtual uint32_t generateFractureCommandsPerActor(const NvBlastActor** actorBuffer, NvBlastFractureBuffers* commandsBuffer, uint32_t bufferSize) override; void reset() override { m_reset = true; } virtual float getStressErrorLinear() const override { return m_errorLinear; } virtual float getStressErrorAngular() const override { return m_errorAngular; } virtual bool converged() const override { return m_converged; } virtual uint32_t getFrameCount() const override { return m_framesCount; } virtual uint32_t getBondCount() const override { return m_graphProcessor->getSolverBondCount(); } virtual bool getExcessForces(uint32_t actorIndex, const NvcVec3& com, NvcVec3& force, NvcVec3& torque) override; virtual bool notifyActorCreated(const NvBlastActor& actor) override; virtual void notifyActorDestroyed(const NvBlastActor& actor) override; virtual const DebugBuffer fillDebugRender(const uint32_t* nodes, uint32_t nodeCount, DebugRenderMode mode, float scale) override; private: ~ExtStressSolverImpl(); //////// private methods //////// void solve(); void fillFractureCommands(const NvBlastActor& actor, NvBlastFractureBuffers& commands); void initialize(); void iterate(); void removeBrokenBonds(); template<class T> T* getScratchArray(uint32_t size); bool generateStressDamage(const NvBlastActor& actor, uint32_t bondIndex, uint32_t node0, uint32_t node1); void inheritSettingsLimits() { NVBLAST_ASSERT(m_settings.compressionElasticLimit >= 0.0f && m_settings.compressionFatalLimit >= 0.0f); // check if any optional limits need to inherit from the compression values if (m_settings.tensionElasticLimit < 0.0f) { m_settings.tensionElasticLimit = m_settings.compressionElasticLimit; } if (m_settings.tensionFatalLimit < 0.0f) { m_settings.tensionFatalLimit = m_settings.compressionFatalLimit; } if (m_settings.shearElasticLimit < 0.0f) { m_settings.shearElasticLimit = m_settings.compressionElasticLimit; } if (m_settings.shearFatalLimit < 0.0f) { m_settings.shearFatalLimit = m_settings.compressionFatalLimit; } } //////// data //////// const NvBlastFamily& m_family; HashSet<const NvBlastActor*>::type m_activeActors; ExtStressSolverSettings m_settings; NvBlastSupportGraph m_graph; bool m_isDirty; bool m_reset; const float* m_bondHealths; const float* m_cachedBondHealths; const NvBlastBond* m_bonds; SupportGraphProcessor* m_graphProcessor; float m_errorAngular; float m_errorLinear; bool m_converged; uint32_t m_framesCount; Array<NvBlastBondFractureData>::type m_bondFractureBuffer; Array<uint8_t>::type m_scratch; Array<DebugLine>::type m_debugLineBuffer; }; template<class T> NV_INLINE T* ExtStressSolverImpl::getScratchArray(uint32_t size) { const uint32_t scratchSize = sizeof(T) * size; if (m_scratch.size() < scratchSize) { m_scratch.resize(scratchSize); } return reinterpret_cast<T*>(m_scratch.begin()); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Creation /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// ExtStressSolverImpl::ExtStressSolverImpl(const NvBlastFamily& family, const ExtStressSolverSettings& settings) : m_family(family), m_settings(settings), m_isDirty(false), m_reset(false), m_errorAngular(std::numeric_limits<float>::max()), m_errorLinear(std::numeric_limits<float>::max()), m_converged(false), m_framesCount(0) { // this needs to be called any time settings change, including when they are first set inheritSettingsLimits(); const NvBlastAsset* asset = NvBlastFamilyGetAsset(&m_family, logLL); NVBLAST_ASSERT(asset); m_graph = NvBlastAssetGetSupportGraph(asset, logLL); const uint32_t bondCount = NvBlastAssetGetBondCount(asset, logLL); m_bondFractureBuffer.reserve(bondCount); { NvBlastActor* actor; NvBlastFamilyGetActors(&actor, 1, &family, logLL); m_bondHealths = NvBlastActorGetBondHealths(actor, logLL); m_cachedBondHealths = NvBlastActorGetCachedBondHeaths(actor, logLL); m_bonds = NvBlastAssetGetBonds(asset, logLL); } m_graphProcessor = NVBLAST_NEW(SupportGraphProcessor)(m_graph.nodeCount, bondCount); // traverse graph and fill bond info for (uint32_t node0 = 0; node0 < m_graph.nodeCount; ++node0) { for (uint32_t adjacencyIndex = m_graph.adjacencyPartition[node0]; adjacencyIndex < m_graph.adjacencyPartition[node0 + 1]; adjacencyIndex++) { uint32_t bondIndex = m_graph.adjacentBondIndices[adjacencyIndex]; if (m_bondHealths[bondIndex] <= 0.0f) continue; uint32_t node1 = m_graph.adjacentNodeIndices[adjacencyIndex]; if (node0 < node1) { m_graphProcessor->addBond(node0, node1, bondIndex); } } } } ExtStressSolverImpl::~ExtStressSolverImpl() { NVBLAST_DELETE(m_graphProcessor, SupportGraphProcessor); } ExtStressSolver* ExtStressSolver::create(const NvBlastFamily& family, const ExtStressSolverSettings& settings) { return NVBLAST_NEW(ExtStressSolverImpl) (family, settings); } void ExtStressSolverImpl::release() { NVBLAST_DELETE(this, ExtStressSolverImpl); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Actors & Graph Data /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void ExtStressSolverImpl::setAllNodesInfoFromLL(float density) { const NvBlastAsset* asset = NvBlastFamilyGetAsset(&m_family, logLL); NVBLAST_ASSERT(asset); const uint32_t chunkCount = NvBlastAssetGetChunkCount(asset, logLL); const NvBlastChunk* chunks = NvBlastAssetGetChunks(asset, logLL); // traverse graph and fill node info for (uint32_t node0 = 0; node0 < m_graph.nodeCount; ++node0) { const uint32_t chunkIndex0 = m_graph.chunkIndices[node0]; if (chunkIndex0 >= chunkCount) { // chunkIndex is invalid means it is static node (represents world) m_graphProcessor->setNodeInfo(node0, 0.0f, 0.0f, NvVec3(NvZero)); } else { // fill node info const NvBlastChunk& chunk = chunks[chunkIndex0]; const float volume = chunk.volume; const float mass = volume * density; const NvVec3 localPos = *reinterpret_cast<const NvVec3*>(chunk.centroid); m_graphProcessor->setNodeInfo(node0, mass, volume, localPos); } } } void ExtStressSolverImpl::setNodeInfo(uint32_t graphNode, float mass, float volume, NvcVec3 localPos) { m_graphProcessor->setNodeInfo(graphNode, mass, volume, toNvShared(localPos)); } bool ExtStressSolverImpl::getExcessForces(uint32_t actorIndex, const NvcVec3& com, NvcVec3& force, NvcVec3& torque) { // otherwise allocate enough space and query the Blast SDK const NvBlastActor* actor = NvBlastFamilyGetActorByIndex(&m_family, actorIndex, logLL); if (actor == nullptr) { return false; } const uint32_t nodeCount = NvBlastActorGetGraphNodeCount(actor, logLL); uint32_t* nodeIndices = getScratchArray<uint32_t>(nodeCount); const uint32_t retCount = NvBlastActorGetGraphNodeIndices(nodeIndices, nodeCount, actor, logLL); NVBLAST_ASSERT(retCount == nodeCount); // get the mapping between support chunks and actor indices // this is the fastest way to tell if two node/chunks are part of the same actor const uint32_t* actorIndices = NvBlastFamilyGetChunkActorIndices(&m_family, logLL); if (actorIndices == nullptr) { return false; } // walk the visible nodes for the actor looking for bonds that broke this frame nvidia::NvVec3 totalForce(0.0f); nvidia::NvVec3 totalTorque(0.0f); for (uint32_t n = 0; n < nodeCount; n++) { // find bonds that broke this frame (health <= 0 but internal stress bond index is still valid) const uint32_t nodeIdx = nodeIndices[n]; for (uint32_t i = m_graph.adjacencyPartition[nodeIdx]; i < m_graph.adjacencyPartition[nodeIdx + 1]; i++) { // check if the bond is broken first of all const uint32_t blastBondIndex = m_graph.adjacentBondIndices[i]; if (m_bondHealths[blastBondIndex] > 0.0f) { continue; } // broken bonds that have invalid internal indices broke before this frame const uint32_t internalBondIndex = m_graphProcessor->getInternalBondIndex(blastBondIndex); if (isInvalidIndex(internalBondIndex)) { continue; } // make sure the other node in the bond isn't part of the same actor // forces should only be applied due to bonds breaking between actors, not within const uint32_t chunkIdx = m_graph.chunkIndices[nodeIdx]; const uint32_t otherNodeIdx = m_graph.adjacentNodeIndices[i]; const uint32_t otherChunkIdx = m_graph.chunkIndices[otherNodeIdx]; if (!isInvalidIndex(chunkIdx) && !isInvalidIndex(otherChunkIdx) && actorIndices[chunkIdx] == actorIndices[otherChunkIdx]) { continue; } // this bond should contribute forces to the output const auto bondData = m_graphProcessor->getBondData(internalBondIndex); NVBLAST_ASSERT(blastBondIndex == bondData.blastBondIndex); uint32_t node0, node1; m_graphProcessor->getSolverInternalBondNodes(internalBondIndex, node0, node1); NVBLAST_ASSERT(bondData.node0 == internalBondData.node0 && bondData.node1 == internalBondData.node1); // accumulators for forces just from this bond nvidia::NvVec3 nvLinearPressure(0.0f); nvidia::NvVec3 nvAngularPressure(0.0f); // deal with linear forces const float excessCompression = bondData.stressNormal + m_settings.compressionFatalLimit; const float excessTension = bondData.stressNormal - m_settings.tensionFatalLimit; if (excessCompression < 0.0f) { nvLinearPressure += excessCompression * bondData.normal; } else if (excessTension > 0.0f) { // tension is in the negative direction of the linear impulse nvLinearPressure += excessTension * bondData.normal; } const float excessShear = bondData.stressShear - m_settings.shearFatalLimit; if (excessShear > 0.0f) { NvVec3 impulseLinear, impulseAngular; m_graphProcessor->getSolverInternalBondImpulses(internalBondIndex, impulseLinear, impulseAngular); const nvidia::NvVec3 shearDir = impulseLinear - impulseLinear.dot(bondData.normal)*bondData.normal; nvLinearPressure += excessShear * shearDir.getNormalized(); } if (nvLinearPressure.magnitudeSquared() > FLT_EPSILON) { const float* bondCenter = m_bonds[blastBondIndex].centroid; const nvidia::NvVec3 forceOffset = nvidia::NvVec3(bondCenter[0], bondCenter[1], bondCenter[3]) - toNvShared(com); const nvidia::NvVec3 torqueFromForce = forceOffset.cross(nvLinearPressure); nvAngularPressure += torqueFromForce; } // add the contributions from this bond to the total forces for the actor // multiply by the area to convert back to force from pressure const float bondRemainingArea = m_cachedBondHealths[blastBondIndex]; NVBLAST_ASSERT(bondRemainingArea <= m_bonds[blastBondIndex].area); const float sign = otherNodeIdx > nodeIdx ? 1.0f : -1.0f; totalForce += nvLinearPressure * (sign*bondRemainingArea); totalTorque += nvAngularPressure * (sign*bondRemainingArea); } } // convert to the output format and return true if non-zero forces were accumulated force = fromNvShared(totalForce); torque = fromNvShared(totalTorque); return (totalForce.magnitudeSquared() + totalTorque.magnitudeSquared()) > 0.0f; } bool ExtStressSolverImpl::notifyActorCreated(const NvBlastActor& actor) { const uint32_t graphNodeCount = NvBlastActorGetGraphNodeCount(&actor, logLL); if (graphNodeCount > 1) { // update neighbors { uint32_t* graphNodeIndices = getScratchArray<uint32_t>(graphNodeCount); const uint32_t nodeCount = NvBlastActorGetGraphNodeIndices(graphNodeIndices, graphNodeCount, &actor, logLL); for (uint32_t i = 0; i < nodeCount; ++i) { m_graphProcessor->setNodeNeighborsCount(graphNodeIndices[i], nodeCount); } } m_activeActors.insert(&actor); m_isDirty = true; return true; } return false; } void ExtStressSolverImpl::notifyActorDestroyed(const NvBlastActor& actor) { if (m_activeActors.erase(&actor)) { m_isDirty = true; } } void ExtStressSolverImpl::removeBrokenBonds() { // traverse graph and remove dead bonds for (uint32_t node0 = 0; node0 < m_graph.nodeCount; ++node0) { for (uint32_t adjacencyIndex = m_graph.adjacencyPartition[node0]; adjacencyIndex < m_graph.adjacencyPartition[node0 + 1]; adjacencyIndex++) { uint32_t node1 = m_graph.adjacentNodeIndices[adjacencyIndex]; if (node0 < node1) { uint32_t bondIndex = m_graph.adjacentBondIndices[adjacencyIndex]; if (m_bondHealths[bondIndex] <= 0.0f) { m_graphProcessor->removeBondIfExists(bondIndex); } } } } m_isDirty = false; } void ExtStressSolverImpl::initialize() { if (m_reset) { m_framesCount = 0; } if (m_isDirty) { removeBrokenBonds(); } if (m_settings.graphReductionLevel != m_graphProcessor->getGraphReductionLevel()) { m_graphProcessor->setGraphReductionLevel(m_settings.graphReductionLevel); } } bool ExtStressSolverImpl::addForce(const NvBlastActor& actor, NvcVec3 localPosition, NvcVec3 localForce, ExtForceMode::Enum mode) { float bestDist = FLT_MAX; uint32_t bestNode = invalidIndex<uint32_t>(); const uint32_t graphNodeCount = NvBlastActorGetGraphNodeCount(&actor, logLL); if (graphNodeCount > 1) { uint32_t* graphNodeIndices = getScratchArray<uint32_t>(graphNodeCount); const uint32_t nodeCount = NvBlastActorGetGraphNodeIndices(graphNodeIndices, graphNodeCount, &actor, logLL); for (uint32_t i = 0; i < nodeCount; ++i) { const uint32_t node = graphNodeIndices[i]; const float sqrDist = (toNvShared(localPosition) - m_graphProcessor->getNodeData(node).localPos).magnitudeSquared(); if (sqrDist < bestDist) { bestDist = sqrDist; bestNode = node; } } if (!isInvalidIndex(bestNode)) { m_graphProcessor->addNodeForce(bestNode, toNvShared(localForce), mode); return true; } } return false; } void ExtStressSolverImpl::addForce(uint32_t graphNode, NvcVec3 localForce, ExtForceMode::Enum mode) { m_graphProcessor->addNodeForce(graphNode, toNvShared(localForce), mode); } bool ExtStressSolverImpl::addGravity(const NvBlastActor& actor, NvcVec3 localGravity) { const uint32_t graphNodeCount = NvBlastActorGetGraphNodeCount(&actor, logLL); if (graphNodeCount > 1) { uint32_t* graphNodeIndices = getScratchArray<uint32_t>(graphNodeCount); const uint32_t nodeCount = NvBlastActorGetGraphNodeIndices(graphNodeIndices, graphNodeCount, &actor, logLL); for (uint32_t i = 0; i < nodeCount; ++i) { const uint32_t node = graphNodeIndices[i]; m_graphProcessor->addNodeForce(node, toNvShared(localGravity), ExtForceMode::ACCELERATION); } return true; } return false; } bool ExtStressSolverImpl::addCentrifugalAcceleration(const NvBlastActor& actor, NvcVec3 localCenterMass, NvcVec3 localAngularVelocity) { const uint32_t graphNodeCount = NvBlastActorGetGraphNodeCount(&actor, logLL); if (graphNodeCount > 1) { uint32_t* graphNodeIndices = getScratchArray<uint32_t>(graphNodeCount); const uint32_t nodeCount = NvBlastActorGetGraphNodeIndices(graphNodeIndices, graphNodeCount, &actor, logLL); // Apply centrifugal force for (uint32_t i = 0; i < nodeCount; ++i) { const uint32_t node = graphNodeIndices[i]; const auto& localPos = m_graphProcessor->getNodeData(node).localPos; // a = w x (w x r) const NvVec3 centrifugalAcceleration = toNvShared(localAngularVelocity) .cross(toNvShared(localAngularVelocity).cross(localPos - toNvShared(localCenterMass))); m_graphProcessor->addNodeForce(node, centrifugalAcceleration, ExtForceMode::ACCELERATION); } return true; } return false; } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Update /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void ExtStressSolverImpl::update() { initialize(); solve(); m_framesCount++; } void ExtStressSolverImpl::solve() { NV_SIMD_GUARD; m_graphProcessor->solve(m_settings, m_bondHealths, m_bonds, WARM_START && !m_reset); m_reset = false; m_converged = m_graphProcessor->calcError(m_errorLinear, m_errorAngular); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Damage /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // check if this bond is over stressed in any way and generate a fracture command if it is bool ExtStressSolverImpl::generateStressDamage(const NvBlastActor& actor, uint32_t bondIndex, uint32_t node0, uint32_t node1) { const float bondHealth = m_bondHealths[bondIndex]; float stressCompression, stressTension, stressShear; if (bondHealth > 0.0f && m_graphProcessor->getBondStress(bondIndex, stressCompression, stressTension, stressShear)) { // compression and tension are mutually exclusive, only one can be positive at a time since they act in opposite directions float stressMultiplier = 0.0f; if (stressCompression > m_settings.compressionElasticLimit) { const float excessStress = stressCompression - m_settings.compressionElasticLimit; const float compressionDenom = m_settings.compressionFatalLimit - m_settings.compressionElasticLimit; const float compressionMultiplier = excessStress / (compressionDenom > 0.0f ? compressionDenom : 1.0f); stressMultiplier += compressionMultiplier; } else if (stressTension > m_settings.tensionElasticLimit) { const float excessStress = stressTension - m_settings.tensionElasticLimit; const float tensionDenom = m_settings.tensionFatalLimit - m_settings.tensionElasticLimit; const float tensionMultiplier = excessStress / (tensionDenom > 0.0f ? tensionDenom : 1.0f); stressMultiplier += tensionMultiplier; } // shear can co-exist with either compression or tension so must be accounted for independently of them if (stressShear > m_settings.shearElasticLimit) { const float excessStress = stressShear - m_settings.shearElasticLimit; const float shearDenom = m_settings.shearFatalLimit - m_settings.shearElasticLimit; const float shearMultiplier = excessStress / (shearDenom > 0.0f ? shearDenom : 1.0f); stressMultiplier += shearMultiplier; } if (stressMultiplier > 0.0f) { // bond health/area is reduced by excess pressure to approximate micro bonds in the material breaking const float bondDamage = bondHealth * stressMultiplier; const NvBlastBondFractureData data = { 0, node0, node1, bondDamage }; m_bondFractureBuffer.pushBack(data); // cache off the current health value for this bond // so it can be used to calculate forces to apply if it breaks later NvBlastActorCacheBondHeath(&actor, bondIndex, logLL); return true; } } return false; } void ExtStressSolverImpl::fillFractureCommands(const NvBlastActor& actor, NvBlastFractureBuffers& commands) { const uint32_t graphNodeCount = NvBlastActorGetGraphNodeCount(&actor, logLL); uint32_t commandCount = 0; if (graphNodeCount > 1 && m_graphProcessor->getOverstressedBondCount() > 0) { uint32_t* graphNodeIndices = getScratchArray<uint32_t>(graphNodeCount); const uint32_t nodeCount = NvBlastActorGetGraphNodeIndices(graphNodeIndices, graphNodeCount, &actor, logLL); for (uint32_t i = 0; i < nodeCount; ++i) { const uint32_t node0 = graphNodeIndices[i]; for (uint32_t adjacencyIndex = m_graph.adjacencyPartition[node0]; adjacencyIndex < m_graph.adjacencyPartition[node0 + 1]; adjacencyIndex++) { const uint32_t node1 = m_graph.adjacentNodeIndices[adjacencyIndex]; if (node0 < node1) { const uint32_t bondIndex = m_graph.adjacentBondIndices[adjacencyIndex]; if (generateStressDamage(actor, bondIndex, node0, node1)) { commandCount++; } } } } } commands.chunkFractureCount = 0; commands.chunkFractures = nullptr; commands.bondFractureCount = commandCount; commands.bondFractures = commandCount > 0 ? m_bondFractureBuffer.end() - commandCount : nullptr; } void ExtStressSolverImpl::generateFractureCommands(const NvBlastActor& actor, NvBlastFractureBuffers& commands) { m_bondFractureBuffer.clear(); fillFractureCommands(actor, commands); } uint32_t ExtStressSolverImpl::generateFractureCommandsPerActor(const NvBlastActor** actorBuffer, NvBlastFractureBuffers* commandsBuffer, uint32_t bufferSize) { if (m_graphProcessor->getOverstressedBondCount() == 0) return 0; m_bondFractureBuffer.clear(); uint32_t index = 0; for (auto it = m_activeActors.getIterator(); !it.done() && index < bufferSize; ++it) { const NvBlastActor* actor = *it; NvBlastFractureBuffers& nextCommand = commandsBuffer[index]; fillFractureCommands(*actor, nextCommand); if (nextCommand.bondFractureCount > 0) { actorBuffer[index] = actor; index++; } } return index; } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Debug Render /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// inline uint32_t NvVec4ToU32Color(const NvVec4& color) { return ((uint32_t)(color.w * 255) << 24) | // A ((uint32_t)(color.x * 255) << 16) | // R ((uint32_t)(color.y * 255) << 8) | // G ((uint32_t)(color.z * 255)); // B } static float Lerp(float v0, float v1, float val) { return v0 * (1 - val) + v1 * val; } inline float clamp01(float v) { return v < 0.0f ? 0.0f : (v > 1.0f ? 1.0f : v); } inline NvVec4 colorConvertHSVAtoRGBA(float h, float s, float v, float a) { const float t = 6.0f * (h - std::floor(h)); const int n = (int)t; const float m = t - (float)n; const float c = 1.0f - s; const float b[6] = { 1.0f, 1.0f - s * m, c, c, 1.0f - s * (1.0f - m), 1.0f }; return NvVec4(v * b[n % 6], v * b[(n + 4) % 6], v * b[(n + 2) % 6], a); // n % 6 protects against roundoff errors } inline uint32_t bondHealthColor(float stressPct) { stressPct = clamp01(stressPct); constexpr float BOND_HEALTHY_HUE = 1.0f/3.0f; // Green constexpr float BOND_ELASTIC_HUE = 0.0f; // Red constexpr float BOND_STRESSED_HUE = 2.0f/3.0f; // Blue constexpr float BOND_FATAL_HUE = 5.0f/6.0f; // Magenta const float hue = stressPct < 0.5f ? Lerp(BOND_HEALTHY_HUE, BOND_ELASTIC_HUE, 2.0f * stressPct) : Lerp(BOND_STRESSED_HUE, BOND_FATAL_HUE, 2.0f * stressPct - 1.0f); return NvVec4ToU32Color(colorConvertHSVAtoRGBA(hue, 1.0f, 1.0f, 1.0f)); } const ExtStressSolver::DebugBuffer ExtStressSolverImpl::fillDebugRender(const uint32_t* nodes, uint32_t nodeCount, DebugRenderMode mode, float scale) { NV_UNUSED(scale); const uint32_t BOND_UNBREAKABLE_COLOR = NvVec4ToU32Color(NvVec4(0.0f, 0.682f, 1.0f, 1.0f)); ExtStressSolver::DebugBuffer debugBuffer = { nullptr, 0 }; if (m_isDirty) return debugBuffer; m_debugLineBuffer.clear(); Array<uint8_t>::type& nodesSet = m_scratch; nodesSet.resize(m_graphProcessor->getSolverNodeCount()); memset(nodesSet.begin(), 0, nodesSet.size() * sizeof(uint8_t)); for (uint32_t i = 0; i < nodeCount; ++i) { NVBLAST_ASSERT(m_graphProcessor->getNodeData(nodes[i]).solverNode < nodesSet.size()); nodesSet[m_graphProcessor->getNodeData(nodes[i]).solverNode] = 1; } const uint32_t bondCount = m_graphProcessor->getSolverBondCount(); for (uint32_t i = 0; i < bondCount; ++i) { const auto& bondData = m_graphProcessor->getBondData(i); uint32_t node0, node1; m_graphProcessor->getSolverInternalBondNodes(i, node0, node1); if (nodesSet[node0] != 0) { //NVBLAST_ASSERT(nodesSet[node1] != 0); const auto& solverNode0 = m_graphProcessor->getSolverNodeData(node0); const auto& solverNode1 = m_graphProcessor->getSolverNodeData(node1); const NvcVec3 p0 = fromNvShared(solverNode0.mass > 0.0f ? solverNode0.localPos : bondData.centroid); const NvcVec3 p1 = fromNvShared(solverNode1.mass > 0.0f ? solverNode1.localPos : bondData.centroid); // don't render lines for broken bonds const float stressPct = m_graphProcessor->getSolverBondStressPct(i, m_bondHealths, m_settings, mode); if (stressPct >= 0.0f) { const uint32_t color = canTakeDamage(m_bondHealths[bondData.blastBondIndex]) ? bondHealthColor(stressPct) : BOND_UNBREAKABLE_COLOR; m_debugLineBuffer.pushBack(DebugLine(p0, p1, color)); } } } debugBuffer.lines = m_debugLineBuffer.begin(); debugBuffer.lineCount = m_debugLineBuffer.size(); return debugBuffer; } } // namespace Blast } // namespace Nv
68,856
C++
37.596973
181
0.586979
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/shaders/NvBlastExtDamageAcceleratorAABBTree.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastExtDamageAcceleratorAABBTree.h" #include "NvBlastIndexFns.h" #include "NvBlastAssert.h" #include "NvVec4.h" #include <algorithm> using namespace nvidia; namespace Nv { namespace Blast { /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Creation /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// ExtDamageAcceleratorAABBTree* ExtDamageAcceleratorAABBTree::create(const NvBlastAsset* asset) { ExtDamageAcceleratorAABBTree* tree = NVBLAST_NEW(Nv::Blast::ExtDamageAcceleratorAABBTree) (); tree->build(asset); return tree; } void ExtDamageAcceleratorAABBTree::release() { NVBLAST_DELETE(this, ExtDamageAcceleratorAABBTree); } void ExtDamageAcceleratorAABBTree::build(const NvBlastAsset* asset) { NVBLAST_ASSERT(m_root == nullptr); const NvBlastSupportGraph graph = NvBlastAssetGetSupportGraph(asset, logLL); const NvBlastBond* bonds = NvBlastAssetGetBonds(asset, logLL); const NvBlastChunk* chunks = NvBlastAssetGetChunks(asset, logLL); const uint32_t N = NvBlastAssetGetBondCount(asset, logLL); m_indices.resizeUninitialized(N); m_points.resizeUninitialized(N); m_segments.resizeUninitialized(N); m_bonds.resizeUninitialized(N); m_nodes.reserve(2 * N); for (uint32_t node0 = 0; node0 < graph.nodeCount; ++node0) { for (uint32_t j = graph.adjacencyPartition[node0]; j < graph.adjacencyPartition[node0 + 1]; ++j) { uint32_t bondIndex = graph.adjacentBondIndices[j]; uint32_t node1 = graph.adjacentNodeIndices[j]; if (node0 < node1) { const NvBlastBond& bond = bonds[bondIndex]; const NvVec3& p = (reinterpret_cast<const NvVec3&>(bond.centroid)); m_points[bondIndex] = p; m_indices[bondIndex] = bondIndex; m_bonds[bondIndex].node0 = node0; m_bonds[bondIndex].node1 = node1; // filling bond segments as a connection of 2 chunk centroids const uint32_t chunk0 = graph.chunkIndices[node0]; const uint32_t chunk1 = graph.chunkIndices[node1]; if (isInvalidIndex(chunk1)) { // for world node we don't have it's centroid, so approximate with projection on bond normal m_segments[bondIndex].p0 = (reinterpret_cast<const NvVec3&>(chunks[chunk0].centroid)); const NvVec3 normal = (reinterpret_cast<const NvVec3&>(bond.normal)); m_segments[bondIndex].p1 = m_segments[bondIndex].p0 + normal * (p - m_segments[bondIndex].p0).dot(normal) * 2; } else { m_segments[bondIndex].p0 = (reinterpret_cast<const NvVec3&>(chunks[chunk0].centroid)); m_segments[bondIndex].p1 = (reinterpret_cast<const NvVec3&>(chunks[chunk1].centroid)); } } } } int rootIndex = N > 0 ? createNode(0, N - 1, 0) : -1; m_root = rootIndex >= 0 ? &m_nodes[rootIndex] : nullptr; } int ExtDamageAcceleratorAABBTree::createNode(uint32_t startIdx, uint32_t endIdx, uint32_t depth) { if (startIdx > endIdx) return -1; Node node; node.first = startIdx; node.last = endIdx; // calc node bounds node.pointsBound = NvBounds3::empty(); node.segmentsBound = NvBounds3::empty(); for (uint32_t i = node.first; i <= node.last; i++) { const uint32_t idx = m_indices[i]; node.pointsBound.include(m_points[idx]); node.segmentsBound.include(m_segments[idx].p0); node.segmentsBound.include(m_segments[idx].p1); } // select axis of biggest extent const NvVec3 ext = node.pointsBound.getExtents(); uint32_t axis = 0; for (uint32_t k = 1; k < 3; k++) { if (ext[k] > ext[axis]) { axis = k; } } // split on selected axis and partially sort around the middle const uint32_t mid = startIdx + (endIdx - startIdx) / 2; std::nth_element(m_indices.begin() + startIdx, m_indices.begin() + mid, m_indices.begin() + endIdx + 1, [&](uint32_t lhs, uint32_t rhs) { return m_points[lhs][axis] < m_points[rhs][axis]; }); const uint32_t BUCKET = 32; if (endIdx - startIdx > BUCKET && mid > startIdx && mid < endIdx) { node.child[0] = createNode(startIdx, mid, depth + 1); node.child[1] = createNode(mid + 1, endIdx, depth + 1); } else { node.child[0] = -1; node.child[1] = -1; } m_nodes.pushBack(node); return m_nodes.size() - 1; } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Queries /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void ExtDamageAcceleratorAABBTree::findInBounds(const nvidia::NvBounds3& bounds, ResultCallback& callback, bool segments) const { if (m_root) { if (segments) findSegmentsInBounds(*m_root, callback, bounds); else findPointsInBounds(*m_root, callback, bounds); callback.dispatch(); } } void ExtDamageAcceleratorAABBTree::findPointsInBounds(const Node& node, ResultCallback& callback, const nvidia::NvBounds3& bounds) const { if (!bounds.intersects(node.pointsBound)) { return; } // if search bound contains node bound, simply add all point indexes. if (node.pointsBound.isInside(bounds)) { for (uint32_t i = node.first; i <= node.last; i++) pushResult(callback, m_indices[i]); return; // early pruning. } if (node.child[0] < 0) { for (uint32_t i = node.first; i <= node.last; i++) { const uint32_t idx = m_indices[i]; if (bounds.contains(m_points[idx])) pushResult(callback, idx); } return; } // check whether child nodes are in range. for (uint32_t c = 0; c < 2; ++c) { findPointsInBounds(m_nodes[node.child[c]], callback, bounds); } } void ExtDamageAcceleratorAABBTree::findSegmentsInBounds(const Node& node, ResultCallback& callback, const nvidia::NvBounds3& bounds) const { if (!bounds.intersects(node.segmentsBound)) { return; } // if search bound contains node bound, simply add all point indexes. if (node.segmentsBound.isInside(bounds)) { for (uint32_t i = node.first; i <= node.last; i++) pushResult(callback, m_indices[i]); return; // early pruning. } if (node.child[0] < 0) { for (uint32_t i = node.first; i <= node.last; i++) { const uint32_t idx = m_indices[i]; if (bounds.contains(m_segments[idx].p0) || bounds.contains(m_segments[idx].p1)) pushResult(callback, idx); } return; } // check whether child nodes are in range. for (uint32_t c = 0; c < 2; ++c) { findSegmentsInBounds(m_nodes[node.child[c]], callback, bounds); } } bool intersectSegmentPlane(const NvVec3& v1, const NvVec3& v2, const NvPlane& p) { const bool s1 = p.distance(v1) > 0.f; const bool s2 = p.distance(v2) > 0.f; return (s1 && !s2) || (s2 && !s1); } bool intersectBoundsPlane(const NvBounds3& b, const NvPlane& p) { const NvVec3 extents = b.getExtents(); const NvVec3 center = b.getCenter(); float r = extents.x * NvAbs(p.n.x) + extents.y * NvAbs(p.n.y) + extents.z * NvAbs(p.n.z); float s = p.n.dot(center) + p.d; return NvAbs(s) <= r; } void ExtDamageAcceleratorAABBTree::findBondSegmentsPlaneIntersected(const nvidia::NvPlane& plane, ResultCallback& resultCallback) const { if (m_root) { findSegmentsPlaneIntersected(*m_root, resultCallback, plane); resultCallback.dispatch(); } } void ExtDamageAcceleratorAABBTree::findSegmentsPlaneIntersected(const Node& node, ResultCallback& callback, const nvidia::NvPlane& plane) const { if (!intersectBoundsPlane(node.segmentsBound, plane)) { return; } if (node.child[0] < 0) { for (uint32_t i = node.first; i <= node.last; i++) { const uint32_t idx = m_indices[i]; if (intersectSegmentPlane(m_segments[idx].p0, m_segments[idx].p1, plane)) pushResult(callback, idx); } return; } // check whether child nodes are in range. for (uint32_t c = 0; c < 2; ++c) { findSegmentsPlaneIntersected(m_nodes[node.child[c]], callback, plane); } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Debug Render /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// inline uint32_t NvVec4ToU32Color(const NvVec4& color) { uint32_t c = 0; c |= (int)(color.w * 255); c <<= 8; c |= (int)(color.z * 255); c <<= 8; c |= (int)(color.y * 255); c <<= 8; c |= (int)(color.x * 255); return c; } Nv::Blast::DebugBuffer ExtDamageAcceleratorAABBTree::fillDebugRender(int depth, bool segments) { Nv::Blast::DebugBuffer debugBuffer = { nullptr, 0 }; m_debugLineBuffer.clear(); if (m_root) { fillDebugBuffer(*m_root, 0, depth, segments); } debugBuffer.lines = m_debugLineBuffer.begin(); debugBuffer.lineCount = m_debugLineBuffer.size(); return debugBuffer; } void ExtDamageAcceleratorAABBTree::fillDebugBuffer(const Node& node, int currentDepth, int depth, bool segments) { if (depth < 0 || currentDepth == depth) { const NvVec4 LEAF_COLOR(1.0f, 1.0f, 1.0f, 1.0f); const NvVec4 NON_LEAF_COLOR(0.3f, 0.3f, 0.3f, 1.0f); // draw box const NvBounds3 bounds = segments ? node.segmentsBound : node.pointsBound; const NvVec3 center = bounds.getCenter(); const NvVec3 extents = bounds.getExtents(); const int vs[] = { 0,3,5,6 }; for (int i = 0; i < 4; i++) { int v = vs[i]; for (int d = 1; d < 8; d <<= 1) { auto flip = [](int x, int k) { return ((x >> k) & 1) * 2.f - 1.f; }; const float s = std::pow(0.99f, currentDepth); NvVec3 p0 = center + s * extents.multiply(NvVec3(flip(v, 0), flip(v, 1), flip(v, 2))); NvVec3 p1 = center + s * extents.multiply(NvVec3(flip(v^d, 0), flip(v^d, 1), flip(v^d, 2))); m_debugLineBuffer.pushBack(Nv::Blast::DebugLine( reinterpret_cast<NvcVec3&>(p0), reinterpret_cast<NvcVec3&>(p1), NvVec4ToU32Color(LEAF_COLOR * (1.f - (currentDepth + 1) * 0.1f))) ); } } } for (uint32_t i = 0; i < 2; ++i) { if (node.child[i] >= 0) { fillDebugBuffer(m_nodes[node.child[i]], currentDepth + 1, depth, segments); } } } } // namespace Blast } // namespace Nv
12,983
C++
33.168421
143
0.571594
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/shaders/NvBlastExtDamageAcceleratorAABBTree.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "NvBlastExtDamageAcceleratorInternal.h" #include "NvBlast.h" #include "NvBlastArray.h" namespace Nv { namespace Blast { class ExtDamageAcceleratorAABBTree final : public ExtDamageAcceleratorInternal { public: //////// ctor //////// ExtDamageAcceleratorAABBTree() : m_root(nullptr) { } virtual ~ExtDamageAcceleratorAABBTree() { } static ExtDamageAcceleratorAABBTree* create(const NvBlastAsset* asset); //////// interface //////// virtual void release() override; virtual void findBondCentroidsInBounds(const nvidia::NvBounds3& bounds, ResultCallback& resultCallback) const override { const_cast<ExtDamageAcceleratorAABBTree*>(this)->findInBounds(bounds, resultCallback, false); } virtual void findBondSegmentsInBounds(const nvidia::NvBounds3& bounds, ResultCallback& resultCallback) const override { const_cast<ExtDamageAcceleratorAABBTree*>(this)->findInBounds(bounds, resultCallback, true); } virtual void findBondSegmentsPlaneIntersected(const nvidia::NvPlane& plane, ResultCallback& resultCallback) const override; virtual Nv::Blast::DebugBuffer fillDebugRender(int depth, bool segments) override; virtual void* getImmediateScratch(size_t size) override { m_scratch.resizeUninitialized(size); return m_scratch.begin(); } private: // no copy/assignment ExtDamageAcceleratorAABBTree(ExtDamageAcceleratorAABBTree&); ExtDamageAcceleratorAABBTree& operator=(const ExtDamageAcceleratorAABBTree& tree); // Tree node struct Node { int child[2]; uint32_t first; uint32_t last; nvidia::NvBounds3 pointsBound; nvidia::NvBounds3 segmentsBound; }; void build(const NvBlastAsset* asset); int createNode(uint32_t startIdx, uint32_t endIdx, uint32_t depth); void pushResult(ResultCallback& callback, uint32_t pointIndex) const { callback.push(pointIndex, m_bonds[pointIndex].node0, m_bonds[pointIndex].node1); } void findInBounds(const nvidia::NvBounds3& bounds, ResultCallback& callback, bool segments) const; void findPointsInBounds(const Node& node, ResultCallback& callback, const nvidia::NvBounds3& bounds) const; void findSegmentsInBounds(const Node& node, ResultCallback& callback, const nvidia::NvBounds3& bounds) const; void findSegmentsPlaneIntersected(const Node& node, ResultCallback& callback, const nvidia::NvPlane& plane) const; void fillDebugBuffer(const Node& node, int currentDepth, int depth, bool segments); //////// data //////// Node* m_root; Array<Node>::type m_nodes; Array<uint32_t>::type m_indices; Array<nvidia::NvVec3>::type m_points; struct Segment { nvidia::NvVec3 p0; nvidia::NvVec3 p1; }; Array<Segment>::type m_segments; struct BondData { uint32_t node0; uint32_t node1; }; Array<BondData>::type m_bonds; Array<Nv::Blast::DebugLine>::type m_debugLineBuffer; Array<char>::type m_scratch; }; } // namespace Blast } // namespace Nv
4,860
C
31.844594
127
0.695885
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/shaders/NvBlastExtDamageShaders.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastExtDamageShaders.h" #include "NvBlastExtDamageAcceleratorInternal.h" #include "NvBlastIndexFns.h" #include "NvBlastMath.h" #include "NvBlastGeometry.h" #include "NvBlastAssert.h" #include "NvBlastFixedQueue.h" #include "NvBlastFixedBitmap.h" #include "NvBlast.h" #include <cmath> // for abs() on linux #include <new> using namespace Nv::Blast; using namespace Nv::Blast::VecMath; using namespace nvidia; /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Profiles /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// typedef float(*ProfileFunction)(float, float, float, float); float falloffProfile(float min, float max, float x, float f = 1.0f) { if (x > max) return 0.0f; if (x < min) return f; float y = 1.0f - (x - min) / (max - min); return y * f; } float cutterProfile(float min, float max, float x, float f = 1.0f) { if (x > max || x < min) return 0.0f; return f; } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Damage Functions /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// typedef float(*DamageFunction)(const float pos[3], const void* damageDescBuffer); template <ProfileFunction profileFn, typename DescT = NvBlastExtRadialDamageDesc> float pointDistanceDamage(const float pos[3], const void* damageDescBuffer) { const DescT& desc = *static_cast<const DescT*>(damageDescBuffer); float relativePosition[3]; sub(desc.position, pos, relativePosition); const float distance = sqrtf(dot(relativePosition, relativePosition)); const float damage = profileFn(desc.minRadius, desc.maxRadius, distance, desc.damage); return damage; } // Distance from point 'p' to line segment '(a, b)' float distanceToSegment(const float p[3], const float a[3], const float b[3]) { float v[3]; sub(b, a, v); float w[3]; sub(p, a, w); const float c1 = dot(v, w); if (c1 <= 0) return length(w); const float c2 = dot(v, v); if (c2 < c1) return dist(p, b); const float t = c1 / c2; mul(v, t); return dist(v, w); } template <ProfileFunction profileFn> float capsuleDistanceDamage(const float pos[3], const void* damageDesc) { const NvBlastExtCapsuleRadialDamageDesc& desc = *static_cast<const NvBlastExtCapsuleRadialDamageDesc*>(damageDesc); const float distance = distanceToSegment(pos, desc.position0, desc.position1); const float damage = profileFn(desc.minRadius, desc.maxRadius, distance, desc.damage); return damage; } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // AABB Functions /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// typedef NvBounds3(*BoundFunction)(const void* damageDesc); NvBounds3 sphereBounds(const void* damageDesc) { const NvBlastExtRadialDamageDesc& desc = *static_cast<const NvBlastExtRadialDamageDesc*>(damageDesc); const nvidia::NvVec3& p = (reinterpret_cast<const nvidia::NvVec3&>(desc.position)); return nvidia::NvBounds3::centerExtents(p, nvidia::NvVec3(desc.maxRadius, desc.maxRadius, desc.maxRadius)); } NvBounds3 capsuleBounds(const void* damageDesc) { const NvBlastExtCapsuleRadialDamageDesc& desc = *static_cast<const NvBlastExtCapsuleRadialDamageDesc*>(damageDesc); const nvidia::NvVec3& p0 = (reinterpret_cast<const nvidia::NvVec3&>(desc.position0)); const nvidia::NvVec3& p1 = (reinterpret_cast<const nvidia::NvVec3&>(desc.position1)); NvBounds3 b = NvBounds3::empty(); b.include(p0); b.include(p1); b.fattenFast(desc.maxRadius); return b; } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Radial Graph Shader Template /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template <DamageFunction damageFn, BoundFunction boundsFn> void RadialProfileGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const void* params) { const uint32_t* graphNodeIndexLinks = actor->graphNodeIndexLinks; const uint32_t firstGraphNodeIndex = actor->firstGraphNodeIndex; const uint32_t* adjacencyPartition = actor->adjacencyPartition; const uint32_t* adjacentNodeIndices = actor->adjacentNodeIndices; const uint32_t* adjacentBondIndices = actor->adjacentBondIndices; const NvBlastBond* assetBonds = actor->assetBonds; const float* familyBondHealths = actor->familyBondHealths; const NvBlastExtProgramParams* programParams = static_cast<const NvBlastExtProgramParams*>(params); uint32_t outCount = 0; auto processBondFn = [&](uint32_t bondIndex, uint32_t node0, uint32_t node1) { // skip bonds that are already broken or were visited already // TODO: investigate why testing against health > -1.0f seems slower // could reuse the island edge bitmap instead if (canTakeDamage(familyBondHealths[bondIndex])) { const NvBlastBond& bond = assetBonds[bondIndex]; const float totalBondDamage = damageFn(bond.centroid, programParams->damageDesc); if (totalBondDamage > 0.0f) { NvBlastBondFractureData& outCommand = commandBuffers->bondFractures[outCount++]; outCommand.nodeIndex0 = node0; outCommand.nodeIndex1 = node1; outCommand.health = totalBondDamage; } } }; const ExtDamageAcceleratorInternal* damageAccelerator = programParams->accelerator ? static_cast<const ExtDamageAcceleratorInternal*>(programParams->accelerator) : nullptr; const uint32_t ACTOR_MINIMUM_NODE_COUNT_TO_ACCELERATE = actor->assetNodeCount / 3; if (damageAccelerator && actor->graphNodeCount > ACTOR_MINIMUM_NODE_COUNT_TO_ACCELERATE) { nvidia::NvBounds3 bounds = boundsFn(programParams->damageDesc); const uint32_t CALLBACK_BUFFER_SIZE = 1000; class AcceleratorCallback : public ExtDamageAcceleratorInternal::ResultCallback { public: AcceleratorCallback(NvBlastFractureBuffers* commandBuffers, uint32_t& outCount, const NvBlastGraphShaderActor* actor, const NvBlastExtProgramParams* programParams) : ExtDamageAcceleratorInternal::ResultCallback(m_buffer, CALLBACK_BUFFER_SIZE), m_actor(actor), m_commandBuffers(commandBuffers), m_outCount(outCount), m_programParams(programParams) { } virtual void processResults(const ExtDamageAcceleratorInternal::QueryBondData* bondBuffer, uint32_t count) override { for (uint32_t i = 0; i < count; i++) { const ExtDamageAcceleratorInternal::QueryBondData& bondData = bondBuffer[i]; if (m_actor->nodeActorIndices[bondData.node0] == m_actor->actorIndex) { if (canTakeDamage(m_actor->familyBondHealths[bondData.bond])) { const NvBlastBond& bond = m_actor->assetBonds[bondData.bond]; const float totalBondDamage = damageFn(bond.centroid, m_programParams->damageDesc); if (totalBondDamage > 0.0f) { NvBlastBondFractureData& outCommand = m_commandBuffers->bondFractures[m_outCount++]; outCommand.nodeIndex0 = bondData.node0; outCommand.nodeIndex1 = bondData.node1; outCommand.health = totalBondDamage; } } } } } private: const NvBlastGraphShaderActor* m_actor; NvBlastFractureBuffers* m_commandBuffers; uint32_t& m_outCount; const NvBlastExtProgramParams* m_programParams; ExtDamageAcceleratorInternal::QueryBondData m_buffer[CALLBACK_BUFFER_SIZE]; }; AcceleratorCallback cb(commandBuffers, outCount, actor, programParams); damageAccelerator->findBondCentroidsInBounds(bounds, cb); } else { uint32_t currentNodeIndex = firstGraphNodeIndex; while (!Nv::Blast::isInvalidIndex(currentNodeIndex)) { for (uint32_t adj = adjacencyPartition[currentNodeIndex]; adj < adjacencyPartition[currentNodeIndex + 1]; adj++) { uint32_t adjacentNodeIndex = adjacentNodeIndices[adj]; if (currentNodeIndex < adjacentNodeIndex) { uint32_t bondIndex = adjacentBondIndices[adj]; processBondFn(bondIndex, currentNodeIndex, adjacentNodeIndex); } } currentNodeIndex = graphNodeIndexLinks[currentNodeIndex]; } } commandBuffers->bondFractureCount = outCount; commandBuffers->chunkFractureCount = 0; } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Radial Single Shader Template /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template <DamageFunction damageFn> void RadialProfileSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const void* params) { uint32_t chunkFractureCount = 0; uint32_t chunkFractureCountMax = commandBuffers->chunkFractureCount; const uint32_t chunkIndex = actor->chunkIndex; const NvBlastChunk* assetChunks = actor->assetChunks; const NvBlastChunk& chunk = assetChunks[chunkIndex]; const NvBlastExtProgramParams* programParams = static_cast<const NvBlastExtProgramParams*>(params); const float totalDamage = damageFn(chunk.centroid, programParams->damageDesc); if (totalDamage > 0.0f && chunkFractureCount < chunkFractureCountMax) { NvBlastChunkFractureData& frac = commandBuffers->chunkFractures[chunkFractureCount++]; frac.chunkIndex = chunkIndex; frac.health = totalDamage; } commandBuffers->bondFractureCount = 0; commandBuffers->chunkFractureCount = chunkFractureCount; } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Radial Shaders Instantiation /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void NvBlastExtFalloffGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const void* params) { RadialProfileGraphShader<pointDistanceDamage<falloffProfile>, sphereBounds>(commandBuffers, actor, params); } void NvBlastExtFalloffSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const void* params) { RadialProfileSubgraphShader<pointDistanceDamage<falloffProfile>>(commandBuffers, actor, params); } void NvBlastExtCutterGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const void* params) { RadialProfileGraphShader<pointDistanceDamage<cutterProfile>, sphereBounds>(commandBuffers, actor, params); } void NvBlastExtCutterSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const void* params) { RadialProfileSubgraphShader<pointDistanceDamage<cutterProfile>>(commandBuffers, actor, params); } void NvBlastExtCapsuleFalloffGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const void* params) { RadialProfileGraphShader<capsuleDistanceDamage<falloffProfile>, capsuleBounds>(commandBuffers, actor, params); } void NvBlastExtCapsuleFalloffSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const void* params) { RadialProfileSubgraphShader<capsuleDistanceDamage<falloffProfile>>(commandBuffers, actor, params); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Shear Shader /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void NvBlastExtShearGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const void* params) { uint32_t chunkFractureCount = 0; uint32_t chunkFractureCountMax = commandBuffers->chunkFractureCount; uint32_t bondFractureCount = 0; uint32_t bondFractureCountMax = commandBuffers->bondFractureCount; const NvBlastExtProgramParams* programParams = static_cast<const NvBlastExtProgramParams*>(params); const NvBlastExtShearDamageDesc& desc = *static_cast<const NvBlastExtShearDamageDesc*>(programParams->damageDesc); const uint32_t* graphNodeIndexLinks = actor->graphNodeIndexLinks; const uint32_t firstGraphNodeIndex = actor->firstGraphNodeIndex; const uint32_t* chunkIndices = actor->chunkIndices; const uint32_t* adjacencyPartition = actor->adjacencyPartition; const uint32_t* adjacentNodeIndices = actor->adjacentNodeIndices; const uint32_t* adjacentBondIndices = actor->adjacentBondIndices; const NvBlastBond* assetBonds = actor->assetBonds; const NvBlastChunk* assetChunks = actor->assetChunks; const float* familyBondHealths = actor->familyBondHealths; const float* supportChunkHealths = actor->supportChunkHealths; uint32_t closestNode = findClosestNode(desc.position , firstGraphNodeIndex, graphNodeIndexLinks , adjacencyPartition, adjacentNodeIndices, adjacentBondIndices , assetBonds, familyBondHealths , assetChunks, supportChunkHealths, chunkIndices); if (!isInvalidIndex(chunkIndices[closestNode])) { uint32_t nodeIndex = closestNode; float maxDist = 0.0f; uint32_t nextNode = invalidIndex<uint32_t>(); if (chunkFractureCount < chunkFractureCountMax) { const uint32_t chunkIndex = chunkIndices[nodeIndex]; const NvBlastChunk& chunk = assetChunks[chunkIndex]; NvBlastChunkFractureData& frac = commandBuffers->chunkFractures[chunkFractureCount++]; frac.chunkIndex = chunkIndex; frac.health = pointDistanceDamage<falloffProfile, NvBlastExtShearDamageDesc>(chunk.centroid, programParams->damageDesc); } do { const uint32_t startIndex = adjacencyPartition[nodeIndex]; const uint32_t stopIndex = adjacencyPartition[nodeIndex + 1]; for (uint32_t adjacentNodeIndex = startIndex; adjacentNodeIndex < stopIndex; adjacentNodeIndex++) { const uint32_t neighbourIndex = adjacentNodeIndices[adjacentNodeIndex]; const uint32_t bondIndex = adjacentBondIndices[adjacentNodeIndex]; const NvBlastBond& bond = assetBonds[bondIndex]; if (!canTakeDamage(familyBondHealths[bondIndex])) continue; float shear = 1 * std::abs(1 - std::abs(VecMath::dot(desc.normal, bond.normal))); float d[3]; VecMath::sub(bond.centroid, desc.position, d); float ahead = VecMath::dot(d, desc.normal); if (ahead > maxDist) { maxDist = ahead; nextNode = neighbourIndex; } const float damage = pointDistanceDamage<falloffProfile, NvBlastExtShearDamageDesc>(bond.centroid, programParams->damageDesc); if (damage > 0.0f && bondFractureCount < bondFractureCountMax) { NvBlastBondFractureData& frac = commandBuffers->bondFractures[bondFractureCount++]; frac.userdata = bond.userData; frac.nodeIndex0 = nodeIndex; frac.nodeIndex1 = neighbourIndex; frac.health = shear * damage; } } if (nodeIndex == nextNode) break; nodeIndex = nextNode; } while (!isInvalidIndex(nextNode)); } commandBuffers->bondFractureCount = bondFractureCount; commandBuffers->chunkFractureCount = chunkFractureCount; } void NvBlastExtShearSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const void* params) { RadialProfileSubgraphShader<pointDistanceDamage<falloffProfile, NvBlastExtShearDamageDesc>>(commandBuffers, actor, params); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Triangle Intersection Damage /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// #define SMALL_NUMBER (1.e-4f) bool intersectSegmentTriangle(const NvVec3& p, const NvVec3& q, const NvVec3& a, const NvVec3& b, const NvVec3& c, const NvPlane& trianglePlane) { const NvVec3 N = trianglePlane.n; const float D = trianglePlane.d; NvVec3 intersectPoint; float t = (-D - (p.dot(N))) / ((q - p).dot(N)); // If the parameter value is not between 0 and 1, there is no intersection if (t > -SMALL_NUMBER && t < 1.f + SMALL_NUMBER) { intersectPoint = p + t * (q - p); } else { return false; } // Compute the normal of the triangle const NvVec3 TriNorm = (b - a).cross(c - a); // Compute twice area of triangle ABC const float AreaABCInv = 1.0f / (N.dot(TriNorm)); // Compute v contribution const float AreaPBC = N.dot((b - intersectPoint).cross(c - intersectPoint)); const float v = AreaPBC * AreaABCInv; if (v <= 0.f) return false; // Compute w contribution const float AreaPCA = N.dot((c - intersectPoint).cross(a - intersectPoint)); const float w = AreaPCA * AreaABCInv; if (w <= 0.f) return false; const float u = 1.0f - v - w; return u > 0.f; } void NvBlastExtTriangleIntersectionGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const void* params) { const uint32_t* graphNodeIndexLinks = actor->graphNodeIndexLinks; const uint32_t firstGraphNodeIndex = actor->firstGraphNodeIndex; const uint32_t* adjacencyPartition = actor->adjacencyPartition; const uint32_t* adjacentNodeIndices = actor->adjacentNodeIndices; const uint32_t* adjacentBondIndices = actor->adjacentBondIndices; const NvBlastBond* assetBonds = actor->assetBonds; const NvBlastChunk* assetChunks = actor->assetChunks; const uint32_t* chunkIndices = actor->chunkIndices; const float* familyBondHealths = actor->familyBondHealths; const NvBlastExtProgramParams* programParams = static_cast<const NvBlastExtProgramParams*>(params); const NvBlastExtTriangleIntersectionDamageDesc& desc = *static_cast<const NvBlastExtTriangleIntersectionDamageDesc*>(programParams->damageDesc); const nvidia::NvVec3& t0 = (reinterpret_cast<const nvidia::NvVec3&>(desc.position0)); const nvidia::NvVec3& t1 = (reinterpret_cast<const nvidia::NvVec3&>(desc.position1)); const nvidia::NvVec3& t2 = (reinterpret_cast<const nvidia::NvVec3&>(desc.position2)); const NvPlane trianglePlane(t0, t1, t2); uint32_t outCount = 0; const ExtDamageAcceleratorInternal* damageAccelerator = programParams->accelerator ? static_cast<const ExtDamageAcceleratorInternal*>(programParams->accelerator) : nullptr; const uint32_t ACTOR_MINIMUM_NODE_COUNT_TO_ACCELERATE = actor->assetNodeCount / 3; if (damageAccelerator && actor->graphNodeCount > ACTOR_MINIMUM_NODE_COUNT_TO_ACCELERATE) { const uint32_t CALLBACK_BUFFER_SIZE = 1000; class AcceleratorCallback : public ExtDamageAcceleratorInternal::ResultCallback { public: AcceleratorCallback(NvBlastFractureBuffers* commandBuffers, uint32_t& outCount, const NvBlastGraphShaderActor* actor, const NvBlastExtTriangleIntersectionDamageDesc& desc) : ExtDamageAcceleratorInternal::ResultCallback(m_buffer, CALLBACK_BUFFER_SIZE), m_actor(actor), m_commandBuffers(commandBuffers), m_outCount(outCount), m_desc(desc) { } virtual void processResults(const ExtDamageAcceleratorInternal::QueryBondData* bondBuffer, uint32_t count) override { const nvidia::NvVec3& t0 = (reinterpret_cast<const nvidia::NvVec3&>(m_desc.position0)); const nvidia::NvVec3& t1 = (reinterpret_cast<const nvidia::NvVec3&>(m_desc.position1)); const nvidia::NvVec3& t2 = (reinterpret_cast<const nvidia::NvVec3&>(m_desc.position2)); const NvPlane trianglePlane(t0, t1, t2); for (uint32_t i = 0; i < count; i++) { const ExtDamageAcceleratorInternal::QueryBondData& bondData = bondBuffer[i]; if (m_actor->nodeActorIndices[bondData.node0] == m_actor->actorIndex) { if (canTakeDamage(m_actor->familyBondHealths[bondData.bond])) { const NvBlastBond& bond = m_actor->assetBonds[bondData.bond]; const uint32_t chunkIndex0 = m_actor->chunkIndices[bondData.node0]; const uint32_t chunkIndex1 = m_actor->chunkIndices[bondData.node1]; const nvidia::NvVec3& c0 = (reinterpret_cast<const nvidia::NvVec3&>(m_actor->assetChunks[chunkIndex0].centroid)); const NvVec3& normal = (reinterpret_cast<const NvVec3&>(bond.normal)); const NvVec3& bondCentroid = (reinterpret_cast<const NvVec3&>(bond.centroid)); const nvidia::NvVec3& c1 = isInvalidIndex(chunkIndex1) ? (c0 + normal * (bondCentroid - c0).dot(normal)) : (reinterpret_cast<const nvidia::NvVec3&>(m_actor->assetChunks[chunkIndex1].centroid)); if(intersectSegmentTriangle(c0, c1, t0, t1, t2, trianglePlane)) { NvBlastBondFractureData& outCommand = m_commandBuffers->bondFractures[m_outCount++]; outCommand.nodeIndex0 = bondData.node0; outCommand.nodeIndex1 = bondData.node1; outCommand.health = m_desc.damage; } } } } } private: const NvBlastGraphShaderActor* m_actor; NvBlastFractureBuffers* m_commandBuffers; uint32_t& m_outCount; const NvBlastExtTriangleIntersectionDamageDesc& m_desc; ExtDamageAcceleratorInternal::QueryBondData m_buffer[CALLBACK_BUFFER_SIZE]; }; AcceleratorCallback cb(commandBuffers, outCount, actor, desc); damageAccelerator->findBondSegmentsPlaneIntersected(trianglePlane, cb); } else { uint32_t currentNodeIndex = firstGraphNodeIndex; while (!Nv::Blast::isInvalidIndex(currentNodeIndex)) { for (uint32_t adj = adjacencyPartition[currentNodeIndex]; adj < adjacencyPartition[currentNodeIndex + 1]; adj++) { uint32_t adjacentNodeIndex = adjacentNodeIndices[adj]; if (currentNodeIndex < adjacentNodeIndex) { uint32_t bondIndex = adjacentBondIndices[adj]; // skip bonds that are already broken or were visited already // TODO: investigate why testing against health > -1.0f seems slower // could reuse the island edge bitmap instead if (canTakeDamage(familyBondHealths[bondIndex])) { const NvBlastBond& bond = assetBonds[bondIndex]; const uint32_t chunkIndex0 = chunkIndices[currentNodeIndex]; const uint32_t chunkIndex1 = chunkIndices[adjacentNodeIndex]; const nvidia::NvVec3& c0 = (reinterpret_cast<const nvidia::NvVec3&>(assetChunks[chunkIndex0].centroid)); const NvVec3& normal = (reinterpret_cast<const NvVec3&>(bond.normal)); const NvVec3& bondCentroid = (reinterpret_cast<const NvVec3&>(bond.centroid)); const nvidia::NvVec3& c1 = isInvalidIndex(chunkIndex1) ? (c0 + normal * (bondCentroid - c0).dot(normal)) : (reinterpret_cast<const nvidia::NvVec3&>(assetChunks[chunkIndex1].centroid)); if (intersectSegmentTriangle(c0, c1, t0, t1, t2, trianglePlane)) { NvBlastBondFractureData& outCommand = commandBuffers->bondFractures[outCount++]; outCommand.nodeIndex0 = currentNodeIndex; outCommand.nodeIndex1 = adjacentNodeIndex; outCommand.health = desc.damage; } } } } currentNodeIndex = graphNodeIndexLinks[currentNodeIndex]; } } commandBuffers->bondFractureCount = outCount; commandBuffers->chunkFractureCount = 0; } void NvBlastExtTriangleIntersectionSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const void* params) { uint32_t chunkFractureCount = 0; uint32_t chunkFractureCountMax = commandBuffers->chunkFractureCount; const uint32_t chunkIndex = actor->chunkIndex; const NvBlastChunk* assetChunks = actor->assetChunks; const NvBlastChunk& chunk = assetChunks[chunkIndex]; const NvBlastExtProgramParams* programParams = static_cast<const NvBlastExtProgramParams*>(params); const NvBlastExtTriangleIntersectionDamageDesc& desc = *static_cast<const NvBlastExtTriangleIntersectionDamageDesc*>(programParams->damageDesc); const nvidia::NvVec3& t0 = (reinterpret_cast<const nvidia::NvVec3&>(desc.position0)); const nvidia::NvVec3& t1 = (reinterpret_cast<const nvidia::NvVec3&>(desc.position1)); const nvidia::NvVec3& t2 = (reinterpret_cast<const nvidia::NvVec3&>(desc.position2)); const NvPlane trianglePlane(t0, t1, t2); for (uint32_t subChunkIndex = chunk.firstChildIndex; subChunkIndex < chunk.childIndexStop; subChunkIndex++) { const nvidia::NvVec3& c0 = (reinterpret_cast<const nvidia::NvVec3&>(assetChunks[subChunkIndex].centroid)); const nvidia::NvVec3& c1 = (reinterpret_cast<const nvidia::NvVec3&>(assetChunks[subChunkIndex + 1].centroid)); if (chunkFractureCount < chunkFractureCountMax && intersectSegmentTriangle(c0, c1, t0, t1, t2, trianglePlane)) { NvBlastChunkFractureData& frac = commandBuffers->chunkFractures[chunkFractureCount++]; frac.chunkIndex = chunkIndex; frac.health = desc.damage; break; } } commandBuffers->bondFractureCount = 0; commandBuffers->chunkFractureCount = chunkFractureCount; } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Impact Spread Shader /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void NvBlastExtImpactSpreadGraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastGraphShaderActor* actor, const void* params) { uint32_t bondFractureCount = 0; uint32_t bondFractureCountMax = commandBuffers->bondFractureCount; const NvBlastExtProgramParams* programParams = static_cast<const NvBlastExtProgramParams*>(params); const NvBlastExtImpactSpreadDamageDesc& desc = *static_cast<const NvBlastExtImpactSpreadDamageDesc*>(programParams->damageDesc); const uint32_t* graphNodeIndexLinks = actor->graphNodeIndexLinks; const uint32_t firstGraphNodeIndex = actor->firstGraphNodeIndex; const uint32_t* chunkIndices = actor->chunkIndices; const uint32_t* adjacencyPartition = actor->adjacencyPartition; const uint32_t* adjacentNodeIndices = actor->adjacentNodeIndices; const uint32_t* adjacentBondIndices = actor->adjacentBondIndices; const NvBlastBond* assetBonds = actor->assetBonds; const NvBlastChunk* assetChunks = actor->assetChunks; const float* familyBondHealths = actor->familyBondHealths; const float* supportChunkHealths = actor->supportChunkHealths; // Find nearest chunk. uint32_t closestNode = findClosestNode(desc.position , firstGraphNodeIndex, graphNodeIndexLinks , adjacencyPartition, adjacentNodeIndices, adjacentBondIndices , assetBonds, familyBondHealths , assetChunks, supportChunkHealths, chunkIndices); // Breadth-first support graph traversal. For radial falloff metric distance is measured along the edges of the graph ExtDamageAcceleratorInternal* damageAccelerator = programParams->accelerator ? static_cast<ExtDamageAcceleratorInternal*>(programParams->accelerator) : nullptr; NVBLAST_ASSERT_WITH_MESSAGE(damageAccelerator, "This shader requires damage accelerator passed"); if (!isInvalidIndex(chunkIndices[closestNode]) && damageAccelerator) { struct NodeData { uint32_t index; float distance; }; // Calculating scratch size and requesting it from the accelerator const uint32_t bondCount = actor->adjacencyPartition[actor->assetNodeCount]; const size_t nodeQueueSize = align16(FixedQueue<NodeData>::requiredMemorySize(actor->graphNodeCount)); const size_t visitedBitmapSize = align16(FixedBitmap::requiredMemorySize(bondCount)); const size_t scratchSize = 16 + nodeQueueSize + visitedBitmapSize; void* scratch = damageAccelerator->getImmediateScratch(scratchSize); // prepare intermediate data on scratch scratch = (void*)align16((size_t)scratch); // Bump to 16-byte alignment FixedQueue<NodeData>* nodeQueue = new (scratch)FixedQueue<NodeData>(actor->graphNodeCount); scratch = pointerOffset(scratch, align16(nodeQueueSize)); FixedBitmap* visitedBitmap = new (scratch)FixedBitmap(bondCount); scratch = pointerOffset(scratch, align16(FixedBitmap::requiredMemorySize(bondCount))); // initalize traversal nodeQueue->pushBack({ closestNode, 0.f }); visitedBitmap->clear(); while (!nodeQueue->empty()) { NodeData currentNode = nodeQueue->popFront(); const uint32_t startIndex = adjacencyPartition[currentNode.index]; const uint32_t stopIndex = adjacencyPartition[currentNode.index + 1]; for (uint32_t adjacentNodeIndex = startIndex; adjacentNodeIndex < stopIndex; adjacentNodeIndex++) { const uint32_t neighbourIndex = adjacentNodeIndices[adjacentNodeIndex]; const uint32_t bondIndex = adjacentBondIndices[adjacentNodeIndex]; const NvBlastBond& bond = assetBonds[bondIndex]; const NvVec3& bondCentroid = (reinterpret_cast<const NvVec3&>(bond.centroid)); if (!canTakeDamage(familyBondHealths[bondIndex])) continue; if (visitedBitmap->test(bondIndex)) continue; visitedBitmap->set(bondIndex); const uint32_t chunkIndex0 = chunkIndices[currentNode.index]; const uint32_t chunkIndex1 = chunkIndices[neighbourIndex]; const nvidia::NvVec3& c0 = reinterpret_cast<const nvidia::NvVec3&>(assetChunks[chunkIndex0].centroid); bool isNeighbourWorldChunk = isInvalidIndex(chunkIndex1); const nvidia::NvVec3& c1 = isNeighbourWorldChunk ? bondCentroid : (reinterpret_cast<const nvidia::NvVec3&>(assetChunks[chunkIndex1].centroid)); const float distance = (c1 - c0).magnitude() * (isNeighbourWorldChunk ? 2.f : 1.f); float totalDistance = currentNode.distance + distance; float totalDamage = desc.damage * falloffProfile(desc.minRadius, desc.maxRadius, totalDistance); if (totalDamage > 0.0f && bondFractureCount < bondFractureCountMax) { NvBlastBondFractureData& frac = commandBuffers->bondFractures[bondFractureCount++]; frac.userdata = bond.userData; frac.nodeIndex0 = currentNode.index; frac.nodeIndex1 = neighbourIndex; frac.health = totalDamage; if (!isNeighbourWorldChunk) { nodeQueue->pushBack({ neighbourIndex, totalDistance }); } } } } } commandBuffers->bondFractureCount = bondFractureCount; commandBuffers->chunkFractureCount = 0; } void NvBlastExtImpactSpreadSubgraphShader(NvBlastFractureBuffers* commandBuffers, const NvBlastSubgraphShaderActor* actor, const void* params) { uint32_t chunkFractureCount = 0; uint32_t chunkFractureCountMax = commandBuffers->chunkFractureCount; const uint32_t chunkIndex = actor->chunkIndex; const NvBlastExtProgramParams* programParams = static_cast<const NvBlastExtProgramParams*>(params); const NvBlastExtImpactSpreadDamageDesc& desc = *static_cast<const NvBlastExtImpactSpreadDamageDesc*>(programParams->damageDesc); if (chunkFractureCount < chunkFractureCountMax) { NvBlastChunkFractureData& frac = commandBuffers->chunkFractures[chunkFractureCount++]; frac.chunkIndex = chunkIndex; frac.health = desc.damage; } commandBuffers->bondFractureCount = 0; commandBuffers->chunkFractureCount = chunkFractureCount; }
36,490
C++
47.460823
185
0.626528
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/shaders/NvBlastExtDamageAcceleratorInternal.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "NvBlastExtDamageShaders.h" #include "NvBounds3.h" namespace Nv { namespace Blast { class ExtDamageAcceleratorInternal : public NvBlastExtDamageAccelerator { public: struct QueryBondData { uint32_t bond; uint32_t node0; uint32_t node1; }; class ResultCallback { public: ResultCallback(QueryBondData* buffer, uint32_t count) : m_bondBuffer(buffer), m_bondMaxCount(count), m_bondCount(0) {} virtual void processResults(const QueryBondData* bondBuffer, uint32_t count) = 0; void push(uint32_t bond, uint32_t node0, uint32_t node1) { m_bondBuffer[m_bondCount].bond = bond; m_bondBuffer[m_bondCount].node0 = node0; m_bondBuffer[m_bondCount].node1 = node1; m_bondCount++; if (m_bondCount == m_bondMaxCount) { dispatch(); } } void dispatch() { if (m_bondCount) { processResults(m_bondBuffer, m_bondCount); m_bondCount = 0; } } private: QueryBondData* m_bondBuffer; uint32_t m_bondMaxCount; uint32_t m_bondCount; }; virtual void findBondCentroidsInBounds(const nvidia::NvBounds3& bounds, ResultCallback& resultCallback) const = 0; virtual void findBondSegmentsInBounds(const nvidia::NvBounds3& bounds, ResultCallback& resultCallback) const = 0; virtual void findBondSegmentsPlaneIntersected(const nvidia::NvPlane& plane, ResultCallback& resultCallback) const = 0; // Non-thread safe! Multiple calls return the same memory. virtual void* getImmediateScratch(size_t size) = 0; }; } // namespace Blast } // namespace Nv
3,373
C
34.145833
122
0.684554
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/shaders/NvBlastExtDamageAccelerators.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. //#include "NvBlastExtDamageAcceleratorOctree.h" //#include "NvBlastExtDamageAcceleratorKdtree.h" #include "NvBlastExtDamageAcceleratorAABBTree.h" NvBlastExtDamageAccelerator* NvBlastExtDamageAcceleratorCreate(const NvBlastAsset* asset, int type) { switch (type) { case 0: return nullptr; default: return Nv::Blast::ExtDamageAcceleratorAABBTree::create(asset); break; } }
2,001
C++
45.558138
99
0.753623
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/NvBlastExtInputStream.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastExtInputStream.h" Nv::Blast::ExtInputStream::ExtInputStream(std::istream &inputStream) : m_inputStream(inputStream) { } size_t Nv::Blast::ExtInputStream::tryRead(void* buffer, size_t /*minBytes*/, size_t maxBytes) { m_inputStream.read((char *) buffer, maxBytes); if (m_inputStream.fail()) { // Throw exception, log error // NVBLAST_LOG_ERROR("Failure when reading from stream"); } // Since we're using a blocking read above, if we don't have maxBytes we're probably done if ((size_t) m_inputStream.gcount() < maxBytes) { // NVBLAST_LOG_ERROR("Failed to read requested number of bytes during blocking read."); } return m_inputStream.gcount(); }
2,291
C++
39.928571
93
0.736796
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/NvBlastExtSerializationInternal.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "NvBlastExtSerialization.h" #include <cstring> #define ExtSerializerBoilerplate(_name, _description, _objectTypeID, _encodingID) \ virtual const char* getName() const override { return _name; } \ virtual const char* getDescription() const override { return _description; } \ virtual uint32_t getObjectTypeID() const override { return _objectTypeID; } \ virtual uint32_t getEncodingID() const override { return _encodingID; } #define ExtSerializerReadOnly(_name) \ virtual bool isReadOnly() const override { return true; } \ virtual uint64_t serializeIntoBuffer \ ( \ void*& buffer, \ ExtSerialization::BufferProvider& bufferProvider, \ const void* object, \ uint64_t offset = 0 \ ) override \ { \ NVBLAST_LOG_WARNING(#_name "::serializeIntoBuffer: serializer is read-only."); \ NV_UNUSED(buffer); \ NV_UNUSED(bufferProvider); \ NV_UNUSED(object); \ NV_UNUSED(offset); \ return 0; \ } #define ExtSerializerDefaultFactoryAndRelease(_classname) \ static ExtSerializer* create() \ { \ return NVBLAST_NEW(_classname) (); \ } \ virtual void release() override \ { \ NVBLAST_DELETE(this, _classname); \ } namespace Nv { namespace Blast { /** Serializer internal interface */ class ExtSerializer { public: virtual ~ExtSerializer() {} /** return the name of this serializer. */ virtual const char* getName() const = 0; /** return a description of this serializer. */ virtual const char* getDescription() const = 0; /** return an identifier for the type of object handled. */ virtual uint32_t getObjectTypeID() const = 0; /** return an identifier for serialization format. */ virtual uint32_t getEncodingID() const = 0; /** Whether or not this serializer supports writing. Legacy formats, for example, may not. \return true iff this serialization does not support writing. */ virtual bool isReadOnly() const { return false; } /** Deserialize from a buffer into a newly allocated object. \param[in] buffer Pointer to the buffer to read. \param[in] size Size of the buffer to read. \return object pointer; returns null if failed to deserialize. */ virtual void* deserializeFromBuffer(const void* buffer, uint64_t size) = 0; /** Serialize into a buffer. Allocates the buffer internally using the ExtSerialization::BufferProvider callack interface. \param[out] buffer Pointer to the buffer created. \param[in] bufferProvider The buffer provider callback interface to use. \param[in] object Object pointer. \return the number of bytes serialized into the buffer (zero if unsuccessful). */ virtual uint64_t serializeIntoBuffer(void*& buffer, ExtSerialization::BufferProvider& bufferProvider, const void* object, uint64_t offset = 0) = 0; /** Release the serializer and free associated memory. */ virtual void release() = 0; }; /** Internal serialization manager interface */ class ExtSerializationInternal : public ExtSerialization { public: /** Internal interfaces to register and unregister a serializer, used by modules to automatically register all of their serializers with a serialization manager. */ virtual bool registerSerializer(ExtSerializer& serializer) = 0; virtual bool unregisterSerializer(ExtSerializer& serializer) = 0; /** Find a registered serializer for the given object type and encoding. \param[in] objectTypeID ID for the requested object type. \param[in] encodingID ID for the requested encoding (see EncodingID). \return a registered serializer if found, NULL otherwise. */ virtual ExtSerializer* findSerializer(uint32_t objectTypeID, uint32_t encodingID) = 0; //// Enums //// enum { HeaderSize = 128 }; }; template<typename Factory, size_t N> size_t ExtSerializationLoadSet(Nv::Blast::ExtSerializationInternal& serialization, Factory(&factories)[N]) { size_t count = 0; for (auto f : factories) { Nv::Blast::ExtSerializer* serializer = f(); if (serializer != nullptr) { if (serialization.registerSerializer(*serializer)) { ++count; } else { NVBLAST_LOG_ERROR("Nv::Blast::ExtSerializationLoadSet: failed to register serailizer:"); NVBLAST_LOG_ERROR(serializer->getName()); serializer->release(); } } else { NVBLAST_LOG_ERROR("Nv::Blast::ExtSerializationLoadSet: failed to create serailizer."); } } return count; } class ExtIStream { public: enum Flags { LittleEndian = (1 << 0), Fail = (1 << 1) }; ExtIStream(const void* buffer, size_t size) : m_buf(reinterpret_cast<const char*>(buffer)), m_flags(0) { m_cur = m_buf; m_end = m_buf + size; const uint16_t x = LittleEndian; m_flags = *reinterpret_cast<const char*>(&x); } bool advance(ptrdiff_t diff) { m_cur += diff; if (m_cur < m_buf) { m_cur = m_buf; m_flags |= Fail; return false; } else if (m_cur > m_end) { m_cur = m_end; m_flags |= Fail; return false; } return true; } const void* view() { return m_cur; } bool read(void* buffer, size_t size) { if (!canRead(size)) return false; std::memcpy(buffer, m_cur, size); m_cur += size; return true; } size_t tellg() const { return m_cur - m_buf; } size_t left() const { return m_end - m_cur; } bool eof() const { return m_cur >= m_end; } bool fail() const { return (m_flags & Fail) != 0; } private: const char* m_buf; const char* m_cur; const char* m_end; uint32_t m_flags; bool isLittleEndian() const { return (m_flags & LittleEndian) != 0; } bool canRead(size_t size) const { return m_cur + size <= m_end; } template<typename T> friend ExtIStream& operator >> (ExtIStream& s, T& x); }; template<typename T> NV_INLINE ExtIStream& operator >> (ExtIStream& s, T& x) { if (s.canRead(sizeof(T))) { if (s.isLittleEndian()) { x = *reinterpret_cast<const T*>(s.m_cur); s.m_cur += sizeof(T); } else { char* b = reinterpret_cast<char*>(&x) + sizeof(T); for (size_t n = sizeof(T); n--;) *--b = *s.m_cur++; } } else { s.m_flags |= ExtIStream::Fail; } return s; } } // namespace Blast } // namespace Nv
9,883
C
32.965636
154
0.540828
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/NvBlastExtOutputStream.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "kj/io.h" #include <ostream> namespace Nv { namespace Blast { class ExtOutputStream : public kj::OutputStream { public: ExtOutputStream() = delete; ExtOutputStream(std::ostream &outputStream); virtual void write(const void* buffer, size_t size) override; private: std::ostream &m_outputStream; }; } // namespace Blast } // namespace Nv
1,944
C
36.403845
74
0.756173
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/NvBlastExtTkSerializerRAW.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastExtSerializationInternal.h" #include "NvBlastTkFramework.h" #include "NvBlastTkAsset.h" #include "NvBlast.h" namespace Nv { namespace Blast { // Legacy IDs struct ExtTkSerializationLegacyID { enum Enum { Framework = NVBLAST_FOURCC('T', 'K', 'F', 'W'), //!< TkFramework identifier token, used in serialization Asset = NVBLAST_FOURCC('A', 'S', 'S', 'T'), //!< TkAsset identifier token, used in serialization Family = NVBLAST_FOURCC('A', 'C', 'T', 'F'), //!< TkFamily identifier token, used in serialization }; }; // Legacy object format versions struct ExtTkSerializationLegacyAssetVersion { enum Enum { /** Initial version */ Initial, // New formats must come before Count. They should be given descriptive names with more information in comments. /** The number of serialized formats. */ Count, /** The current version. This should always be Count-1 */ Current = Count - 1 }; }; struct ExtTkSerializationLegacyFamilyVersion { enum Enum { /** Initial version */ Initial, // New formats must come before Count. They should be given descriptive names with more information in comments. /** The number of serialized formats. */ Count, /** The current version. This should always be Count-1 */ Current = Count - 1 }; }; static bool deserializeTkObjectHeader(uint32_t& legacyTypeID, uint32_t& legacyVersion, NvBlastID& objID, uint64_t& userIntData, ExtIStream& stream) { // Read framework ID uint32_t fwkID = 0; // Initialize to silence some compilers stream >> fwkID; if (fwkID != ExtTkSerializationLegacyID::Framework) { NVBLAST_LOG_ERROR("deserializeTkObjectHeader: stream does not contain a BlastTk legacy object."); return false; } // Read object class ID stream >> legacyTypeID; // Read object class version and ensure it's current stream >> legacyVersion; // Object ID stream.read(objID.data, sizeof(NvBlastID)); // Serializable user data uint32_t lsd, msd; stream >> lsd >> msd; userIntData = static_cast<uint64_t>(msd) << 32 | static_cast<uint64_t>(lsd); return !stream.fail(); } TkAsset* deserializeTkAsset(ExtIStream& stream, TkFramework& framework) { // Deserializer header uint32_t legacyTypeID; uint32_t legacyVersion; NvBlastID objID; uint64_t userIntData; if (!deserializeTkObjectHeader(legacyTypeID, legacyVersion, objID, userIntData, stream)) { return nullptr; } if (legacyTypeID != ExtTkSerializationLegacyID::Asset) { NVBLAST_LOG_ERROR("deserializeTkAsset: stream does not contain a BlastTk legacy asset."); return nullptr; } if (legacyVersion > ExtTkSerializationLegacyAssetVersion::Current) { NVBLAST_LOG_ERROR("deserializeTkAsset: stream contains a BlastTk legacy asset which is in an unknown version."); return nullptr; } // LL asset uint32_t assetSize; stream >> assetSize; NvBlastAsset* llAsset = static_cast<NvBlastAsset*>(NVBLAST_ALLOC_NAMED(assetSize, "deserializeTkAsset")); stream.read(reinterpret_cast<char*>(llAsset), assetSize); // Joint descs uint32_t jointDescCount; stream >> jointDescCount; std::vector<TkAssetJointDesc> jointDescs(jointDescCount); for (uint32_t i = 0; i < jointDescs.size(); ++i) { TkAssetJointDesc& jointDesc = jointDescs[i]; stream >> jointDesc.nodeIndices[0]; stream >> jointDesc.nodeIndices[1]; stream >> jointDesc.attachPositions[0].x; stream >> jointDesc.attachPositions[0].y; stream >> jointDesc.attachPositions[0].z; stream >> jointDesc.attachPositions[1].x; stream >> jointDesc.attachPositions[1].y; stream >> jointDesc.attachPositions[1].z; } if (stream.fail()) { NVBLAST_FREE(llAsset); return nullptr; } TkAsset* asset = framework.createAsset(llAsset, jointDescs.data(), (uint32_t)jointDescs.size(), true); NvBlastID zeroID; memset(zeroID.data, 0, sizeof(zeroID)); if (!memcmp(zeroID.data, objID.data, sizeof(NvBlastID))) { asset->setID(objID); } asset->userIntData = userIntData; return asset; } } // namespace Blast } // namespace Nv
5,965
C++
31.423913
147
0.682481
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/NvBlastExtSerializationCAPN.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "capnp/serialize.h" #include "NvBlastExtInputStream.h" #include "NvBlastExtOutputStream.h" #include "NvBlastArray.h" #include "NvBlastExtSerialization.h" namespace Nv { namespace Blast { template<typename TObject, typename TSerializationReader, typename TSerializationBuilder> class ExtSerializationCAPN { public: static TObject* deserializeFromBuffer(const unsigned char* input, uint64_t size); static TObject* deserializeFromStream(std::istream& inputStream); static uint64_t serializationBufferSize(const TObject* object); static bool serializeIntoBuffer(const TObject* object, unsigned char* buffer, uint64_t maxSize, uint64_t& usedSize); static bool serializeIntoBuffer(const TObject *object, unsigned char*& buffer, uint64_t& size, ExtSerialization::BufferProvider* bufferProvider = nullptr, uint64_t offset = 0); static bool serializeIntoStream(const TObject* object, std::ostream& outputStream); private: // Specialized static bool serializeIntoBuilder(TSerializationBuilder& objectBuilder, const TObject* object); static bool serializeIntoMessage(capnp::MallocMessageBuilder& message, const TObject* object); static TObject* deserializeFromStreamReader(capnp::InputStreamMessageReader& message); }; template<typename TObject, typename TSerializationReader, typename TSerializationBuilder> TObject* ExtSerializationCAPN<TObject, TSerializationReader, TSerializationBuilder>::deserializeFromBuffer(const unsigned char* input, uint64_t size) { kj::ArrayPtr<const unsigned char> source(input, size); kj::ArrayInputStream inputStream(source); Nv::Blast::Array<uint64_t>::type scratch(static_cast<uint32_t>(size)); kj::ArrayPtr<capnp::word> scratchArray((capnp::word*) scratch.begin(), size); capnp::InputStreamMessageReader message(inputStream, capnp::ReaderOptions(), scratchArray); return deserializeFromStreamReader(message); } template<typename TObject, typename TSerializationReader, typename TSerializationBuilder> TObject* ExtSerializationCAPN<TObject, TSerializationReader, TSerializationBuilder>::deserializeFromStream(std::istream& inputStream) { ExtInputStream readStream(inputStream); capnp::InputStreamMessageReader message(readStream); return deserializeFromStreamReader(message); } template<typename TObject, typename TSerializationReader, typename TSerializationBuilder> uint64_t ExtSerializationCAPN<TObject, TSerializationReader, TSerializationBuilder>::serializationBufferSize(const TObject* object) { capnp::MallocMessageBuilder message; bool result = serializeIntoMessage(message, object); if (result == false) { return 0; } return computeSerializedSizeInWords(message) * sizeof(uint64_t); } template<typename TObject, typename TSerializationReader, typename TSerializationBuilder> bool ExtSerializationCAPN<TObject, TSerializationReader, TSerializationBuilder>::serializeIntoBuffer(const TObject* object, unsigned char* buffer, uint64_t maxSize, uint64_t& usedSize) { capnp::MallocMessageBuilder message; bool result = serializeIntoMessage(message, object); if (result == false) { usedSize = 0; return false; } uint64_t messageSize = computeSerializedSizeInWords(message) * sizeof(uint64_t); if (maxSize < messageSize) { NVBLAST_LOG_ERROR("When attempting to serialize into an existing buffer, the provided buffer was too small."); usedSize = 0; return false; } kj::ArrayPtr<unsigned char> outputBuffer(buffer, maxSize); kj::ArrayOutputStream outputStream(outputBuffer); capnp::writeMessage(outputStream, message); usedSize = messageSize; return true; } template<typename TObject, typename TSerializationReader, typename TSerializationBuilder> bool ExtSerializationCAPN<TObject, TSerializationReader, TSerializationBuilder>::serializeIntoBuffer(const TObject *object, unsigned char*& buffer, uint64_t& size, ExtSerialization::BufferProvider* bufferProvider, uint64_t offset) { capnp::MallocMessageBuilder message; bool result = serializeIntoMessage(message, object); if (result == false) { buffer = nullptr; size = 0; return false; } const uint64_t blockSize = computeSerializedSizeInWords(message) * sizeof(uint64_t); size = blockSize + offset; buffer = static_cast<unsigned char *>(bufferProvider != nullptr ? bufferProvider->requestBuffer(size) : NVBLAST_ALLOC(size)); kj::ArrayPtr<unsigned char> outputBuffer(buffer + offset, blockSize); kj::ArrayOutputStream outputStream(outputBuffer); capnp::writeMessage(outputStream, message); return true; } template<typename TObject, typename TSerializationReader, typename TSerializationBuilder> bool ExtSerializationCAPN<TObject, TSerializationReader, TSerializationBuilder>::serializeIntoStream(const TObject* object, std::ostream& outputStream) { capnp::MallocMessageBuilder message; bool result = serializeIntoMessage(message, object); if (result == false) { return false; } ExtOutputStream blastOutputStream(outputStream); writeMessage(blastOutputStream, message); return true; } } // namespace Blast } // namespace Nv
6,870
C
35.547872
230
0.760844
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/NvBlastExtSerialization.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastExtSerialization.h" #include "NvBlastExtLlSerialization.h" #include "NvBlastArray.h" #include "NvBlastHashMap.h" #include "NvBlastExtSerializationInternal.h" namespace Nv { namespace Blast { class ExtSerializationImpl : public ExtSerializationInternal { public: // Default buffer provider class AllocBufferProvider : public ExtSerialization::BufferProvider { public: virtual void* requestBuffer(size_t size) override; }; ExtSerializationImpl(); ~ExtSerializationImpl(); // ExtSerialization interface begin virtual bool setSerializationEncoding(uint32_t encodingID) override; virtual uint32_t getSerializationEncoding() const override; virtual void setBufferProvider(BufferProvider* bufferProvider) override; virtual bool peekHeader(uint32_t* objectTypeID, uint32_t* encodingID, uint64_t* dataSize, const void* buffer, uint64_t bufferSize) override; virtual const void* skipObject(uint64_t& bufferSize, const void* buffer) override; virtual void* deserializeFromBuffer(const void* buffer, uint64_t size, uint32_t* objectTypeIDPtr = nullptr) override; virtual uint64_t serializeIntoBuffer(void*& buffer, const void* object, uint32_t objectTypeID) override; virtual void release() override; // ExtSerialization interface end // ExtSerializationInternal interface begin virtual bool registerSerializer(ExtSerializer& serializer) override; virtual bool unregisterSerializer(ExtSerializer& serializer) override; virtual ExtSerializer* findSerializer(uint32_t objectTypeID, uint32_t encodingID) override; // ExtSerializationInternal interface end private: char* writeHeaderIntoBuffer(char* buffer, uint64_t bufferSize, uint32_t objectTypeID, uint32_t encodingID, uint64_t dataSize) const; const char* readHeaderFromBuffer(uint32_t* objectTypeID, uint32_t* encodingID, uint64_t* dataSize, const char* buffer, uint64_t bufferSize) const; //// Static data //// static const char* s_identifier; static const char* s_version; static AllocBufferProvider s_defaultBufferProvider; //// Member data //// HashMap<uint64_t, ExtSerializer*>::type m_serializers; uint32_t m_serializationEncoding; BufferProvider* m_bufferProvider; }; //////// ExtSerializationImpl static member variables //////// /** Module identifying header. This should never change. */ const char* ExtSerializationImpl::s_identifier = "NVidia(r) GameWorks Blast(tm) v."; const char* ExtSerializationImpl::s_version = "1"; ExtSerializationImpl::AllocBufferProvider ExtSerializationImpl::s_defaultBufferProvider; //////// Local utility functions //////// static NV_INLINE uint64_t generateKey(uint32_t objectTypeID, uint32_t encodingID) { return static_cast<uint64_t>(encodingID) << 32 | static_cast<uint64_t>(objectTypeID); } static NV_INLINE uint64_t generateKey(const ExtSerializer& serializer) { return generateKey(serializer.getObjectTypeID(), serializer.getEncodingID()); } static NV_INLINE void writeIDToBuffer(char* buffer, uint32_t id) { for (int i = 0; i < 4; ++i, id >>= 8) { *buffer++ = static_cast<char>(id & 0xFF); } } static NV_INLINE uint32_t readIDFromBuffer(const char* buffer) { return NVBLAST_FOURCC(buffer[0], buffer[1], buffer[2], buffer[3]); } static NV_INLINE void writeU64InHexToBuffer(char* buffer, uint64_t val) { for (char* curr = buffer + 16; curr-- > buffer; val >>= 4) { *curr = "0123456789ABCDEF"[val & 0xF]; } } static NV_INLINE uint64_t readU64InHexFromBuffer(const char* buffer) { uint64_t val = 0; for (const char* curr = buffer; curr < buffer + 16; ++curr) { const char c = *curr; const char msn = c >> 4; const char mask = ((88 >> msn) & 1) - 1; const unsigned char digit = "\x0\x1\x2\x3\x4\x5\x6\x7\x8\x9\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xA\xB\xC\xD\xE\xF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"[((msn - 3) & 1) << 4 | (c & 0xF)] | mask; if (digit == 0xFF) { return 0; // Not a hexidecimal digit } val = val << 4 | digit; } return val; } //////// ExtSerialization member functions //////// ExtSerializationImpl::ExtSerializationImpl() : m_serializationEncoding(EncodingID::CapnProtoBinary), m_bufferProvider(&s_defaultBufferProvider) { } ExtSerializationImpl::~ExtSerializationImpl() { // Release and remove all registered serializers Array<ExtSerializer*>::type registeredSerializers; registeredSerializers.reserve(m_serializers.size()); for (auto it = m_serializers.getIterator(); !it.done(); ++it) { registeredSerializers.pushBack(it->second); } m_serializers.clear(); for (uint32_t i = 0; i < registeredSerializers.size(); ++i) { registeredSerializers[i]->release(); } } char* ExtSerializationImpl::writeHeaderIntoBuffer(char* buffer, uint64_t bufferSize, uint32_t objectTypeID, uint32_t encodingID, uint64_t dataSize) const { if (bufferSize < HeaderSize) { return nullptr; } char* stop = buffer + HeaderSize; size_t versionLen = strlen(s_version); if (versionLen > 63) { versionLen = 63; } memset(buffer, ' ', HeaderSize); memcpy(buffer, s_identifier, 32); buffer += 32; memcpy(buffer, s_version, versionLen); buffer += 64; writeIDToBuffer(buffer, objectTypeID); buffer += 5; writeIDToBuffer(buffer, encodingID); buffer += 5; writeU64InHexToBuffer(buffer, dataSize); buffer += 16; *(stop - 1) = '\n'; return stop; } const char* ExtSerializationImpl::readHeaderFromBuffer(uint32_t* objectTypeID, uint32_t* encodingID, uint64_t* dataSize, const char* buffer, uint64_t bufferSize) const { if (bufferSize < HeaderSize) { NVBLAST_LOG_ERROR("ExtSerializationImpl::readHeaderFromBuffer: header terminator not found."); return nullptr; } const char* stop = buffer + HeaderSize; if (memcmp(buffer, s_identifier, 32)) { NVBLAST_LOG_ERROR("ExtSerializationImpl::readHeaderFromBuffer: file identifier does not match expected value."); return nullptr; } buffer += 32; const char* s = strchr(buffer, ' '); if (s == nullptr) { NVBLAST_LOG_ERROR("ExtSerializationImpl::readHeaderFromBuffer: file format error reading serializer library version."); } if (memcmp(buffer, s_version, s - buffer)) { NVBLAST_LOG_ERROR("ExtSerializationImpl::readHeaderFromBuffer: file version does not match serializer library version."); return nullptr; } buffer += 64; if (objectTypeID != nullptr) { *objectTypeID = readIDFromBuffer(buffer); } buffer += 5; if (encodingID != nullptr) { *encodingID = readIDFromBuffer(buffer); } buffer += 5; if (dataSize != nullptr) { *dataSize = readU64InHexFromBuffer(buffer); } buffer += 16; return stop; } bool ExtSerializationImpl::registerSerializer(ExtSerializer& serializer) { return m_serializers.insert(generateKey(serializer), &serializer); } bool ExtSerializationImpl::unregisterSerializer(ExtSerializer& serializer) { const uint64_t key = generateKey(serializer); const auto entry = m_serializers.find(key); if (entry == nullptr) { return false; } entry->second->release(); return m_serializers.erase(key); } ExtSerializer* ExtSerializationImpl::findSerializer(uint32_t objectTypeID, uint32_t encodingID) { auto entry = m_serializers.find(generateKey(objectTypeID, encodingID)); return entry != nullptr ? entry->second : nullptr; } bool ExtSerializationImpl::setSerializationEncoding(uint32_t encodingID) { m_serializationEncoding = encodingID; return true; } uint32_t ExtSerializationImpl::getSerializationEncoding() const { return m_serializationEncoding; } void ExtSerializationImpl::setBufferProvider(BufferProvider* bufferProvider) { m_bufferProvider = bufferProvider != nullptr ? bufferProvider : &s_defaultBufferProvider; } bool ExtSerializationImpl::peekHeader(uint32_t* objectTypeID, uint32_t* encodingID, uint64_t* dataSize, const void* buffer, uint64_t bufferSize) { return nullptr != readHeaderFromBuffer(objectTypeID, encodingID, dataSize, reinterpret_cast<const char*>(buffer), bufferSize); } const void* ExtSerializationImpl::skipObject(uint64_t& bufferSize, const void* buffer) { uint64_t dataSize; const char* next = readHeaderFromBuffer(nullptr, nullptr, &dataSize, static_cast<const char*>(buffer), bufferSize); if (next == nullptr) { return nullptr; } next += dataSize; const uint64_t skipSize = next - static_cast<const char*>(buffer); NVBLAST_CHECK_ERROR(skipSize <= bufferSize, "Object size in buffer is too large for given buffer size.", return nullptr); bufferSize -= skipSize; return next; } void* ExtSerializationImpl::deserializeFromBuffer(const void* buffer, uint64_t bufferSize, uint32_t* objectTypeIDPtr) { uint32_t objectTypeID; uint32_t encodingID; uint64_t dataSize; void* result = nullptr; buffer = readHeaderFromBuffer(&objectTypeID, &encodingID, &dataSize, reinterpret_cast<const char*>(buffer), bufferSize); if (buffer != nullptr) { auto entry = m_serializers.find(generateKey(objectTypeID, encodingID)); if (entry != nullptr && entry->second != nullptr) { result = entry->second->deserializeFromBuffer(buffer, dataSize); } } if (objectTypeIDPtr != nullptr) { *objectTypeIDPtr = result != nullptr ? objectTypeID : 0; } return result; } uint64_t ExtSerializationImpl::serializeIntoBuffer(void*& buffer, const void* object, uint32_t objectTypeID) { if (!m_serializationEncoding) { NVBLAST_LOG_ERROR("ExtSerializationImpl::serializeIntoBuffer: no serialization encoding has been set."); return false; // No encoding available } auto entry = m_serializers.find(generateKey(objectTypeID, m_serializationEncoding)); if (entry == nullptr || entry->second == nullptr) { return false; } const uint64_t size = entry->second->serializeIntoBuffer(buffer, *m_bufferProvider, object, HeaderSize); if (size < HeaderSize) { NVBLAST_LOG_ERROR("ExtSerializationImpl::serializeIntoBuffer: failed to write data to buffer."); return 0; } writeHeaderIntoBuffer(reinterpret_cast<char*>(buffer), HeaderSize, objectTypeID, m_serializationEncoding, size - HeaderSize); return size; } void ExtSerializationImpl::release() { NVBLAST_DELETE(this, ExtSerializationImpl); } //////// ExtSerializationImpl::AllocBufferProvider member functions //////// void* ExtSerializationImpl::AllocBufferProvider::requestBuffer(size_t size) { return NVBLAST_ALLOC(size); } } // namespace Blast } // namespace Nv Nv::Blast::ExtSerialization* NvBlastExtSerializationCreate() { Nv::Blast::ExtSerializationImpl* serialization = NVBLAST_NEW(Nv::Blast::ExtSerializationImpl) (); // Automatically load LL serializers NvBlastExtLlSerializerLoadSet(*serialization); return serialization; }
13,068
C++
31.031863
192
0.6897
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/NvBlastExtInputStream.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "kj/io.h" #include <istream> namespace Nv { namespace Blast { class ExtInputStream : public kj::InputStream { public: ExtInputStream() = delete; ExtInputStream(std::istream &inputStream); // Returns a read of maxBytes. This is supposed to be happy doing partial reads, but currently isn't. virtual size_t tryRead(void* buffer, size_t minBytes, size_t maxBytes) override; private: std::istream &m_inputStream; }; } // namespace Blast } // namespace Nv
2,064
C
37.962263
106
0.75436
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/NvBlastExtLlSerializerCAPN.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "NvBlastExtSerializationCAPN.h" #include "NvBlastAsset.h" #include "NvBlastFamily.h" #include "AssetDTO.h" #include "FamilyDTO.h" /** Specializations of ExtSerializationCAPN for Blast LL */ namespace Nv { namespace Blast { //// Nv::Blast::Asset //// template<> NV_INLINE bool ExtSerializationCAPN<Asset, Serialization::Asset::Reader, Serialization::Asset::Builder>::serializeIntoBuilder(Serialization::Asset::Builder& assetBuilder, const Asset* asset) { return AssetDTO::serialize(assetBuilder, asset); } template<> NV_INLINE bool ExtSerializationCAPN<Asset, Serialization::Asset::Reader, Serialization::Asset::Builder>::serializeIntoMessage(capnp::MallocMessageBuilder& message, const Asset* asset) { Serialization::Asset::Builder assetBuilder = message.initRoot<Serialization::Asset>(); return serializeIntoBuilder(assetBuilder, asset); } template<> NV_INLINE Asset* ExtSerializationCAPN<Asset, Serialization::Asset::Reader, Serialization::Asset::Builder>::deserializeFromStreamReader(capnp::InputStreamMessageReader &message) { Serialization::Asset::Reader reader = message.getRoot<Serialization::Asset>(); return AssetDTO::deserialize(reader); } //// Nv::Blast::FamilyHeader //// template<> NV_INLINE bool ExtSerializationCAPN<FamilyHeader, Serialization::Family::Reader, Serialization::Family::Builder>::serializeIntoBuilder(Serialization::Family::Builder& familyBuilder, const FamilyHeader* family) { return FamilyDTO::serialize(familyBuilder, family); } template<> NV_INLINE bool ExtSerializationCAPN<FamilyHeader, Serialization::Family::Reader, Serialization::Family::Builder>::serializeIntoMessage(capnp::MallocMessageBuilder& message, const FamilyHeader* family) { Serialization::Family::Builder familyBuilder = message.initRoot<Serialization::Family>(); return serializeIntoBuilder(familyBuilder, family); } template<> NV_INLINE FamilyHeader* ExtSerializationCAPN<FamilyHeader, Serialization::Family::Reader, Serialization::Family::Builder>::deserializeFromStreamReader(capnp::InputStreamMessageReader &message) { Serialization::Family::Reader reader = message.getRoot<Serialization::Family>(); return FamilyDTO::deserialize(reader); } } // namespace Blast } // namespace Nv
3,837
C
37.38
209
0.775085
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/NvBlastExtTkSerializerCAPN.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "NvBlastExtSerializationCAPN.h" #include "NvBlastTkAsset.h" #include "TkAssetDTO.h" /** Specializations of ExtSerializationCAPN for BlastTk */ namespace Nv { namespace Blast { //// Nv::Blast::TkAsset //// template<> NV_INLINE bool ExtSerializationCAPN<TkAsset, Serialization::TkAsset::Reader, Serialization::TkAsset::Builder>::serializeIntoBuilder(Serialization::TkAsset::Builder& assetBuilder, const TkAsset* asset) { return TkAssetDTO::serialize(assetBuilder, asset); } template<> NV_INLINE TkAsset* ExtSerializationCAPN<TkAsset, Serialization::TkAsset::Reader, Serialization::TkAsset::Builder>::deserializeFromStreamReader(capnp::InputStreamMessageReader &message) { Serialization::TkAsset::Reader reader = message.getRoot<Serialization::TkAsset>(); return TkAssetDTO::deserialize(reader); } template<> NV_INLINE bool ExtSerializationCAPN<TkAsset, Serialization::TkAsset::Reader, Serialization::TkAsset::Builder>::serializeIntoMessage(capnp::MallocMessageBuilder& message, const TkAsset* asset) { Serialization::TkAsset::Builder assetBuilder = message.initRoot<Serialization::TkAsset>(); return serializeIntoBuilder(assetBuilder, asset); } } // namespace Blast } // namespace Nv
2,803
C
39.057142
200
0.7731
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/NvBlastExtTkSerialization.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastExtSerializationInternal.h" #include "NvBlastExtTkSerialization.h" #include "NvBlastExtTkSerializerCAPN.h" #include "NvBlastExtTkSerializerRAW.h" namespace Nv { namespace Blast { TkFramework* sExtTkSerializerFramework = nullptr; class ExtTkSerializerAsset_CPNB : public ExtSerializer { public: ExtSerializerBoilerplate("TkAsset_CPNB", "Blast high-level asset (Nv::Blast::TkAsset) serialization using Cap'n Proto binary format.", TkObjectTypeID::Asset, ExtSerialization::EncodingID::CapnProtoBinary); ExtSerializerDefaultFactoryAndRelease(ExtTkSerializerAsset_CPNB); virtual void* deserializeFromBuffer(const void* buffer, uint64_t size) override { return ExtSerializationCAPN<TkAsset, Serialization::TkAsset::Reader, Serialization::TkAsset::Builder>::deserializeFromBuffer(reinterpret_cast<const unsigned char*>(buffer), size); } virtual uint64_t serializeIntoBuffer(void*& buffer, ExtSerialization::BufferProvider& bufferProvider, const void* object, uint64_t offset = 0) override { uint64_t usedSize; if (!ExtSerializationCAPN<TkAsset, Serialization::TkAsset::Reader, Serialization::TkAsset::Builder>::serializeIntoBuffer(reinterpret_cast<const TkAsset*>(object), reinterpret_cast<unsigned char*&>(buffer), usedSize, &bufferProvider, offset)) { return 0; } return usedSize; } }; class ExTkSerializerAsset_RAW : public ExtSerializer { public: ExtSerializerBoilerplate("TkAsset_RAW", "Blast high-level asset (Nv::Blast::TkAsset) serialization using raw memory format.", TkObjectTypeID::Asset, ExtSerialization::EncodingID::RawBinary); ExtSerializerDefaultFactoryAndRelease(ExTkSerializerAsset_RAW); ExtSerializerReadOnly(ExTkSerializerAsset_RAW); virtual void* deserializeFromBuffer(const void* buffer, uint64_t size) override { ExtIStream stream(buffer, size); return deserializeTkAsset(stream, *sExtTkSerializerFramework); } }; } // namespace Blast } // namespace Nv /////////////////////////////////////// size_t NvBlastExtTkSerializerLoadSet(Nv::Blast::TkFramework& framework, Nv::Blast::ExtSerialization& serialization) { Nv::Blast::sExtTkSerializerFramework = &framework; Nv::Blast::ExtSerializer* (*factories[])() = { Nv::Blast::ExtTkSerializerAsset_CPNB::create, Nv::Blast::ExTkSerializerAsset_RAW::create }; return Nv::Blast::ExtSerializationLoadSet(static_cast<Nv::Blast::ExtSerializationInternal&>(serialization), factories); } uint64_t NvBlastExtSerializationSerializeTkAssetIntoBuffer(void*& buffer, Nv::Blast::ExtSerialization& serialization, const Nv::Blast::TkAsset* asset) { return serialization.serializeIntoBuffer(buffer, asset, Nv::Blast::TkObjectTypeID::Asset); }
4,373
C++
40.657142
209
0.750057
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/NvBlastExtTkSerializerRAW.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once /** Raw serialization function declarations for BlastTk */ #include <stdint.h> namespace Nv { namespace Blast { // Forward declarations class TkAsset; class TkFramework; class ExtIStream; //// Nv::Blast::TkAsset //// TkAsset* deserializeTkAsset(ExtIStream& stream, TkFramework& framework); } // namespace Blast } // namespace Nv
1,922
C
34.61111
75
0.759105
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/NvBlastExtOutputStream.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastExtOutputStream.h" Nv::Blast::ExtOutputStream::ExtOutputStream(std::ostream &outputStream): m_outputStream(outputStream) { } void Nv::Blast::ExtOutputStream::write(const void* buffer, size_t size) { m_outputStream.write((char *) buffer, size); }
1,839
C++
43.878048
74
0.762915
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/NvBlastExtLlSerialization.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastExtSerializationInternal.h" #include "NvBlastExtLlSerialization.h" #include "NvBlastExtLlSerializerCAPN.h" namespace Nv { namespace Blast { class ExtLlSerializerAsset_CPNB : public ExtSerializer { public: ExtSerializerBoilerplate("LLAsset_CPNB", "Blast low-level asset (NvBlastAsset) serialization using Cap'n Proto binary format.", LlObjectTypeID::Asset, ExtSerialization::EncodingID::CapnProtoBinary); ExtSerializerDefaultFactoryAndRelease(ExtLlSerializerAsset_CPNB); virtual void* deserializeFromBuffer(const void* buffer, uint64_t size) override { return ExtSerializationCAPN<Asset, Serialization::Asset::Reader, Serialization::Asset::Builder>::deserializeFromBuffer(reinterpret_cast<const unsigned char*>(buffer), size); } virtual uint64_t serializeIntoBuffer(void*& buffer, ExtSerialization::BufferProvider& bufferProvider, const void* object, uint64_t offset = 0) override { uint64_t usedSize; if (!ExtSerializationCAPN<Asset, Serialization::Asset::Reader, Serialization::Asset::Builder>::serializeIntoBuffer(reinterpret_cast<const Asset*>(object), reinterpret_cast<unsigned char*&>(buffer), usedSize, &bufferProvider, offset)) { return 0; } return usedSize; } }; class ExtLlSerializerFamily_CPNB : public ExtSerializer { public: ExtSerializerBoilerplate("LLFamily_CPNB", "Blast low-level family (NvBlastFamily) serialization using Cap'n Proto binary format.", LlObjectTypeID::Family, ExtSerialization::EncodingID::CapnProtoBinary); ExtSerializerDefaultFactoryAndRelease(ExtLlSerializerFamily_CPNB); virtual void* deserializeFromBuffer(const void* buffer, uint64_t size) override { return ExtSerializationCAPN<FamilyHeader, Serialization::Family::Reader, Serialization::Family::Builder>::deserializeFromBuffer(reinterpret_cast<const unsigned char*>(buffer), size); } virtual uint64_t serializeIntoBuffer(void*& buffer, ExtSerialization::BufferProvider& bufferProvider, const void* object, uint64_t offset = 0) override { uint64_t usedSize; if (!ExtSerializationCAPN<FamilyHeader, Serialization::Family::Reader, Serialization::Family::Builder>::serializeIntoBuffer(reinterpret_cast<const FamilyHeader*>(object), reinterpret_cast<unsigned char*&>(buffer), usedSize, &bufferProvider, offset)) { return 0; } return usedSize; } }; class ExtLlSerializerObject_RAW : public ExtSerializer { public: virtual void* deserializeFromBuffer(const void* buffer, uint64_t size) override { const NvBlastDataBlock* block = reinterpret_cast<const NvBlastDataBlock*>(buffer); if (static_cast<uint64_t>(block->size) > size) { return nullptr; } void* llobject = NVBLAST_ALLOC(block->size); return memcpy(llobject, block, block->size); } virtual uint64_t serializeIntoBuffer(void*& buffer, ExtSerialization::BufferProvider& bufferProvider, const void* object, uint64_t offset = 0) override { const NvBlastDataBlock* block = reinterpret_cast<const NvBlastDataBlock*>(object); const uint64_t size = block->size + offset; buffer = bufferProvider.requestBuffer(size); if (buffer == nullptr) { return 0; } memcpy(static_cast<char*>(buffer) + offset, object, block->size); return size; } }; class ExtLlSerializerAsset_RAW : public ExtLlSerializerObject_RAW { public: ExtSerializerBoilerplate("LLAsset_RAW", "Blast low-level asset (NvBlastAsset) serialization using raw memory format.", LlObjectTypeID::Asset, ExtSerialization::EncodingID::RawBinary); ExtSerializerDefaultFactoryAndRelease(ExtLlSerializerAsset_RAW); }; class ExtLlSerializerFamily_RAW : public ExtLlSerializerObject_RAW { public: ExtSerializerBoilerplate("LLFamily_RAW", "Blast low-level family (NvBlastFamily) serialization using raw memory format.", LlObjectTypeID::Family, ExtSerialization::EncodingID::RawBinary); ExtSerializerDefaultFactoryAndRelease(ExtLlSerializerFamily_RAW); }; } // namespace Blast } // namespace Nv /////////////////////////////////////// size_t NvBlastExtLlSerializerLoadSet(Nv::Blast::ExtSerialization& serialization) { Nv::Blast::ExtSerializer* (*factories[])() = { Nv::Blast::ExtLlSerializerAsset_CPNB::create, Nv::Blast::ExtLlSerializerAsset_RAW::create, Nv::Blast::ExtLlSerializerFamily_CPNB::create, Nv::Blast::ExtLlSerializerFamily_RAW::create }; return Nv::Blast::ExtSerializationLoadSet(static_cast<Nv::Blast::ExtSerializationInternal&>(serialization), factories); } uint64_t NvBlastExtSerializationSerializeAssetIntoBuffer(void*& buffer, Nv::Blast::ExtSerialization& serialization, const NvBlastAsset* asset) { return serialization.serializeIntoBuffer(buffer, asset, Nv::Blast::LlObjectTypeID::Asset); } uint64_t NvBlastExtSerializationSerializeFamilyIntoBuffer(void*& buffer, Nv::Blast::ExtSerialization& serialization, const NvBlastFamily* family) { return serialization.serializeIntoBuffer(buffer, family, Nv::Blast::LlObjectTypeID::Family); }
6,780
C++
41.118012
206
0.737316
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/NvBlastBondDTO.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "DTOMacros.h" #include "NvBlastTypes.h" #include "NvBlastExtLlSerialization-capn.h" DTO_CLASS(NvBlastBond, NvBlastBond, Nv::Blast::Serialization::NvBlastBond)
1,746
C
50.382351
74
0.771478
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/NvVec3DTO.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "DTOMacros.h" #include "NvBlastExtTkSerialization-capn.h" #include "NvVec3.h" DTO_CLASS(NvVec3, nvidia::NvVec3, Nv::Blast::Serialization::NvVec3)
1,733
C
49.999999
74
0.768609
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/ActorDTO.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "ActorDTO.h" #include "NvBlastGlobals.h" #include "NvBlastIDDTO.h" #include "NvBlastChunkDTO.h" #include "NvBlastBondDTO.h" namespace Nv { namespace Blast { bool ActorDTO::serialize(Nv::Blast::Serialization::Actor::Builder builder, const Nv::Blast::Actor* poco) { builder.setFamilyOffset(poco->getFamilyOffset()); builder.setFirstVisibleChunkIndex(poco->getFirstVisibleChunkIndex()); builder.setVisibleChunkCount(poco->getVisibleChunkCount()); builder.setFirstGraphNodeIndex(poco->getFirstGraphNodeIndex()); builder.setGraphNodeCount(poco->getGraphNodeCount()); builder.setLeafChunkCount(poco->getLeafChunkCount()); return true; } Nv::Blast::Actor* ActorDTO::deserialize(Nv::Blast::Serialization::Actor::Reader reader) { NV_UNUSED(reader); return nullptr; } bool ActorDTO::deserializeInto(Nv::Blast::Serialization::Actor::Reader reader, Nv::Blast::Actor* poco) { poco->setFamilyOffset(reader.getFamilyOffset()); poco->setFirstVisibleChunkIndex(reader.getFirstVisibleChunkIndex()); poco->setVisibleChunkCount(reader.getVisibleChunkCount()); poco->setFirstGraphNodeIndex(reader.getFirstGraphNodeIndex()); poco->setGraphNodeCount(reader.getGraphNodeCount()); poco->setLeafChunkCount(reader.getLeafChunkCount()); return true; } } // namespace Blast } // namespace Nv
2,914
C++
38.391891
104
0.762183
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/PxTransformDTO.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "DTOMacros.h" #include "PxTransform.h" #include "NvBlastExtPxSerialization-capn.h" #include "PxCooking.h" DTO_CLASS(PxTransform, physx::PxTransform, Nv::Blast::Serialization::PxTransform)
1,775
C
49.742856
81
0.770704
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/PxMeshScaleDTO.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "NvPreprocessor.h" #include "PxMeshScaleDTO.h" #include "PxVec3DTO.h" #include "PxQuatDTO.h" namespace Nv { namespace Blast { bool PxMeshScaleDTO::serialize(Nv::Blast::Serialization::PxMeshScale::Builder builder, const physx::PxMeshScale * poco) { PxVec3DTO::serialize(builder.getScale(), &poco->scale); PxQuatDTO::serialize(builder.getRotation(), &poco->rotation); return true; } physx::PxMeshScale* PxMeshScaleDTO::deserialize(Nv::Blast::Serialization::PxMeshScale::Reader reader) { NV_UNUSED(reader); return nullptr; } bool PxMeshScaleDTO::deserializeInto(Nv::Blast::Serialization::PxMeshScale::Reader reader, physx::PxMeshScale * poco) { PxVec3DTO::deserializeInto(reader.getScale(), &poco->scale); PxQuatDTO::deserializeInto(reader.getRotation(), &poco->rotation); return true; } } // namespace Blast } // namespace Nv
2,443
C++
36.599999
119
0.75481
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/PxConvexMeshGeometryDTO.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "PxConvexMeshGeometryDTO.h" #include "PxMeshScaleDTO.h" #include "NvBlastAssert.h" #include "NvBlastExtKJPxInputStream.h" #include "NvBlastExtKJPxOutputStream.h" #include "PxConvexMeshDesc.h" #include "NvBlastExtSerialization.h" #include "PxVec3.h" #include <algorithm> #include <vector> #include "PxPhysics.h" #include "NvBlastPxCallbacks.h" #include "PxDefaultStreams.h" namespace Nv { namespace Blast { extern physx::PxPhysics* sExtPxSerializerPhysics; extern physx::PxCooking* sExtPxSerializerCooking; bool PxConvexMeshGeometryDTO::serialize(Nv::Blast::Serialization::PxConvexMeshGeometry::Builder builder, const physx::PxConvexMeshGeometry * poco) { NVBLAST_ASSERT(sExtPxSerializerCooking != nullptr); PxMeshScaleDTO::serialize(builder.getScale(), &poco->scale); //TODO: Use cooking.cookConvexMesh to cook the mesh to a stream - then get that backing buffer and put it into the Data field physx::PxConvexMeshDesc desc; desc.points.data = poco->convexMesh->getVertices(); desc.points.count = poco->convexMesh->getNbVertices(); desc.points.stride = sizeof(physx::PxVec3); std::vector<uint32_t> indicesScratch; std::vector<physx::PxHullPolygon> hullPolygonsScratch; hullPolygonsScratch.resize(poco->convexMesh->getNbPolygons()); uint32_t indexCount = 0; for (uint32_t i = 0; i < hullPolygonsScratch.size(); i++) { physx::PxHullPolygon polygon; poco->convexMesh->getPolygonData(i, polygon); if (polygon.mNbVerts) { indexCount = std::max<uint32_t>(indexCount, polygon.mIndexBase + polygon.mNbVerts); } } indicesScratch.resize(indexCount); for (uint32_t i = 0; i < hullPolygonsScratch.size(); i++) { physx::PxHullPolygon polygon; poco->convexMesh->getPolygonData(i, polygon); for (uint32_t j = 0; j < polygon.mNbVerts; j++) { indicesScratch[polygon.mIndexBase + j] = poco->convexMesh->getIndexBuffer()[polygon.mIndexBase + j]; } hullPolygonsScratch[i] = polygon; } desc.indices.count = indexCount; desc.indices.data = indicesScratch.data(); desc.indices.stride = sizeof(uint32_t); desc.polygons.count = poco->convexMesh->getNbPolygons(); desc.polygons.data = hullPolygonsScratch.data(); desc.polygons.stride = sizeof(physx::PxHullPolygon); physx::PxDefaultMemoryOutputStream outStream(NvBlastGetPxAllocatorCallback()); if (!sExtPxSerializerCooking->cookConvexMesh(desc, outStream)) { return false; } kj::ArrayPtr<unsigned char> cookedBuffer(outStream.getData(), outStream.getSize()); builder.setConvexMesh(cookedBuffer); return true; } physx::PxConvexMeshGeometry* PxConvexMeshGeometryDTO::deserialize(Nv::Blast::Serialization::PxConvexMeshGeometry::Reader reader) { NVBLAST_ASSERT(sExtPxSerializerCooking != nullptr); NV_UNUSED(reader); return nullptr; } bool PxConvexMeshGeometryDTO::deserializeInto(Nv::Blast::Serialization::PxConvexMeshGeometry::Reader reader, physx::PxConvexMeshGeometry * poco) { NVBLAST_ASSERT(sExtPxSerializerPhysics != nullptr); PxMeshScaleDTO::deserializeInto(reader.getScale(), &poco->scale); Nv::Blast::ExtKJPxInputStream inputStream(reader.getConvexMesh()); //NOTE: Naive approach, no shared convex hulls poco->convexMesh = sExtPxSerializerPhysics->createConvexMesh(inputStream); return poco->convexMesh != nullptr; } } // namespace Blast } // namespace Nv
5,081
C++
34.788732
146
0.731943
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/TkAssetJointDescDTO.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "TkAssetJointDescDTO.h" #include "NvVec3DTO.h" namespace Nv { namespace Blast { bool TkAssetJointDescDTO::serialize(Nv::Blast::Serialization::TkAssetJointDesc::Builder builder, const Nv::Blast::TkAssetJointDesc * poco) { kj::ArrayPtr<const uint32_t> nodeIndices(poco->nodeIndices, 2); builder.setNodeIndices(nodeIndices); builder.initAttachPositions(2); for (int i = 0; i < 2; i++) { NvVec3DTO::serialize(builder.getAttachPositions()[i], &poco->attachPositions[i]); } return true; } Nv::Blast::TkAssetJointDesc* TkAssetJointDescDTO::deserialize(Nv::Blast::Serialization::TkAssetJointDesc::Reader reader) { //TODO: Allocate with ExtContent and return NV_UNUSED(reader); return nullptr; } bool TkAssetJointDescDTO::deserializeInto(Nv::Blast::Serialization::TkAssetJointDesc::Reader reader, Nv::Blast::TkAssetJointDesc * poco) { auto readerAttachPositions = reader.getAttachPositions(); NvVec3DTO::deserializeInto(readerAttachPositions[0], &poco->attachPositions[0]); NvVec3DTO::deserializeInto(readerAttachPositions[1], &poco->attachPositions[1]); auto readerNodeIndices = reader.getNodeIndices(); poco->nodeIndices[0] = readerNodeIndices[0]; poco->nodeIndices[1] = readerNodeIndices[1]; return true; } } // namespace Blast } // namespace Nv
2,911
C++
36.818181
138
0.74854
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/TkAssetDTO.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "DTOMacros.h" #include "NvBlastTkAsset.h" #include "NvBlastExtTkSerialization-capn.h" DTO_CLASS(TkAsset, Nv::Blast::TkAsset, Nv::Blast::Serialization::TkAsset)
1,747
C
50.411763
74
0.769319
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/AssetDTO.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "DTOMacros.h" #include "NvBlastAsset.h" #include "NvBlastExtLlSerialization-capn.h" DTO_CLASS(Asset, Nv::Blast::Asset, Nv::Blast::Serialization::Asset)
1,739
C
50.176469
74
0.768258
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/PxQuatDTO.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "DTOMacros.h" #include "PxQuat.h" #include "NvBlastExtPxSerialization-capn.h" #include "PxCooking.h" DTO_CLASS(PxQuat, physx::PxQuat, Nv::Blast::Serialization::PxQuat)
1,755
C
49.171427
74
0.768091
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/AssetDTO.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "AssetDTO.h" #include "NvBlastGlobals.h" #include "NvBlastIDDTO.h" #include "NvBlastChunkDTO.h" #include "NvBlastBondDTO.h" #include "NvBlastAsset.h" namespace Nv { namespace Blast { bool AssetDTO::serialize(Nv::Blast::Serialization::Asset::Builder builder, const Nv::Blast::Asset * poco) { NvBlastIDDTO::serialize(builder.initID(), &poco->m_ID); builder.setLeafChunkCount(poco->m_leafChunkCount); builder.setFirstSubsupportChunkIndex(poco->m_firstSubsupportChunkIndex); capnp::List<Nv::Blast::Serialization::NvBlastChunk>::Builder chunks = builder.initChunks(poco->m_chunkCount); builder.setChunkCount(poco->m_chunkCount); NVBLAST_ASSERT_WITH_MESSAGE(builder.getChunkCount() == poco->m_chunkCount, "WTF"); for (uint32_t i = 0; i < poco->m_chunkCount; i++) { NvBlastChunk& chunk = poco->getChunks()[i]; NvBlastChunkDTO::serialize(chunks[i], &chunk); } NVBLAST_ASSERT_WITH_MESSAGE(builder.getChunkCount() == poco->m_chunkCount, "WTF"); capnp::List<Nv::Blast::Serialization::NvBlastBond>::Builder bonds = builder.initBonds(poco->m_bondCount); builder.setBondCount(poco->m_bondCount); for (uint32_t i = 0; i < poco->m_bondCount; i++) { NvBlastBond& bond = poco->getBonds()[i]; NvBlastBondDTO::serialize(bonds[i], &bond); } kj::ArrayPtr<uint32_t> stlcArray(poco->getSubtreeLeafChunkCounts(), poco->m_chunkCount); builder.initSubtreeLeafChunkCounts(poco->m_chunkCount); builder.setSubtreeLeafChunkCounts(stlcArray); kj::ArrayPtr<uint32_t> ctgnArray(poco->getChunkToGraphNodeMap(), poco->m_chunkCount); builder.setChunkToGraphNodeMap(ctgnArray); Nv::Blast::Serialization::NvBlastSupportGraph::Builder graphBulder = builder.initGraph(); graphBulder.setNodeCount(poco->m_graph.m_nodeCount); uint32_t* ciPtr = poco->m_graph.getChunkIndices(); kj::ArrayPtr<const uint32_t> ciArray(ciPtr, poco->m_graph.m_nodeCount); graphBulder.setChunkIndices(ciArray); kj::ArrayPtr<const uint32_t> adjPart(poco->m_graph.getAdjacencyPartition(), poco->m_graph.m_nodeCount + 1); graphBulder.setAdjacencyPartition(adjPart); NVBLAST_ASSERT(graphBulder.getAdjacencyPartition().size() == poco->m_graph.m_nodeCount + 1); kj::ArrayPtr<const uint32_t> nodeIndices(poco->m_graph.getAdjacentNodeIndices(), poco->m_bondCount * 2); graphBulder.setAdjacentNodeIndices(nodeIndices); NVBLAST_ASSERT(graphBulder.getAdjacentNodeIndices().size() == poco->m_bondCount * 2); kj::ArrayPtr<const uint32_t> bondIndices(poco->m_graph.getAdjacentBondIndices(), poco->m_bondCount * 2); graphBulder.setAdjacentBondIndices(bondIndices); return true; } Nv::Blast::Asset* AssetDTO::deserialize(Nv::Blast::Serialization::Asset::Reader reader) { NvBlastAssetMemSizeData sizeData; sizeData.chunkCount = reader.getChunkCount(); sizeData.nodeCount = reader.getGraph().getNodeCount(); sizeData.bondCount = reader.getBondCount(); const uint32_t leafChunkCount = reader.getLeafChunkCount(); const uint32_t firstSubsupportChunkIndex = reader.getFirstSubsupportChunkIndex(); const size_t assetSize = NvBlastGetAssetMemorySizeFromSizeData(sizeData, nullptr); void* mem = NVBLAST_ALLOC(assetSize); auto asset = Nv::Blast::initializeAsset(mem, sizeData.chunkCount, sizeData.nodeCount, leafChunkCount, firstSubsupportChunkIndex, sizeData.bondCount, logLL); if (deserializeInto(reader, asset)) return asset; // free the memory so it doesn't leak NVBLAST_FREE(asset); return nullptr; } bool AssetDTO::deserializeInto(Nv::Blast::Serialization::Asset::Reader reader, Nv::Blast::Asset * poco) { NvBlastIDDTO::deserializeInto(reader.getID(), &poco->m_ID); NvBlastBond* bonds = poco->getBonds(); uint32_t bondCount = reader.getBondCount(); auto readerBonds = reader.getBonds(); for (uint32_t i = 0; i < bondCount; i++) { auto bondReader = readerBonds[i]; NvBlastBondDTO::deserializeInto(bondReader, &bonds[i]); } NvBlastChunk* chunks = poco->getChunks(); uint32_t chunkCount = reader.getChunkCount(); auto readerChunks = reader.getChunks(); for (uint32_t i = 0; i < chunkCount; i++) { auto chunkReader = readerChunks[i]; NvBlastChunkDTO::deserializeInto(chunkReader, &chunks[i]); } poco->m_graph.m_nodeCount = reader.getGraph().getNodeCount(); NVBLAST_ASSERT(reader.getSubtreeLeafChunkCounts().size() == poco->m_chunkCount); auto readerSubtreeLeafChunkCounts = reader.getSubtreeLeafChunkCounts(); for (uint32_t i = 0; i < poco->m_chunkCount; i++) { poco->getSubtreeLeafChunkCounts()[i] = readerSubtreeLeafChunkCounts[i]; } auto readerChunkToGraphNodeMap = reader.getChunkToGraphNodeMap(); for (uint32_t i = 0; i < chunkCount; i++) { poco->getChunkToGraphNodeMap()[i] = readerChunkToGraphNodeMap[i]; } uint32_t* ciPtr = poco->m_graph.getChunkIndices(); NVBLAST_ASSERT(reader.getGraph().getChunkIndices().size() == poco->m_graph.m_nodeCount); auto readerGraphChunkIndices = reader.getGraph().getChunkIndices(); for (uint32_t i = 0; i < poco->m_graph.m_nodeCount; i++) { ciPtr[i] = readerGraphChunkIndices[i]; } uint32_t* adjPartition = poco->m_graph.getAdjacencyPartition(); const uint32_t graphAdjacencyPartitionSize = reader.getGraph().getAdjacencyPartition().size(); auto readerGraphAdjacencyPartition = reader.getGraph().getAdjacencyPartition(); for (uint32_t i = 0; i < graphAdjacencyPartitionSize; ++i) { adjPartition[i] = readerGraphAdjacencyPartition[i]; } uint32_t* adjNodes = poco->m_graph.getAdjacentNodeIndices(); const uint32_t graphAdjacentNodeIndicesSize = reader.getGraph().getAdjacentNodeIndices().size(); auto readerGraphAdjacentNodeIndices = reader.getGraph().getAdjacentNodeIndices(); for (uint32_t i = 0; i < graphAdjacentNodeIndicesSize; ++i) { adjNodes[i] = readerGraphAdjacentNodeIndices[i]; } uint32_t* adjBonds = poco->m_graph.getAdjacentBondIndices(); const uint32_t graphAdjacentBondIndicesSize = reader.getGraph().getAdjacentBondIndices().size(); auto readerGraphAdjacentBondIndices = reader.getGraph().getAdjacentBondIndices(); for (uint32_t i = 0; i < graphAdjacentBondIndicesSize; ++i) { adjBonds[i] = readerGraphAdjacentBondIndices[i]; } return true; } } // namespace Blast } // namespace Nv
8,072
C++
37.8125
160
0.718285
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/PxConvexMeshGeometryDTO.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "DTOMacros.h" #include "NvBlastExtPxSerialization-capn.h" #include "PxConvexMeshGeometry.h" #include "PxCooking.h" DTO_CLASS(PxConvexMeshGeometry, physx::PxConvexMeshGeometry, Nv::Blast::Serialization::PxConvexMeshGeometry)
1,811
C
50.771427
108
0.775262
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/NvBlastIDDTO.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "NvBlastTypes.h" #include "NvBlastExtLlSerialization-capn.h" #include "DTOMacros.h" DTO_CLASS(NvBlastID, NvBlastID, ::Nv::Blast::Serialization::UUID)
1,737
C
50.117646
74
0.769142
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/FamilyDTO.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "DTOMacros.h" #include "NvBlastFamily.h" #include "NvBlastExtLlSerialization-capn.h" DTO_CLASS(Family, Nv::Blast::FamilyHeader, Nv::Blast::Serialization::Family)
1,749
C
50.470587
76
0.769583
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/NvBlastChunkDTO.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "DTOMacros.h" #include "NvBlastTypes.h" #include "NvBlastExtLlSerialization-capn.h" DTO_CLASS(NvBlastChunk, NvBlastChunk, Nv::Blast::Serialization::NvBlastChunk)
1,751
C
47.666665
77
0.770988
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/DTOMacros.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #define DTO_CLASS(_NAME, _POCO, _SERIALIZER) \ namespace Nv { \ namespace Blast { \ class _NAME ## DTO \ { \ public: \ \ static bool serialize(_SERIALIZER::Builder builder, const _POCO * poco); \ static _POCO* deserialize(_SERIALIZER::Reader reader); \ static bool deserializeInto(_SERIALIZER::Reader reader, _POCO * poco); \ }; \ } \ }
2,660
C
60.88372
89
0.541729
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/PxQuatDTO.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "NvPreprocessor.h" #include "PxQuatDTO.h" namespace Nv { namespace Blast { bool PxQuatDTO::serialize(Nv::Blast::Serialization::PxQuat::Builder builder, const physx::PxQuat * poco) { builder.setX(poco->x); builder.setY(poco->y); builder.setZ(poco->z); builder.setW(poco->w); return true; } physx::PxQuat* PxQuatDTO::deserialize(Nv::Blast::Serialization::PxQuat::Reader reader) { NV_UNUSED(reader); return nullptr; } bool PxQuatDTO::deserializeInto(Nv::Blast::Serialization::PxQuat::Reader reader, physx::PxQuat * poco) { poco->x = reader.getX(); poco->y = reader.getY(); poco->z = reader.getZ(); poco->w = reader.getW(); return true; } } // namespace Blast } // namespace Nv
2,308
C++
33.984848
104
0.733969
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/PxTransformDTO.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "NvPreprocessor.h" #include "PxTransformDTO.h" #include "PxQuatDTO.h" #include "PxVec3DTO.h" namespace Nv { namespace Blast { bool PxTransformDTO::serialize(Nv::Blast::Serialization::PxTransform::Builder builder, const physx::PxTransform * poco) { PxQuatDTO::serialize(builder.getQ(), &poco->q); PxVec3DTO::serialize(builder.getP(), &poco->p); return true; } physx::PxTransform* PxTransformDTO::deserialize(Nv::Blast::Serialization::PxTransform::Reader reader) { NV_UNUSED(reader); return nullptr; } bool PxTransformDTO::deserializeInto(Nv::Blast::Serialization::PxTransform::Reader reader, physx::PxTransform * poco) { PxQuatDTO::deserializeInto(reader.getQ(), &poco->q); PxVec3DTO::deserializeInto(reader.getP(), &poco->p); return true; } } // namespace Blast } // namespace Nv
2,406
C++
36.609374
119
0.74813
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/NvBlastBondDTO.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastBondDTO.h" #include "NvBlastAssert.h" namespace Nv { namespace Blast { bool NvBlastBondDTO::serialize(Nv::Blast::Serialization::NvBlastBond::Builder builder, const NvBlastBond * poco) { NVBLAST_ASSERT(poco != nullptr); kj::ArrayPtr<const float> normArray(poco->normal, 3); builder.setNormal(normArray); builder.setArea(poco->area); kj::ArrayPtr<const float> centArray(poco->centroid, 3); builder.setCentroid(centArray); builder.setUserData(poco->userData); return true; } NvBlastBond* NvBlastBondDTO::deserialize(Nv::Blast::Serialization::NvBlastBond::Reader reader) { //FIXME NV_UNUSED(reader); //TODO: Allocate with ExtContext and return return nullptr; } bool NvBlastBondDTO::deserializeInto(Nv::Blast::Serialization::NvBlastBond::Reader reader, NvBlastBond * poco) { poco->area = reader.getArea(); auto readerCentroid = reader.getCentroid(); poco->centroid[0] = readerCentroid[0]; poco->centroid[1] = readerCentroid[1]; poco->centroid[2] = readerCentroid[2]; auto readerNormal = reader.getNormal(); poco->normal[0] = readerNormal[0]; poco->normal[1] = readerNormal[1]; poco->normal[2] = readerNormal[2]; poco->userData = reader.getUserData(); return true; } } // namespace Blast } // namespace Nv
2,896
C++
32.686046
112
0.734116
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/TkAssetDTO.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "TkAssetDTO.h" #include "AssetDTO.h" #include "TkAssetJointDescDTO.h" #include <vector> #include "NvBlastTkFramework.h" #include "NvBlastGlobals.h" namespace Nv { namespace Blast { extern TkFramework* sExtTkSerializerFramework; bool TkAssetDTO::serialize(Nv::Blast::Serialization::TkAsset::Builder builder, const Nv::Blast::TkAsset * poco) { const Asset* assetLL = reinterpret_cast<const Nv::Blast::Asset*>(poco->getAssetLL()); Nv::Blast::AssetDTO::serialize(builder.getAssetLL(), assetLL); uint32_t jointDescCount = poco->getJointDescCount(); capnp::List<Nv::Blast::Serialization::TkAssetJointDesc>::Builder jointDescs = builder.initJointDescs(jointDescCount); for (uint32_t i = 0; i < jointDescCount; i++) { TkAssetJointDescDTO::serialize(jointDescs[i], &poco->getJointDescs()[i]); } return true; } Nv::Blast::TkAsset* TkAssetDTO::deserialize(Nv::Blast::Serialization::TkAsset::Reader reader) { const NvBlastAsset* assetLL = reinterpret_cast<const NvBlastAsset*>(AssetDTO::deserialize(reader.getAssetLL())); std::vector<Nv::Blast::TkAssetJointDesc> jointDescs; const uint32_t jointDescCount = reader.getJointDescs().size(); jointDescs.resize(jointDescCount); auto readerJointDescs = reader.getJointDescs(); for (uint32_t i = 0; i < jointDescCount; i++) { TkAssetJointDescDTO::deserializeInto(readerJointDescs[i], &jointDescs[i]); } // Make sure to set ownsAsset to true - this is serialization and no one else owns it. Nv::Blast::TkAsset* asset = NvBlastTkFrameworkGet()->createAsset(assetLL, jointDescs.data(), jointDescCount, true); return asset; } bool TkAssetDTO::deserializeInto(Nv::Blast::Serialization::TkAsset::Reader reader, Nv::Blast::TkAsset * poco) { NV_UNUSED(reader); poco = nullptr; // NOTE: Because of the way TkAsset is currently structured, this won't work. return false; } } // namespace Blast } // namespace Nv
3,537
C++
36.638297
121
0.737913
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/PxVec3DTO.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "DTOMacros.h" #include "NvBlastExtTkSerialization-capn.h" #include "PxVec3.h" DTO_CLASS(PxVec3, physx::PxVec3, Nv::Blast::Serialization::PxVec3)
1,732
C
49.970587
74
0.768476
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/NvVec3DTO.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "NvVec3DTO.h" #include "NvBlastAssert.h" namespace Nv { namespace Blast { bool NvVec3DTO::serialize(Nv::Blast::Serialization::NvVec3::Builder builder, const nvidia::NvVec3 * poco) { NVBLAST_ASSERT(poco != nullptr); builder.setX(poco->x); builder.setY(poco->y); builder.setZ(poco->z); return true; } nvidia::NvVec3* NvVec3DTO::deserialize(Nv::Blast::Serialization::NvVec3::Reader reader) { //TODO: Allocate using ExtContext and return NV_UNUSED(reader); return nullptr; } bool NvVec3DTO::deserializeInto(Nv::Blast::Serialization::NvVec3::Reader reader, nvidia::NvVec3* target) { target->x = reader.getX(); target->y = reader.getY(); target->z = reader.getZ(); return true; } } // namespace Blast } // namespace Nv
2,346
C++
35.107692
105
0.738704
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/PxVec3DTO.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "PxVec3DTO.h" #include "NvBlastAssert.h" namespace Nv { namespace Blast { bool PxVec3DTO::serialize(Nv::Blast::Serialization::PxVec3::Builder builder, const physx::PxVec3 * poco) { NVBLAST_ASSERT(poco != nullptr); builder.setX(poco->x); builder.setY(poco->y); builder.setZ(poco->z); return true; } physx::PxVec3* PxVec3DTO::deserialize(Nv::Blast::Serialization::PxVec3::Reader reader) { //TODO: Allocate using ExtContext and return NV_UNUSED(reader); return nullptr; } bool PxVec3DTO::deserializeInto(Nv::Blast::Serialization::PxVec3::Reader reader, physx::PxVec3* target) { target->x = reader.getX(); target->y = reader.getY(); target->z = reader.getZ(); return true; } } // namespace Blast } // namespace Nv
2,343
C++
35.061538
104
0.73837
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/NvBlastChunkDTO.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastChunkDTO.h" #include "NvBlastAssert.h" namespace Nv { namespace Blast { bool NvBlastChunkDTO::serialize(Nv::Blast::Serialization::NvBlastChunk::Builder builder, const NvBlastChunk* poco) { NVBLAST_ASSERT(poco != nullptr); kj::ArrayPtr<const float> centArray(poco->centroid, 3); builder.setCentroid(centArray); builder.setVolume(poco->volume); builder.setParentChunkIndex(poco->parentChunkIndex); builder.setFirstChildIndex(poco->firstChildIndex); builder.setChildIndexStop(poco->childIndexStop); builder.setUserData(poco->userData); return true; } NvBlastChunk* NvBlastChunkDTO::deserialize(Nv::Blast::Serialization::NvBlastChunk::Reader reader) { //FIXME NV_UNUSED(reader); return nullptr; } bool NvBlastChunkDTO::deserializeInto(Nv::Blast::Serialization::NvBlastChunk::Reader reader, NvBlastChunk* target) { NVBLAST_ASSERT(target != nullptr); auto readerCentroid = reader.getCentroid(); target->centroid[0] = readerCentroid[0]; target->centroid[1] = readerCentroid[1]; target->centroid[2] = readerCentroid[2]; target->childIndexStop = reader.getChildIndexStop(); target->firstChildIndex = reader.getFirstChildIndex(); target->parentChunkIndex = reader.getParentChunkIndex(); target->userData = reader.getUserData(); target->volume = reader.getVolume(); return true; } } // namespace Blast } // namespace Nv
3,001
C++
35.168674
114
0.748417
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/FamilyDTO.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "FamilyDTO.h" #include "ActorDTO.h" #include "AssetDTO.h" #include "FamilyGraphDTO.h" #include "NvBlastFamilyGraph.h" #include "NvBlastGlobals.h" #include "NvBlastIDDTO.h" #include "NvBlastChunkDTO.h" #include "NvBlastBondDTO.h" #include <vector> namespace Nv { namespace Blast { bool FamilyDTO::serialize(Nv::Blast::Serialization::Family::Builder builder, const Nv::Blast::FamilyHeader* poco) { NvBlastIDDTO::serialize(builder.initAssetID(), &poco->m_assetID); // cache off the count data from the asset needed to re-create the family post serialization const NvBlastAssetMemSizeData sizeData = NvBlastAssetMemSizeDataFromAsset(poco->m_asset); builder.setBondCount(sizeData.bondCount); builder.setChunkCount(sizeData.chunkCount); builder.setNodeCount(sizeData.nodeCount); builder.setLowerSupportChunkCount(sizeData.lowerSupportChunkCount); builder.setUpperSupportChunkCount(sizeData.upperSupportChunkCount); // actorCount - these are active builder.setActorCount(poco->m_actorCount); // all possible actors const uint32_t actorCount = poco->getActorsArraySize(); capnp::List<Nv::Blast::Serialization::Actor>::Builder actors = builder.initActors(actorCount); for (uint32_t i = 0; i < actorCount; i++) { Actor& actor = poco->getActors()[i]; ActorDTO::serialize(actors[i], &actor); } // visibleChunkIndexLinks uint32_t* visibleChunkIndexLinks = reinterpret_cast<uint32_t *>(poco->getVisibleChunkIndexLinks()); kj::ArrayPtr<uint32_t> visibleChunkIndexLinksArray(visibleChunkIndexLinks, sizeData.chunkCount * 2); builder.setVisibleChunkIndexLinks(visibleChunkIndexLinksArray); // chunkActorIndices kj::ArrayPtr<uint32_t> chunkActorIndicesArray(poco->getChunkActorIndices(), sizeData.chunkCount); builder.setChunkActorIndices(chunkActorIndicesArray); // graphNodeIndexLinks kj::ArrayPtr<uint32_t> graphNodeIndexLinksArray(poco->getGraphNodeIndexLinks(), sizeData.chunkCount); builder.setGraphNodeIndexLinks(graphNodeIndexLinksArray); // lowerSupportChunkHealths kj::ArrayPtr<float> lowerSupportChunkHealthsArray(poco->getLowerSupportChunkHealths(), sizeData.chunkCount); builder.setLowerSupportChunkHealths(lowerSupportChunkHealthsArray); // graphBondHealths kj::ArrayPtr<float> graphBondHealthsArray(poco->getBondHealths(), sizeData.bondCount); builder.setGraphBondHealths(graphBondHealthsArray); // familyGraph FamilyGraph *graph = poco->getFamilyGraph(); auto builderGraph = builder.initFamilyGraph(); builderGraph.setNodeCount(sizeData.nodeCount); FamilyGraphDTO::serialize(builderGraph, graph); return true; } Nv::Blast::FamilyHeader* FamilyDTO::deserialize(Nv::Blast::Serialization::Family::Reader reader) { // fill in the count info from the reader NvBlastAssetMemSizeData sizeData; sizeData.bondCount = reader.getBondCount(); sizeData.chunkCount = reader.getChunkCount(); sizeData.nodeCount = reader.getNodeCount(); sizeData.lowerSupportChunkCount = reader.getLowerSupportChunkCount(); sizeData.upperSupportChunkCount = reader.getUpperSupportChunkCount(); // allocate enough space to hold the family const size_t familySize = NvBlastAssetGetFamilyMemorySizeFromSizeData(sizeData, nullptr); void* mem = NVBLAST_ALLOC(familySize); // use the count info to initialize the family auto family = reinterpret_cast<Nv::Blast::FamilyHeader *>(NvBlastAssetCreateFamilyFromSizeData(mem, sizeData, Nv::Blast::logLL)); // then fill in the data from the reader if (deserializeInto(reader, family)) return family; // failed to deserialize, free the allocated memory so it doesn't leak NVBLAST_FREE(mem); return nullptr; } bool FamilyDTO::deserializeInto(Nv::Blast::Serialization::Family::Reader reader, Nv::Blast::FamilyHeader* poco) { NvBlastIDDTO::deserializeInto(reader.getAssetID(), &poco->m_assetID); // active actor count poco->m_actorCount = reader.getActorCount(); // all possible actors Actor* actors = poco->getActors(); auto readerActors = reader.getActors(); NVBLAST_ASSERT(poco->m_actorCount <= readerActors.size()); for (uint32_t i = 0; i < readerActors.size(); i++) { auto actorReader = readerActors[i]; ActorDTO::deserializeInto(actorReader, &actors[i]); } // visibleChunkIndexLinks // they are stored in the buffer as a flat list of uint32_t values, // but stored as pairs in the Family auto readerVisibleChunkIndexLinks = reader.getVisibleChunkIndexLinks(); const uint32_t numVisibleChunkIndexLinks = readerVisibleChunkIndexLinks.size(); for (uint32_t i = 0; i < numVisibleChunkIndexLinks; i += 2) { const uint32_t vcil = i / 2; poco->getVisibleChunkIndexLinks()[vcil].m_adj[0] = readerVisibleChunkIndexLinks[i]; poco->getVisibleChunkIndexLinks()[vcil].m_adj[1] = readerVisibleChunkIndexLinks[i+1]; } // chunkActorIndices auto readerChunkActorIndices = reader.getChunkActorIndices(); const uint32_t numChunkActorIndices = readerChunkActorIndices.size(); for (uint32_t i = 0; i < numChunkActorIndices; i++) { poco->getChunkActorIndices()[i] = readerChunkActorIndices[i]; } // graphNodeIndexLinks auto readerGraphNodeIndexLinks = reader.getGraphNodeIndexLinks(); const uint32_t numGraphNodeIndexLinks = readerGraphNodeIndexLinks.size(); for (uint32_t i = 0; i < numGraphNodeIndexLinks; i++) { poco->getGraphNodeIndexLinks()[i] = readerGraphNodeIndexLinks[i]; } // lowerSupportChunkHealths auto readerLowerSupportChunkHealths = reader.getLowerSupportChunkHealths(); const uint32_t numLowerSupportChunkHealths = readerLowerSupportChunkHealths.size(); for (uint32_t i = 0; i < numLowerSupportChunkHealths; i++) { poco->getLowerSupportChunkHealths()[i] = readerLowerSupportChunkHealths[i]; } // graphBondHealths auto readerGraphBondHealths = reader.getGraphBondHealths(); const uint32_t numGraphBondHealths = readerGraphBondHealths.size(); for (uint32_t i = 0; i < numGraphBondHealths; i++) { poco->getBondHealths()[i] = readerGraphBondHealths[i]; } // familyGraph FamilyGraphDTO::deserializeInto(reader.getFamilyGraph(), poco->getFamilyGraph()); return true; } } // namespace Blast } // namespace Nv
8,006
C++
39.64467
133
0.738696
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/ActorDTO.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "DTOMacros.h" #include "NvBlastActor.h" #include "NvBlastExtLlSerialization-capn.h" DTO_CLASS(Actor, Nv::Blast::Actor, Nv::Blast::Serialization::Actor)
1,738
C
51.696968
74
0.7687
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/FamilyGraphDTO.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "FamilyGraphDTO.h" #include "NvBlastGlobals.h" namespace Nv { namespace Blast { bool FamilyGraphDTO::serialize(Nv::Blast::Serialization::FamilyGraph::Builder builder, const Nv::Blast::FamilyGraph * poco) { // this needs to be set externally so we have access to it here const uint32_t nodeCount = builder.getNodeCount(); kj::ArrayPtr<IslandId> islandIdsArray(poco->getIslandIds(), nodeCount); builder.setIslandIds(islandIdsArray); kj::ArrayPtr<NodeIndex> dirtyNodeLinksArray(poco->getDirtyNodeLinks(), nodeCount); builder.setDirtyNodeLinks(dirtyNodeLinksArray); kj::ArrayPtr<uint32_t> firstDirtyNodeIndicesArray(poco->getFirstDirtyNodeIndices(), nodeCount); builder.setFirstDirtyNodeIndices(firstDirtyNodeIndicesArray); kj::ArrayPtr<NodeIndex> fastRouteArray(poco->getFastRoute(), nodeCount); builder.setFastRoute(fastRouteArray); kj::ArrayPtr<uint32_t> hopCountsArray(poco->getHopCounts(), nodeCount); builder.setHopCounts(hopCountsArray); auto isEdgeRemoved = poco->getIsEdgeRemoved(); uint8_t* isEdgeRemovedData = reinterpret_cast<uint8_t*>(const_cast<char*>(isEdgeRemoved->getData())); capnp::Data::Reader isEdgeRemovedReader(isEdgeRemovedData, isEdgeRemoved->getSize()); builder.setIsEdgeRemoved(isEdgeRemovedReader); auto isNodeInDirtyList = poco->getIsNodeInDirtyList(); uint8_t* isNodeInDirtyListData = reinterpret_cast<uint8_t*>(const_cast<char*>(isNodeInDirtyList->getData())); capnp::Data::Reader isNodeInDirtyListReader(isNodeInDirtyListData, isNodeInDirtyList->getSize()); builder.setIsNodeInDirtyList(isNodeInDirtyListReader); return true; } Nv::Blast::FamilyGraph* FamilyGraphDTO::deserialize(Nv::Blast::Serialization::FamilyGraph::Reader reader) { NV_UNUSED(reader); return nullptr; } bool FamilyGraphDTO::deserializeInto(Nv::Blast::Serialization::FamilyGraph::Reader reader, Nv::Blast::FamilyGraph * poco) { auto readerIslandIds = reader.getIslandIds(); const uint32_t numIslandIds = readerIslandIds.size(); for (uint32_t i = 0; i < numIslandIds; i++) { poco->getIslandIds()[i] = readerIslandIds[i]; } auto readerDirtyNodeLinks = reader.getDirtyNodeLinks(); const uint32_t numDirtyNodeLinks = readerDirtyNodeLinks.size(); for (uint32_t i = 0; i < numDirtyNodeLinks; i++) { poco->getDirtyNodeLinks()[i] = readerDirtyNodeLinks[i]; } auto readerFirstDirtyNodeIndices = reader.getFirstDirtyNodeIndices(); const uint32_t numFirstDirtyNodeIndices = readerFirstDirtyNodeIndices.size(); for (uint32_t i = 0; i < numFirstDirtyNodeIndices; i++) { poco->getFirstDirtyNodeIndices()[i] = readerFirstDirtyNodeIndices[i]; } auto readerFastRoute = reader.getFastRoute(); const uint32_t numFastRoute = readerFastRoute.size(); for (uint32_t i = 0; i < numFastRoute; i++) { poco->getFastRoute()[i] = readerFastRoute[i]; } auto readerHopCounts = reader.getHopCounts(); const uint32_t numHopCounts = readerHopCounts.size(); for (uint32_t i = 0; i < numHopCounts; i++) { poco->getHopCounts()[i] = readerHopCounts[i]; } auto readerIsEdgeRemoved = reader.getIsEdgeRemoved(); const uint32_t numIsEdgeRemoved = readerIsEdgeRemoved.size(); const char* isEdgeRemovedData = reinterpret_cast<const char*>(readerIsEdgeRemoved.begin()); auto isEdgeRemoved = poco->getIsEdgeRemoved(); isEdgeRemoved->setData(isEdgeRemovedData, numIsEdgeRemoved); auto readerIsNodeInDirtyList = reader.getIsNodeInDirtyList(); const uint32_t numIsNodeInDirtyList = readerIsNodeInDirtyList.size(); const char* readerIsNodeInDirtyListData = reinterpret_cast<const char*>(readerIsNodeInDirtyList.begin()); auto isNodeInDirtyList = poco->getIsNodeInDirtyList(); isNodeInDirtyList->setData(readerIsNodeInDirtyListData, numIsNodeInDirtyList); return true; } } // namespace Blast } // namespace Nv
5,532
C++
40.916666
123
0.743312
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/PxMeshScaleDTO.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "DTOMacros.h" #include "PxMeshScale.h" #include "NvBlastExtPxSerialization-capn.h" #include "PxCooking.h" DTO_CLASS(PxMeshScale, physx::PxMeshScale, Nv::Blast::Serialization::PxMeshScale)
1,775
C
49.742856
81
0.770704
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/TkAssetJointDescDTO.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "DTOMacros.h" #include "NvBlastTkAsset.h" #include "NvBlastExtTkSerialization-capn.h" DTO_CLASS(TkAssetJointDesc, Nv::Blast::TkAssetJointDesc, Nv::Blast::Serialization::TkAssetJointDesc)
1,774
C
51.205881
100
0.77283
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/FamilyGraphDTO.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #pragma once #include "DTOMacros.h" #include "NvBlastFamilyGraph.h" #include "NvBlastExtLlSerialization-capn.h" DTO_CLASS(FamilyGraph, Nv::Blast::FamilyGraph, Nv::Blast::Serialization::FamilyGraph)
1,763
C
50.882351
85
0.771412
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/DTO/NvBlastIDDTO.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastIDDTO.h" #include "NvBlastTypes.h" #include "NvBlastAssert.h" #include "NvBlastExtLlSerialization-capn.h" namespace Nv { namespace Blast { bool NvBlastIDDTO::serialize(Nv::Blast::Serialization::UUID::Builder builder, const NvBlastID * poco) { capnp::Data::Reader idArrayReader((unsigned char *)poco->data, 16); builder.setValue(idArrayReader); return true; } NvBlastID* NvBlastIDDTO::deserialize(Nv::Blast::Serialization::UUID::Reader reader) { //FIXME NV_UNUSED(reader); //TODO: Allocate with ExtContext and return return nullptr; } bool NvBlastIDDTO::deserializeInto(Nv::Blast::Serialization::UUID::Reader reader, NvBlastID * poco) { NVBLAST_ASSERT_WITH_MESSAGE(reader.getValue().size() == 16, "BlastID must be 16 bytes"); memcpy(poco, reader.getValue().begin(), 16); return true; } } // namespace Blast } // namespace Nv
2,466
C++
34.242857
101
0.745742
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/generated/NvBlastExtLlSerialization-capn.h
// Generated by Cap'n Proto compiler, DO NOT EDIT // source: NvBlastExtLlSerialization-capn #ifndef CAPNP_INCLUDED_9a4a58fac38375e0_ #define CAPNP_INCLUDED_9a4a58fac38375e0_ #include <capnp/generated-header-support.h> #if CAPNP_VERSION != 6001 #error "Version mismatch between generated code and library headers. You must use the same version of the Cap'n Proto compiler and library." #endif namespace capnp { namespace schemas { CAPNP_DECLARE_SCHEMA(ce4f8468c36f427d); CAPNP_DECLARE_SCHEMA(fe6948a9a6a3eff5); CAPNP_DECLARE_SCHEMA(d20ccbe36dd9711d); CAPNP_DECLARE_SCHEMA(8a38616881ef8310); CAPNP_DECLARE_SCHEMA(d5e1a9fb31b1350d); CAPNP_DECLARE_SCHEMA(b292bd608606f041); enum class Type_b292bd608606f041: uint16_t { ASSET_DATA_BLOCK, INSTANCE_DATA_BLOCK, }; CAPNP_DECLARE_ENUM(Type, b292bd608606f041); CAPNP_DECLARE_SCHEMA(92818c664a7b1aba); CAPNP_DECLARE_SCHEMA(c43da43c95eada67); CAPNP_DECLARE_SCHEMA(f018cbfcaacb3a55); CAPNP_DECLARE_SCHEMA(bfd00835cc19bf3a); } // namespace schemas } // namespace capnp namespace Nv { namespace Blast { namespace Serialization { struct Asset { Asset() = delete; class Reader; class Builder; class Pipeline; struct _capnpPrivate { CAPNP_DECLARE_STRUCT_HEADER(ce4f8468c36f427d, 2, 7) #if !CAPNP_LITE static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; } #endif // !CAPNP_LITE }; }; struct Family { Family() = delete; class Reader; class Builder; class Pipeline; struct _capnpPrivate { CAPNP_DECLARE_STRUCT_HEADER(fe6948a9a6a3eff5, 3, 8) #if !CAPNP_LITE static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; } #endif // !CAPNP_LITE }; }; struct Actor { Actor() = delete; class Reader; class Builder; class Pipeline; struct _capnpPrivate { CAPNP_DECLARE_STRUCT_HEADER(d20ccbe36dd9711d, 3, 0) #if !CAPNP_LITE static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; } #endif // !CAPNP_LITE }; }; struct FamilyGraph { FamilyGraph() = delete; class Reader; class Builder; class Pipeline; struct _capnpPrivate { CAPNP_DECLARE_STRUCT_HEADER(8a38616881ef8310, 1, 7) #if !CAPNP_LITE static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; } #endif // !CAPNP_LITE }; }; struct NvBlastDataBlock { NvBlastDataBlock() = delete; class Reader; class Builder; class Pipeline; typedef ::capnp::schemas::Type_b292bd608606f041 Type; struct _capnpPrivate { CAPNP_DECLARE_STRUCT_HEADER(d5e1a9fb31b1350d, 2, 0) #if !CAPNP_LITE static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; } #endif // !CAPNP_LITE }; }; struct NvBlastChunk { NvBlastChunk() = delete; class Reader; class Builder; class Pipeline; struct _capnpPrivate { CAPNP_DECLARE_STRUCT_HEADER(92818c664a7b1aba, 3, 1) #if !CAPNP_LITE static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; } #endif // !CAPNP_LITE }; }; struct NvBlastBond { NvBlastBond() = delete; class Reader; class Builder; class Pipeline; struct _capnpPrivate { CAPNP_DECLARE_STRUCT_HEADER(c43da43c95eada67, 1, 2) #if !CAPNP_LITE static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; } #endif // !CAPNP_LITE }; }; struct NvBlastSupportGraph { NvBlastSupportGraph() = delete; class Reader; class Builder; class Pipeline; struct _capnpPrivate { CAPNP_DECLARE_STRUCT_HEADER(f018cbfcaacb3a55, 1, 4) #if !CAPNP_LITE static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; } #endif // !CAPNP_LITE }; }; struct UUID { UUID() = delete; class Reader; class Builder; class Pipeline; struct _capnpPrivate { CAPNP_DECLARE_STRUCT_HEADER(bfd00835cc19bf3a, 0, 1) #if !CAPNP_LITE static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; } #endif // !CAPNP_LITE }; }; // ======================================================================================= class Asset::Reader { public: typedef Asset Reads; Reader() = default; inline explicit Reader(::capnp::_::StructReader base): _reader(base) {} inline ::capnp::MessageSize totalSize() const { return _reader.totalSize().asPublic(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return ::capnp::_::structString(_reader, *_capnpPrivate::brand()); } #endif // !CAPNP_LITE inline bool hasHeader() const; inline ::Nv::Blast::Serialization::NvBlastDataBlock::Reader getHeader() const; inline bool hasID() const; inline ::Nv::Blast::Serialization::UUID::Reader getID() const; inline ::uint32_t getChunkCount() const; inline bool hasGraph() const; inline ::Nv::Blast::Serialization::NvBlastSupportGraph::Reader getGraph() const; inline ::uint32_t getLeafChunkCount() const; inline ::uint32_t getFirstSubsupportChunkIndex() const; inline ::uint32_t getBondCount() const; inline bool hasChunks() const; inline ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>::Reader getChunks() const; inline bool hasBonds() const; inline ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>::Reader getBonds() const; inline bool hasSubtreeLeafChunkCounts() const; inline ::capnp::List< ::uint32_t>::Reader getSubtreeLeafChunkCounts() const; inline bool hasChunkToGraphNodeMap() const; inline ::capnp::List< ::uint32_t>::Reader getChunkToGraphNodeMap() const; private: ::capnp::_::StructReader _reader; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; template <typename, ::capnp::Kind> friend struct ::capnp::List; friend class ::capnp::MessageBuilder; friend class ::capnp::Orphanage; }; class Asset::Builder { public: typedef Asset Builds; Builder() = delete; // Deleted to discourage incorrect usage. // You can explicitly initialize to nullptr instead. inline Builder(decltype(nullptr)) {} inline explicit Builder(::capnp::_::StructBuilder base): _builder(base) {} inline operator Reader() const { return Reader(_builder.asReader()); } inline Reader asReader() const { return *this; } inline ::capnp::MessageSize totalSize() const { return asReader().totalSize(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return asReader().toString(); } #endif // !CAPNP_LITE inline bool hasHeader(); inline ::Nv::Blast::Serialization::NvBlastDataBlock::Builder getHeader(); inline void setHeader( ::Nv::Blast::Serialization::NvBlastDataBlock::Reader value); inline ::Nv::Blast::Serialization::NvBlastDataBlock::Builder initHeader(); inline void adoptHeader(::capnp::Orphan< ::Nv::Blast::Serialization::NvBlastDataBlock>&& value); inline ::capnp::Orphan< ::Nv::Blast::Serialization::NvBlastDataBlock> disownHeader(); inline bool hasID(); inline ::Nv::Blast::Serialization::UUID::Builder getID(); inline void setID( ::Nv::Blast::Serialization::UUID::Reader value); inline ::Nv::Blast::Serialization::UUID::Builder initID(); inline void adoptID(::capnp::Orphan< ::Nv::Blast::Serialization::UUID>&& value); inline ::capnp::Orphan< ::Nv::Blast::Serialization::UUID> disownID(); inline ::uint32_t getChunkCount(); inline void setChunkCount( ::uint32_t value); inline bool hasGraph(); inline ::Nv::Blast::Serialization::NvBlastSupportGraph::Builder getGraph(); inline void setGraph( ::Nv::Blast::Serialization::NvBlastSupportGraph::Reader value); inline ::Nv::Blast::Serialization::NvBlastSupportGraph::Builder initGraph(); inline void adoptGraph(::capnp::Orphan< ::Nv::Blast::Serialization::NvBlastSupportGraph>&& value); inline ::capnp::Orphan< ::Nv::Blast::Serialization::NvBlastSupportGraph> disownGraph(); inline ::uint32_t getLeafChunkCount(); inline void setLeafChunkCount( ::uint32_t value); inline ::uint32_t getFirstSubsupportChunkIndex(); inline void setFirstSubsupportChunkIndex( ::uint32_t value); inline ::uint32_t getBondCount(); inline void setBondCount( ::uint32_t value); inline bool hasChunks(); inline ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>::Builder getChunks(); inline void setChunks( ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>::Reader value); inline ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>::Builder initChunks(unsigned int size); inline void adoptChunks(::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>>&& value); inline ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>> disownChunks(); inline bool hasBonds(); inline ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>::Builder getBonds(); inline void setBonds( ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>::Reader value); inline ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>::Builder initBonds(unsigned int size); inline void adoptBonds(::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>>&& value); inline ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>> disownBonds(); inline bool hasSubtreeLeafChunkCounts(); inline ::capnp::List< ::uint32_t>::Builder getSubtreeLeafChunkCounts(); inline void setSubtreeLeafChunkCounts( ::capnp::List< ::uint32_t>::Reader value); inline void setSubtreeLeafChunkCounts(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initSubtreeLeafChunkCounts(unsigned int size); inline void adoptSubtreeLeafChunkCounts(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownSubtreeLeafChunkCounts(); inline bool hasChunkToGraphNodeMap(); inline ::capnp::List< ::uint32_t>::Builder getChunkToGraphNodeMap(); inline void setChunkToGraphNodeMap( ::capnp::List< ::uint32_t>::Reader value); inline void setChunkToGraphNodeMap(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initChunkToGraphNodeMap(unsigned int size); inline void adoptChunkToGraphNodeMap(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownChunkToGraphNodeMap(); private: ::capnp::_::StructBuilder _builder; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; friend class ::capnp::Orphanage; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; }; #if !CAPNP_LITE class Asset::Pipeline { public: typedef Asset Pipelines; inline Pipeline(decltype(nullptr)): _typeless(nullptr) {} inline explicit Pipeline(::capnp::AnyPointer::Pipeline&& typeless) : _typeless(kj::mv(typeless)) {} inline ::Nv::Blast::Serialization::NvBlastDataBlock::Pipeline getHeader(); inline ::Nv::Blast::Serialization::UUID::Pipeline getID(); inline ::Nv::Blast::Serialization::NvBlastSupportGraph::Pipeline getGraph(); private: ::capnp::AnyPointer::Pipeline _typeless; friend class ::capnp::PipelineHook; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; }; #endif // !CAPNP_LITE class Family::Reader { public: typedef Family Reads; Reader() = default; inline explicit Reader(::capnp::_::StructReader base): _reader(base) {} inline ::capnp::MessageSize totalSize() const { return _reader.totalSize().asPublic(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return ::capnp::_::structString(_reader, *_capnpPrivate::brand()); } #endif // !CAPNP_LITE inline bool hasAssetID() const; inline ::Nv::Blast::Serialization::UUID::Reader getAssetID() const; inline bool hasActors() const; inline ::capnp::List< ::Nv::Blast::Serialization::Actor>::Reader getActors() const; inline bool hasVisibleChunkIndexLinks() const; inline ::capnp::List< ::uint32_t>::Reader getVisibleChunkIndexLinks() const; inline bool hasChunkActorIndices() const; inline ::capnp::List< ::uint32_t>::Reader getChunkActorIndices() const; inline bool hasGraphNodeIndexLinks() const; inline ::capnp::List< ::uint32_t>::Reader getGraphNodeIndexLinks() const; inline bool hasLowerSupportChunkHealths() const; inline ::capnp::List<float>::Reader getLowerSupportChunkHealths() const; inline bool hasGraphBondHealths() const; inline ::capnp::List<float>::Reader getGraphBondHealths() const; inline bool hasFamilyGraph() const; inline ::Nv::Blast::Serialization::FamilyGraph::Reader getFamilyGraph() const; inline ::uint32_t getActorCount() const; inline ::uint32_t getBondCount() const; inline ::uint32_t getChunkCount() const; inline ::uint32_t getNodeCount() const; inline ::uint32_t getLowerSupportChunkCount() const; inline ::uint32_t getUpperSupportChunkCount() const; private: ::capnp::_::StructReader _reader; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; template <typename, ::capnp::Kind> friend struct ::capnp::List; friend class ::capnp::MessageBuilder; friend class ::capnp::Orphanage; }; class Family::Builder { public: typedef Family Builds; Builder() = delete; // Deleted to discourage incorrect usage. // You can explicitly initialize to nullptr instead. inline Builder(decltype(nullptr)) {} inline explicit Builder(::capnp::_::StructBuilder base): _builder(base) {} inline operator Reader() const { return Reader(_builder.asReader()); } inline Reader asReader() const { return *this; } inline ::capnp::MessageSize totalSize() const { return asReader().totalSize(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return asReader().toString(); } #endif // !CAPNP_LITE inline bool hasAssetID(); inline ::Nv::Blast::Serialization::UUID::Builder getAssetID(); inline void setAssetID( ::Nv::Blast::Serialization::UUID::Reader value); inline ::Nv::Blast::Serialization::UUID::Builder initAssetID(); inline void adoptAssetID(::capnp::Orphan< ::Nv::Blast::Serialization::UUID>&& value); inline ::capnp::Orphan< ::Nv::Blast::Serialization::UUID> disownAssetID(); inline bool hasActors(); inline ::capnp::List< ::Nv::Blast::Serialization::Actor>::Builder getActors(); inline void setActors( ::capnp::List< ::Nv::Blast::Serialization::Actor>::Reader value); inline ::capnp::List< ::Nv::Blast::Serialization::Actor>::Builder initActors(unsigned int size); inline void adoptActors(::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::Actor>>&& value); inline ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::Actor>> disownActors(); inline bool hasVisibleChunkIndexLinks(); inline ::capnp::List< ::uint32_t>::Builder getVisibleChunkIndexLinks(); inline void setVisibleChunkIndexLinks( ::capnp::List< ::uint32_t>::Reader value); inline void setVisibleChunkIndexLinks(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initVisibleChunkIndexLinks(unsigned int size); inline void adoptVisibleChunkIndexLinks(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownVisibleChunkIndexLinks(); inline bool hasChunkActorIndices(); inline ::capnp::List< ::uint32_t>::Builder getChunkActorIndices(); inline void setChunkActorIndices( ::capnp::List< ::uint32_t>::Reader value); inline void setChunkActorIndices(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initChunkActorIndices(unsigned int size); inline void adoptChunkActorIndices(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownChunkActorIndices(); inline bool hasGraphNodeIndexLinks(); inline ::capnp::List< ::uint32_t>::Builder getGraphNodeIndexLinks(); inline void setGraphNodeIndexLinks( ::capnp::List< ::uint32_t>::Reader value); inline void setGraphNodeIndexLinks(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initGraphNodeIndexLinks(unsigned int size); inline void adoptGraphNodeIndexLinks(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownGraphNodeIndexLinks(); inline bool hasLowerSupportChunkHealths(); inline ::capnp::List<float>::Builder getLowerSupportChunkHealths(); inline void setLowerSupportChunkHealths( ::capnp::List<float>::Reader value); inline void setLowerSupportChunkHealths(::kj::ArrayPtr<const float> value); inline ::capnp::List<float>::Builder initLowerSupportChunkHealths(unsigned int size); inline void adoptLowerSupportChunkHealths(::capnp::Orphan< ::capnp::List<float>>&& value); inline ::capnp::Orphan< ::capnp::List<float>> disownLowerSupportChunkHealths(); inline bool hasGraphBondHealths(); inline ::capnp::List<float>::Builder getGraphBondHealths(); inline void setGraphBondHealths( ::capnp::List<float>::Reader value); inline void setGraphBondHealths(::kj::ArrayPtr<const float> value); inline ::capnp::List<float>::Builder initGraphBondHealths(unsigned int size); inline void adoptGraphBondHealths(::capnp::Orphan< ::capnp::List<float>>&& value); inline ::capnp::Orphan< ::capnp::List<float>> disownGraphBondHealths(); inline bool hasFamilyGraph(); inline ::Nv::Blast::Serialization::FamilyGraph::Builder getFamilyGraph(); inline void setFamilyGraph( ::Nv::Blast::Serialization::FamilyGraph::Reader value); inline ::Nv::Blast::Serialization::FamilyGraph::Builder initFamilyGraph(); inline void adoptFamilyGraph(::capnp::Orphan< ::Nv::Blast::Serialization::FamilyGraph>&& value); inline ::capnp::Orphan< ::Nv::Blast::Serialization::FamilyGraph> disownFamilyGraph(); inline ::uint32_t getActorCount(); inline void setActorCount( ::uint32_t value); inline ::uint32_t getBondCount(); inline void setBondCount( ::uint32_t value); inline ::uint32_t getChunkCount(); inline void setChunkCount( ::uint32_t value); inline ::uint32_t getNodeCount(); inline void setNodeCount( ::uint32_t value); inline ::uint32_t getLowerSupportChunkCount(); inline void setLowerSupportChunkCount( ::uint32_t value); inline ::uint32_t getUpperSupportChunkCount(); inline void setUpperSupportChunkCount( ::uint32_t value); private: ::capnp::_::StructBuilder _builder; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; friend class ::capnp::Orphanage; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; }; #if !CAPNP_LITE class Family::Pipeline { public: typedef Family Pipelines; inline Pipeline(decltype(nullptr)): _typeless(nullptr) {} inline explicit Pipeline(::capnp::AnyPointer::Pipeline&& typeless) : _typeless(kj::mv(typeless)) {} inline ::Nv::Blast::Serialization::UUID::Pipeline getAssetID(); inline ::Nv::Blast::Serialization::FamilyGraph::Pipeline getFamilyGraph(); private: ::capnp::AnyPointer::Pipeline _typeless; friend class ::capnp::PipelineHook; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; }; #endif // !CAPNP_LITE class Actor::Reader { public: typedef Actor Reads; Reader() = default; inline explicit Reader(::capnp::_::StructReader base): _reader(base) {} inline ::capnp::MessageSize totalSize() const { return _reader.totalSize().asPublic(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return ::capnp::_::structString(_reader, *_capnpPrivate::brand()); } #endif // !CAPNP_LITE inline ::uint32_t getFamilyOffset() const; inline ::uint32_t getFirstVisibleChunkIndex() const; inline ::uint32_t getVisibleChunkCount() const; inline ::uint32_t getFirstGraphNodeIndex() const; inline ::uint32_t getGraphNodeCount() const; inline ::uint32_t getLeafChunkCount() const; private: ::capnp::_::StructReader _reader; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; template <typename, ::capnp::Kind> friend struct ::capnp::List; friend class ::capnp::MessageBuilder; friend class ::capnp::Orphanage; }; class Actor::Builder { public: typedef Actor Builds; Builder() = delete; // Deleted to discourage incorrect usage. // You can explicitly initialize to nullptr instead. inline Builder(decltype(nullptr)) {} inline explicit Builder(::capnp::_::StructBuilder base): _builder(base) {} inline operator Reader() const { return Reader(_builder.asReader()); } inline Reader asReader() const { return *this; } inline ::capnp::MessageSize totalSize() const { return asReader().totalSize(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return asReader().toString(); } #endif // !CAPNP_LITE inline ::uint32_t getFamilyOffset(); inline void setFamilyOffset( ::uint32_t value); inline ::uint32_t getFirstVisibleChunkIndex(); inline void setFirstVisibleChunkIndex( ::uint32_t value); inline ::uint32_t getVisibleChunkCount(); inline void setVisibleChunkCount( ::uint32_t value); inline ::uint32_t getFirstGraphNodeIndex(); inline void setFirstGraphNodeIndex( ::uint32_t value); inline ::uint32_t getGraphNodeCount(); inline void setGraphNodeCount( ::uint32_t value); inline ::uint32_t getLeafChunkCount(); inline void setLeafChunkCount( ::uint32_t value); private: ::capnp::_::StructBuilder _builder; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; friend class ::capnp::Orphanage; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; }; #if !CAPNP_LITE class Actor::Pipeline { public: typedef Actor Pipelines; inline Pipeline(decltype(nullptr)): _typeless(nullptr) {} inline explicit Pipeline(::capnp::AnyPointer::Pipeline&& typeless) : _typeless(kj::mv(typeless)) {} private: ::capnp::AnyPointer::Pipeline _typeless; friend class ::capnp::PipelineHook; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; }; #endif // !CAPNP_LITE class FamilyGraph::Reader { public: typedef FamilyGraph Reads; Reader() = default; inline explicit Reader(::capnp::_::StructReader base): _reader(base) {} inline ::capnp::MessageSize totalSize() const { return _reader.totalSize().asPublic(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return ::capnp::_::structString(_reader, *_capnpPrivate::brand()); } #endif // !CAPNP_LITE inline bool hasIslandIds() const; inline ::capnp::List< ::uint32_t>::Reader getIslandIds() const; inline bool hasDirtyNodeLinks() const; inline ::capnp::List< ::uint32_t>::Reader getDirtyNodeLinks() const; inline bool hasFirstDirtyNodeIndices() const; inline ::capnp::List< ::uint32_t>::Reader getFirstDirtyNodeIndices() const; inline bool hasFastRoute() const; inline ::capnp::List< ::uint32_t>::Reader getFastRoute() const; inline bool hasHopCounts() const; inline ::capnp::List< ::uint32_t>::Reader getHopCounts() const; inline bool hasIsEdgeRemoved() const; inline ::capnp::Data::Reader getIsEdgeRemoved() const; inline bool hasIsNodeInDirtyList() const; inline ::capnp::Data::Reader getIsNodeInDirtyList() const; inline ::uint32_t getNodeCount() const; private: ::capnp::_::StructReader _reader; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; template <typename, ::capnp::Kind> friend struct ::capnp::List; friend class ::capnp::MessageBuilder; friend class ::capnp::Orphanage; }; class FamilyGraph::Builder { public: typedef FamilyGraph Builds; Builder() = delete; // Deleted to discourage incorrect usage. // You can explicitly initialize to nullptr instead. inline Builder(decltype(nullptr)) {} inline explicit Builder(::capnp::_::StructBuilder base): _builder(base) {} inline operator Reader() const { return Reader(_builder.asReader()); } inline Reader asReader() const { return *this; } inline ::capnp::MessageSize totalSize() const { return asReader().totalSize(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return asReader().toString(); } #endif // !CAPNP_LITE inline bool hasIslandIds(); inline ::capnp::List< ::uint32_t>::Builder getIslandIds(); inline void setIslandIds( ::capnp::List< ::uint32_t>::Reader value); inline void setIslandIds(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initIslandIds(unsigned int size); inline void adoptIslandIds(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownIslandIds(); inline bool hasDirtyNodeLinks(); inline ::capnp::List< ::uint32_t>::Builder getDirtyNodeLinks(); inline void setDirtyNodeLinks( ::capnp::List< ::uint32_t>::Reader value); inline void setDirtyNodeLinks(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initDirtyNodeLinks(unsigned int size); inline void adoptDirtyNodeLinks(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownDirtyNodeLinks(); inline bool hasFirstDirtyNodeIndices(); inline ::capnp::List< ::uint32_t>::Builder getFirstDirtyNodeIndices(); inline void setFirstDirtyNodeIndices( ::capnp::List< ::uint32_t>::Reader value); inline void setFirstDirtyNodeIndices(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initFirstDirtyNodeIndices(unsigned int size); inline void adoptFirstDirtyNodeIndices(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownFirstDirtyNodeIndices(); inline bool hasFastRoute(); inline ::capnp::List< ::uint32_t>::Builder getFastRoute(); inline void setFastRoute( ::capnp::List< ::uint32_t>::Reader value); inline void setFastRoute(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initFastRoute(unsigned int size); inline void adoptFastRoute(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownFastRoute(); inline bool hasHopCounts(); inline ::capnp::List< ::uint32_t>::Builder getHopCounts(); inline void setHopCounts( ::capnp::List< ::uint32_t>::Reader value); inline void setHopCounts(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initHopCounts(unsigned int size); inline void adoptHopCounts(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownHopCounts(); inline bool hasIsEdgeRemoved(); inline ::capnp::Data::Builder getIsEdgeRemoved(); inline void setIsEdgeRemoved( ::capnp::Data::Reader value); inline ::capnp::Data::Builder initIsEdgeRemoved(unsigned int size); inline void adoptIsEdgeRemoved(::capnp::Orphan< ::capnp::Data>&& value); inline ::capnp::Orphan< ::capnp::Data> disownIsEdgeRemoved(); inline bool hasIsNodeInDirtyList(); inline ::capnp::Data::Builder getIsNodeInDirtyList(); inline void setIsNodeInDirtyList( ::capnp::Data::Reader value); inline ::capnp::Data::Builder initIsNodeInDirtyList(unsigned int size); inline void adoptIsNodeInDirtyList(::capnp::Orphan< ::capnp::Data>&& value); inline ::capnp::Orphan< ::capnp::Data> disownIsNodeInDirtyList(); inline ::uint32_t getNodeCount(); inline void setNodeCount( ::uint32_t value); private: ::capnp::_::StructBuilder _builder; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; friend class ::capnp::Orphanage; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; }; #if !CAPNP_LITE class FamilyGraph::Pipeline { public: typedef FamilyGraph Pipelines; inline Pipeline(decltype(nullptr)): _typeless(nullptr) {} inline explicit Pipeline(::capnp::AnyPointer::Pipeline&& typeless) : _typeless(kj::mv(typeless)) {} private: ::capnp::AnyPointer::Pipeline _typeless; friend class ::capnp::PipelineHook; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; }; #endif // !CAPNP_LITE class NvBlastDataBlock::Reader { public: typedef NvBlastDataBlock Reads; Reader() = default; inline explicit Reader(::capnp::_::StructReader base): _reader(base) {} inline ::capnp::MessageSize totalSize() const { return _reader.totalSize().asPublic(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return ::capnp::_::structString(_reader, *_capnpPrivate::brand()); } #endif // !CAPNP_LITE inline ::Nv::Blast::Serialization::NvBlastDataBlock::Type getDataType() const; inline ::uint32_t getFormatVersion() const; inline ::uint32_t getSize() const; private: ::capnp::_::StructReader _reader; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; template <typename, ::capnp::Kind> friend struct ::capnp::List; friend class ::capnp::MessageBuilder; friend class ::capnp::Orphanage; }; class NvBlastDataBlock::Builder { public: typedef NvBlastDataBlock Builds; Builder() = delete; // Deleted to discourage incorrect usage. // You can explicitly initialize to nullptr instead. inline Builder(decltype(nullptr)) {} inline explicit Builder(::capnp::_::StructBuilder base): _builder(base) {} inline operator Reader() const { return Reader(_builder.asReader()); } inline Reader asReader() const { return *this; } inline ::capnp::MessageSize totalSize() const { return asReader().totalSize(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return asReader().toString(); } #endif // !CAPNP_LITE inline ::Nv::Blast::Serialization::NvBlastDataBlock::Type getDataType(); inline void setDataType( ::Nv::Blast::Serialization::NvBlastDataBlock::Type value); inline ::uint32_t getFormatVersion(); inline void setFormatVersion( ::uint32_t value); inline ::uint32_t getSize(); inline void setSize( ::uint32_t value); private: ::capnp::_::StructBuilder _builder; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; friend class ::capnp::Orphanage; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; }; #if !CAPNP_LITE class NvBlastDataBlock::Pipeline { public: typedef NvBlastDataBlock Pipelines; inline Pipeline(decltype(nullptr)): _typeless(nullptr) {} inline explicit Pipeline(::capnp::AnyPointer::Pipeline&& typeless) : _typeless(kj::mv(typeless)) {} private: ::capnp::AnyPointer::Pipeline _typeless; friend class ::capnp::PipelineHook; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; }; #endif // !CAPNP_LITE class NvBlastChunk::Reader { public: typedef NvBlastChunk Reads; Reader() = default; inline explicit Reader(::capnp::_::StructReader base): _reader(base) {} inline ::capnp::MessageSize totalSize() const { return _reader.totalSize().asPublic(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return ::capnp::_::structString(_reader, *_capnpPrivate::brand()); } #endif // !CAPNP_LITE inline bool hasCentroid() const; inline ::capnp::List<float>::Reader getCentroid() const; inline float getVolume() const; inline ::uint32_t getParentChunkIndex() const; inline ::uint32_t getFirstChildIndex() const; inline ::uint32_t getChildIndexStop() const; inline ::uint32_t getUserData() const; private: ::capnp::_::StructReader _reader; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; template <typename, ::capnp::Kind> friend struct ::capnp::List; friend class ::capnp::MessageBuilder; friend class ::capnp::Orphanage; }; class NvBlastChunk::Builder { public: typedef NvBlastChunk Builds; Builder() = delete; // Deleted to discourage incorrect usage. // You can explicitly initialize to nullptr instead. inline Builder(decltype(nullptr)) {} inline explicit Builder(::capnp::_::StructBuilder base): _builder(base) {} inline operator Reader() const { return Reader(_builder.asReader()); } inline Reader asReader() const { return *this; } inline ::capnp::MessageSize totalSize() const { return asReader().totalSize(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return asReader().toString(); } #endif // !CAPNP_LITE inline bool hasCentroid(); inline ::capnp::List<float>::Builder getCentroid(); inline void setCentroid( ::capnp::List<float>::Reader value); inline void setCentroid(::kj::ArrayPtr<const float> value); inline ::capnp::List<float>::Builder initCentroid(unsigned int size); inline void adoptCentroid(::capnp::Orphan< ::capnp::List<float>>&& value); inline ::capnp::Orphan< ::capnp::List<float>> disownCentroid(); inline float getVolume(); inline void setVolume(float value); inline ::uint32_t getParentChunkIndex(); inline void setParentChunkIndex( ::uint32_t value); inline ::uint32_t getFirstChildIndex(); inline void setFirstChildIndex( ::uint32_t value); inline ::uint32_t getChildIndexStop(); inline void setChildIndexStop( ::uint32_t value); inline ::uint32_t getUserData(); inline void setUserData( ::uint32_t value); private: ::capnp::_::StructBuilder _builder; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; friend class ::capnp::Orphanage; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; }; #if !CAPNP_LITE class NvBlastChunk::Pipeline { public: typedef NvBlastChunk Pipelines; inline Pipeline(decltype(nullptr)): _typeless(nullptr) {} inline explicit Pipeline(::capnp::AnyPointer::Pipeline&& typeless) : _typeless(kj::mv(typeless)) {} private: ::capnp::AnyPointer::Pipeline _typeless; friend class ::capnp::PipelineHook; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; }; #endif // !CAPNP_LITE class NvBlastBond::Reader { public: typedef NvBlastBond Reads; Reader() = default; inline explicit Reader(::capnp::_::StructReader base): _reader(base) {} inline ::capnp::MessageSize totalSize() const { return _reader.totalSize().asPublic(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return ::capnp::_::structString(_reader, *_capnpPrivate::brand()); } #endif // !CAPNP_LITE inline bool hasNormal() const; inline ::capnp::List<float>::Reader getNormal() const; inline float getArea() const; inline bool hasCentroid() const; inline ::capnp::List<float>::Reader getCentroid() const; inline ::uint32_t getUserData() const; private: ::capnp::_::StructReader _reader; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; template <typename, ::capnp::Kind> friend struct ::capnp::List; friend class ::capnp::MessageBuilder; friend class ::capnp::Orphanage; }; class NvBlastBond::Builder { public: typedef NvBlastBond Builds; Builder() = delete; // Deleted to discourage incorrect usage. // You can explicitly initialize to nullptr instead. inline Builder(decltype(nullptr)) {} inline explicit Builder(::capnp::_::StructBuilder base): _builder(base) {} inline operator Reader() const { return Reader(_builder.asReader()); } inline Reader asReader() const { return *this; } inline ::capnp::MessageSize totalSize() const { return asReader().totalSize(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return asReader().toString(); } #endif // !CAPNP_LITE inline bool hasNormal(); inline ::capnp::List<float>::Builder getNormal(); inline void setNormal( ::capnp::List<float>::Reader value); inline void setNormal(::kj::ArrayPtr<const float> value); inline ::capnp::List<float>::Builder initNormal(unsigned int size); inline void adoptNormal(::capnp::Orphan< ::capnp::List<float>>&& value); inline ::capnp::Orphan< ::capnp::List<float>> disownNormal(); inline float getArea(); inline void setArea(float value); inline bool hasCentroid(); inline ::capnp::List<float>::Builder getCentroid(); inline void setCentroid( ::capnp::List<float>::Reader value); inline void setCentroid(::kj::ArrayPtr<const float> value); inline ::capnp::List<float>::Builder initCentroid(unsigned int size); inline void adoptCentroid(::capnp::Orphan< ::capnp::List<float>>&& value); inline ::capnp::Orphan< ::capnp::List<float>> disownCentroid(); inline ::uint32_t getUserData(); inline void setUserData( ::uint32_t value); private: ::capnp::_::StructBuilder _builder; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; friend class ::capnp::Orphanage; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; }; #if !CAPNP_LITE class NvBlastBond::Pipeline { public: typedef NvBlastBond Pipelines; inline Pipeline(decltype(nullptr)): _typeless(nullptr) {} inline explicit Pipeline(::capnp::AnyPointer::Pipeline&& typeless) : _typeless(kj::mv(typeless)) {} private: ::capnp::AnyPointer::Pipeline _typeless; friend class ::capnp::PipelineHook; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; }; #endif // !CAPNP_LITE class NvBlastSupportGraph::Reader { public: typedef NvBlastSupportGraph Reads; Reader() = default; inline explicit Reader(::capnp::_::StructReader base): _reader(base) {} inline ::capnp::MessageSize totalSize() const { return _reader.totalSize().asPublic(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return ::capnp::_::structString(_reader, *_capnpPrivate::brand()); } #endif // !CAPNP_LITE inline ::uint32_t getNodeCount() const; inline bool hasChunkIndices() const; inline ::capnp::List< ::uint32_t>::Reader getChunkIndices() const; inline bool hasAdjacencyPartition() const; inline ::capnp::List< ::uint32_t>::Reader getAdjacencyPartition() const; inline bool hasAdjacentNodeIndices() const; inline ::capnp::List< ::uint32_t>::Reader getAdjacentNodeIndices() const; inline bool hasAdjacentBondIndices() const; inline ::capnp::List< ::uint32_t>::Reader getAdjacentBondIndices() const; private: ::capnp::_::StructReader _reader; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; template <typename, ::capnp::Kind> friend struct ::capnp::List; friend class ::capnp::MessageBuilder; friend class ::capnp::Orphanage; }; class NvBlastSupportGraph::Builder { public: typedef NvBlastSupportGraph Builds; Builder() = delete; // Deleted to discourage incorrect usage. // You can explicitly initialize to nullptr instead. inline Builder(decltype(nullptr)) {} inline explicit Builder(::capnp::_::StructBuilder base): _builder(base) {} inline operator Reader() const { return Reader(_builder.asReader()); } inline Reader asReader() const { return *this; } inline ::capnp::MessageSize totalSize() const { return asReader().totalSize(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return asReader().toString(); } #endif // !CAPNP_LITE inline ::uint32_t getNodeCount(); inline void setNodeCount( ::uint32_t value); inline bool hasChunkIndices(); inline ::capnp::List< ::uint32_t>::Builder getChunkIndices(); inline void setChunkIndices( ::capnp::List< ::uint32_t>::Reader value); inline void setChunkIndices(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initChunkIndices(unsigned int size); inline void adoptChunkIndices(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownChunkIndices(); inline bool hasAdjacencyPartition(); inline ::capnp::List< ::uint32_t>::Builder getAdjacencyPartition(); inline void setAdjacencyPartition( ::capnp::List< ::uint32_t>::Reader value); inline void setAdjacencyPartition(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initAdjacencyPartition(unsigned int size); inline void adoptAdjacencyPartition(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownAdjacencyPartition(); inline bool hasAdjacentNodeIndices(); inline ::capnp::List< ::uint32_t>::Builder getAdjacentNodeIndices(); inline void setAdjacentNodeIndices( ::capnp::List< ::uint32_t>::Reader value); inline void setAdjacentNodeIndices(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initAdjacentNodeIndices(unsigned int size); inline void adoptAdjacentNodeIndices(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownAdjacentNodeIndices(); inline bool hasAdjacentBondIndices(); inline ::capnp::List< ::uint32_t>::Builder getAdjacentBondIndices(); inline void setAdjacentBondIndices( ::capnp::List< ::uint32_t>::Reader value); inline void setAdjacentBondIndices(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initAdjacentBondIndices(unsigned int size); inline void adoptAdjacentBondIndices(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownAdjacentBondIndices(); private: ::capnp::_::StructBuilder _builder; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; friend class ::capnp::Orphanage; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; }; #if !CAPNP_LITE class NvBlastSupportGraph::Pipeline { public: typedef NvBlastSupportGraph Pipelines; inline Pipeline(decltype(nullptr)): _typeless(nullptr) {} inline explicit Pipeline(::capnp::AnyPointer::Pipeline&& typeless) : _typeless(kj::mv(typeless)) {} private: ::capnp::AnyPointer::Pipeline _typeless; friend class ::capnp::PipelineHook; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; }; #endif // !CAPNP_LITE class UUID::Reader { public: typedef UUID Reads; Reader() = default; inline explicit Reader(::capnp::_::StructReader base): _reader(base) {} inline ::capnp::MessageSize totalSize() const { return _reader.totalSize().asPublic(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return ::capnp::_::structString(_reader, *_capnpPrivate::brand()); } #endif // !CAPNP_LITE inline bool hasValue() const; inline ::capnp::Data::Reader getValue() const; private: ::capnp::_::StructReader _reader; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; template <typename, ::capnp::Kind> friend struct ::capnp::List; friend class ::capnp::MessageBuilder; friend class ::capnp::Orphanage; }; class UUID::Builder { public: typedef UUID Builds; Builder() = delete; // Deleted to discourage incorrect usage. // You can explicitly initialize to nullptr instead. inline Builder(decltype(nullptr)) {} inline explicit Builder(::capnp::_::StructBuilder base): _builder(base) {} inline operator Reader() const { return Reader(_builder.asReader()); } inline Reader asReader() const { return *this; } inline ::capnp::MessageSize totalSize() const { return asReader().totalSize(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return asReader().toString(); } #endif // !CAPNP_LITE inline bool hasValue(); inline ::capnp::Data::Builder getValue(); inline void setValue( ::capnp::Data::Reader value); inline ::capnp::Data::Builder initValue(unsigned int size); inline void adoptValue(::capnp::Orphan< ::capnp::Data>&& value); inline ::capnp::Orphan< ::capnp::Data> disownValue(); private: ::capnp::_::StructBuilder _builder; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; friend class ::capnp::Orphanage; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; }; #if !CAPNP_LITE class UUID::Pipeline { public: typedef UUID Pipelines; inline Pipeline(decltype(nullptr)): _typeless(nullptr) {} inline explicit Pipeline(::capnp::AnyPointer::Pipeline&& typeless) : _typeless(kj::mv(typeless)) {} private: ::capnp::AnyPointer::Pipeline _typeless; friend class ::capnp::PipelineHook; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; }; #endif // !CAPNP_LITE // ======================================================================================= inline bool Asset::Reader::hasHeader() const { return !_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline bool Asset::Builder::hasHeader() { return !_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline ::Nv::Blast::Serialization::NvBlastDataBlock::Reader Asset::Reader::getHeader() const { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::NvBlastDataBlock>::get(_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline ::Nv::Blast::Serialization::NvBlastDataBlock::Builder Asset::Builder::getHeader() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::NvBlastDataBlock>::get(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } #if !CAPNP_LITE inline ::Nv::Blast::Serialization::NvBlastDataBlock::Pipeline Asset::Pipeline::getHeader() { return ::Nv::Blast::Serialization::NvBlastDataBlock::Pipeline(_typeless.getPointerField(0)); } #endif // !CAPNP_LITE inline void Asset::Builder::setHeader( ::Nv::Blast::Serialization::NvBlastDataBlock::Reader value) { ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::NvBlastDataBlock>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); } inline ::Nv::Blast::Serialization::NvBlastDataBlock::Builder Asset::Builder::initHeader() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::NvBlastDataBlock>::init(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline void Asset::Builder::adoptHeader( ::capnp::Orphan< ::Nv::Blast::Serialization::NvBlastDataBlock>&& value) { ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::NvBlastDataBlock>::adopt(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::Nv::Blast::Serialization::NvBlastDataBlock> Asset::Builder::disownHeader() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::NvBlastDataBlock>::disown(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline bool Asset::Reader::hasID() const { return !_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline bool Asset::Builder::hasID() { return !_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline ::Nv::Blast::Serialization::UUID::Reader Asset::Reader::getID() const { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::UUID>::get(_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline ::Nv::Blast::Serialization::UUID::Builder Asset::Builder::getID() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::UUID>::get(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } #if !CAPNP_LITE inline ::Nv::Blast::Serialization::UUID::Pipeline Asset::Pipeline::getID() { return ::Nv::Blast::Serialization::UUID::Pipeline(_typeless.getPointerField(1)); } #endif // !CAPNP_LITE inline void Asset::Builder::setID( ::Nv::Blast::Serialization::UUID::Reader value) { ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::UUID>::set(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), value); } inline ::Nv::Blast::Serialization::UUID::Builder Asset::Builder::initID() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::UUID>::init(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline void Asset::Builder::adoptID( ::capnp::Orphan< ::Nv::Blast::Serialization::UUID>&& value) { ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::UUID>::adopt(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::Nv::Blast::Serialization::UUID> Asset::Builder::disownID() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::UUID>::disown(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline ::uint32_t Asset::Reader::getChunkCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline ::uint32_t Asset::Builder::getChunkCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline void Asset::Builder::setChunkCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS, value); } inline bool Asset::Reader::hasGraph() const { return !_reader.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS).isNull(); } inline bool Asset::Builder::hasGraph() { return !_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS).isNull(); } inline ::Nv::Blast::Serialization::NvBlastSupportGraph::Reader Asset::Reader::getGraph() const { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::NvBlastSupportGraph>::get(_reader.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); } inline ::Nv::Blast::Serialization::NvBlastSupportGraph::Builder Asset::Builder::getGraph() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::NvBlastSupportGraph>::get(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); } #if !CAPNP_LITE inline ::Nv::Blast::Serialization::NvBlastSupportGraph::Pipeline Asset::Pipeline::getGraph() { return ::Nv::Blast::Serialization::NvBlastSupportGraph::Pipeline(_typeless.getPointerField(2)); } #endif // !CAPNP_LITE inline void Asset::Builder::setGraph( ::Nv::Blast::Serialization::NvBlastSupportGraph::Reader value) { ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::NvBlastSupportGraph>::set(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), value); } inline ::Nv::Blast::Serialization::NvBlastSupportGraph::Builder Asset::Builder::initGraph() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::NvBlastSupportGraph>::init(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); } inline void Asset::Builder::adoptGraph( ::capnp::Orphan< ::Nv::Blast::Serialization::NvBlastSupportGraph>&& value) { ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::NvBlastSupportGraph>::adopt(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::Nv::Blast::Serialization::NvBlastSupportGraph> Asset::Builder::disownGraph() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::NvBlastSupportGraph>::disown(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); } inline ::uint32_t Asset::Reader::getLeafChunkCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); } inline ::uint32_t Asset::Builder::getLeafChunkCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); } inline void Asset::Builder::setLeafChunkCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS, value); } inline ::uint32_t Asset::Reader::getFirstSubsupportChunkIndex() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS); } inline ::uint32_t Asset::Builder::getFirstSubsupportChunkIndex() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS); } inline void Asset::Builder::setFirstSubsupportChunkIndex( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS, value); } inline ::uint32_t Asset::Reader::getBondCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<3>() * ::capnp::ELEMENTS); } inline ::uint32_t Asset::Builder::getBondCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<3>() * ::capnp::ELEMENTS); } inline void Asset::Builder::setBondCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<3>() * ::capnp::ELEMENTS, value); } inline bool Asset::Reader::hasChunks() const { return !_reader.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS).isNull(); } inline bool Asset::Builder::hasChunks() { return !_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>::Reader Asset::Reader::getChunks() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>>::get(_reader.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS)); } inline ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>::Builder Asset::Builder::getChunks() { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>>::get(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS)); } inline void Asset::Builder::setChunks( ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>>::set(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>::Builder Asset::Builder::initChunks(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>>::init(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), size); } inline void Asset::Builder::adoptChunks( ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>>::adopt(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>> Asset::Builder::disownChunks() { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvBlastChunk>>::disown(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS)); } inline bool Asset::Reader::hasBonds() const { return !_reader.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS).isNull(); } inline bool Asset::Builder::hasBonds() { return !_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>::Reader Asset::Reader::getBonds() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>>::get(_reader.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS)); } inline ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>::Builder Asset::Builder::getBonds() { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>>::get(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS)); } inline void Asset::Builder::setBonds( ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>>::set(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>::Builder Asset::Builder::initBonds(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>>::init(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS), size); } inline void Asset::Builder::adoptBonds( ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>>::adopt(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>> Asset::Builder::disownBonds() { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvBlastBond>>::disown(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS)); } inline bool Asset::Reader::hasSubtreeLeafChunkCounts() const { return !_reader.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS).isNull(); } inline bool Asset::Builder::hasSubtreeLeafChunkCounts() { return !_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader Asset::Reader::getSubtreeLeafChunkCounts() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder Asset::Builder::getSubtreeLeafChunkCounts() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS)); } inline void Asset::Builder::setSubtreeLeafChunkCounts( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS), value); } inline void Asset::Builder::setSubtreeLeafChunkCounts(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder Asset::Builder::initSubtreeLeafChunkCounts(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS), size); } inline void Asset::Builder::adoptSubtreeLeafChunkCounts( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> Asset::Builder::disownSubtreeLeafChunkCounts() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS)); } inline bool Asset::Reader::hasChunkToGraphNodeMap() const { return !_reader.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS).isNull(); } inline bool Asset::Builder::hasChunkToGraphNodeMap() { return !_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader Asset::Reader::getChunkToGraphNodeMap() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder Asset::Builder::getChunkToGraphNodeMap() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS)); } inline void Asset::Builder::setChunkToGraphNodeMap( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS), value); } inline void Asset::Builder::setChunkToGraphNodeMap(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder Asset::Builder::initChunkToGraphNodeMap(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS), size); } inline void Asset::Builder::adoptChunkToGraphNodeMap( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> Asset::Builder::disownChunkToGraphNodeMap() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS)); } inline bool Family::Reader::hasAssetID() const { return !_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline bool Family::Builder::hasAssetID() { return !_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline ::Nv::Blast::Serialization::UUID::Reader Family::Reader::getAssetID() const { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::UUID>::get(_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline ::Nv::Blast::Serialization::UUID::Builder Family::Builder::getAssetID() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::UUID>::get(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } #if !CAPNP_LITE inline ::Nv::Blast::Serialization::UUID::Pipeline Family::Pipeline::getAssetID() { return ::Nv::Blast::Serialization::UUID::Pipeline(_typeless.getPointerField(0)); } #endif // !CAPNP_LITE inline void Family::Builder::setAssetID( ::Nv::Blast::Serialization::UUID::Reader value) { ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::UUID>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); } inline ::Nv::Blast::Serialization::UUID::Builder Family::Builder::initAssetID() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::UUID>::init(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline void Family::Builder::adoptAssetID( ::capnp::Orphan< ::Nv::Blast::Serialization::UUID>&& value) { ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::UUID>::adopt(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::Nv::Blast::Serialization::UUID> Family::Builder::disownAssetID() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::UUID>::disown(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline bool Family::Reader::hasActors() const { return !_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline bool Family::Builder::hasActors() { return !_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::Nv::Blast::Serialization::Actor>::Reader Family::Reader::getActors() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::Actor>>::get(_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline ::capnp::List< ::Nv::Blast::Serialization::Actor>::Builder Family::Builder::getActors() { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::Actor>>::get(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline void Family::Builder::setActors( ::capnp::List< ::Nv::Blast::Serialization::Actor>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::Actor>>::set(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::Nv::Blast::Serialization::Actor>::Builder Family::Builder::initActors(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::Actor>>::init(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), size); } inline void Family::Builder::adoptActors( ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::Actor>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::Actor>>::adopt(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::Actor>> Family::Builder::disownActors() { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::Actor>>::disown(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline bool Family::Reader::hasVisibleChunkIndexLinks() const { return !_reader.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS).isNull(); } inline bool Family::Builder::hasVisibleChunkIndexLinks() { return !_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader Family::Reader::getVisibleChunkIndexLinks() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder Family::Builder::getVisibleChunkIndexLinks() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); } inline void Family::Builder::setVisibleChunkIndexLinks( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), value); } inline void Family::Builder::setVisibleChunkIndexLinks(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder Family::Builder::initVisibleChunkIndexLinks(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), size); } inline void Family::Builder::adoptVisibleChunkIndexLinks( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> Family::Builder::disownVisibleChunkIndexLinks() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); } inline bool Family::Reader::hasChunkActorIndices() const { return !_reader.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS).isNull(); } inline bool Family::Builder::hasChunkActorIndices() { return !_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader Family::Reader::getChunkActorIndices() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder Family::Builder::getChunkActorIndices() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS)); } inline void Family::Builder::setChunkActorIndices( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), value); } inline void Family::Builder::setChunkActorIndices(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder Family::Builder::initChunkActorIndices(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), size); } inline void Family::Builder::adoptChunkActorIndices( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> Family::Builder::disownChunkActorIndices() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS)); } inline bool Family::Reader::hasGraphNodeIndexLinks() const { return !_reader.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS).isNull(); } inline bool Family::Builder::hasGraphNodeIndexLinks() { return !_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader Family::Reader::getGraphNodeIndexLinks() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder Family::Builder::getGraphNodeIndexLinks() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS)); } inline void Family::Builder::setGraphNodeIndexLinks( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS), value); } inline void Family::Builder::setGraphNodeIndexLinks(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder Family::Builder::initGraphNodeIndexLinks(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS), size); } inline void Family::Builder::adoptGraphNodeIndexLinks( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> Family::Builder::disownGraphNodeIndexLinks() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS)); } inline bool Family::Reader::hasLowerSupportChunkHealths() const { return !_reader.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS).isNull(); } inline bool Family::Builder::hasLowerSupportChunkHealths() { return !_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List<float>::Reader Family::Reader::getLowerSupportChunkHealths() const { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::get(_reader.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS)); } inline ::capnp::List<float>::Builder Family::Builder::getLowerSupportChunkHealths() { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::get(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS)); } inline void Family::Builder::setLowerSupportChunkHealths( ::capnp::List<float>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::set(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS), value); } inline void Family::Builder::setLowerSupportChunkHealths(::kj::ArrayPtr<const float> value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::set(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS), value); } inline ::capnp::List<float>::Builder Family::Builder::initLowerSupportChunkHealths(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::init(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS), size); } inline void Family::Builder::adoptLowerSupportChunkHealths( ::capnp::Orphan< ::capnp::List<float>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::adopt(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List<float>> Family::Builder::disownLowerSupportChunkHealths() { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::disown(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS)); } inline bool Family::Reader::hasGraphBondHealths() const { return !_reader.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS).isNull(); } inline bool Family::Builder::hasGraphBondHealths() { return !_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List<float>::Reader Family::Reader::getGraphBondHealths() const { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::get(_reader.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS)); } inline ::capnp::List<float>::Builder Family::Builder::getGraphBondHealths() { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::get(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS)); } inline void Family::Builder::setGraphBondHealths( ::capnp::List<float>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::set(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS), value); } inline void Family::Builder::setGraphBondHealths(::kj::ArrayPtr<const float> value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::set(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS), value); } inline ::capnp::List<float>::Builder Family::Builder::initGraphBondHealths(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::init(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS), size); } inline void Family::Builder::adoptGraphBondHealths( ::capnp::Orphan< ::capnp::List<float>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::adopt(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List<float>> Family::Builder::disownGraphBondHealths() { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::disown(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS)); } inline bool Family::Reader::hasFamilyGraph() const { return !_reader.getPointerField( ::capnp::bounded<7>() * ::capnp::POINTERS).isNull(); } inline bool Family::Builder::hasFamilyGraph() { return !_builder.getPointerField( ::capnp::bounded<7>() * ::capnp::POINTERS).isNull(); } inline ::Nv::Blast::Serialization::FamilyGraph::Reader Family::Reader::getFamilyGraph() const { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::FamilyGraph>::get(_reader.getPointerField( ::capnp::bounded<7>() * ::capnp::POINTERS)); } inline ::Nv::Blast::Serialization::FamilyGraph::Builder Family::Builder::getFamilyGraph() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::FamilyGraph>::get(_builder.getPointerField( ::capnp::bounded<7>() * ::capnp::POINTERS)); } #if !CAPNP_LITE inline ::Nv::Blast::Serialization::FamilyGraph::Pipeline Family::Pipeline::getFamilyGraph() { return ::Nv::Blast::Serialization::FamilyGraph::Pipeline(_typeless.getPointerField(7)); } #endif // !CAPNP_LITE inline void Family::Builder::setFamilyGraph( ::Nv::Blast::Serialization::FamilyGraph::Reader value) { ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::FamilyGraph>::set(_builder.getPointerField( ::capnp::bounded<7>() * ::capnp::POINTERS), value); } inline ::Nv::Blast::Serialization::FamilyGraph::Builder Family::Builder::initFamilyGraph() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::FamilyGraph>::init(_builder.getPointerField( ::capnp::bounded<7>() * ::capnp::POINTERS)); } inline void Family::Builder::adoptFamilyGraph( ::capnp::Orphan< ::Nv::Blast::Serialization::FamilyGraph>&& value) { ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::FamilyGraph>::adopt(_builder.getPointerField( ::capnp::bounded<7>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::Nv::Blast::Serialization::FamilyGraph> Family::Builder::disownFamilyGraph() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::FamilyGraph>::disown(_builder.getPointerField( ::capnp::bounded<7>() * ::capnp::POINTERS)); } inline ::uint32_t Family::Reader::getActorCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline ::uint32_t Family::Builder::getActorCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline void Family::Builder::setActorCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS, value); } inline ::uint32_t Family::Reader::getBondCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); } inline ::uint32_t Family::Builder::getBondCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); } inline void Family::Builder::setBondCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS, value); } inline ::uint32_t Family::Reader::getChunkCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS); } inline ::uint32_t Family::Builder::getChunkCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS); } inline void Family::Builder::setChunkCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS, value); } inline ::uint32_t Family::Reader::getNodeCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<3>() * ::capnp::ELEMENTS); } inline ::uint32_t Family::Builder::getNodeCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<3>() * ::capnp::ELEMENTS); } inline void Family::Builder::setNodeCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<3>() * ::capnp::ELEMENTS, value); } inline ::uint32_t Family::Reader::getLowerSupportChunkCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<4>() * ::capnp::ELEMENTS); } inline ::uint32_t Family::Builder::getLowerSupportChunkCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<4>() * ::capnp::ELEMENTS); } inline void Family::Builder::setLowerSupportChunkCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<4>() * ::capnp::ELEMENTS, value); } inline ::uint32_t Family::Reader::getUpperSupportChunkCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<5>() * ::capnp::ELEMENTS); } inline ::uint32_t Family::Builder::getUpperSupportChunkCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<5>() * ::capnp::ELEMENTS); } inline void Family::Builder::setUpperSupportChunkCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<5>() * ::capnp::ELEMENTS, value); } inline ::uint32_t Actor::Reader::getFamilyOffset() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline ::uint32_t Actor::Builder::getFamilyOffset() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline void Actor::Builder::setFamilyOffset( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS, value); } inline ::uint32_t Actor::Reader::getFirstVisibleChunkIndex() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); } inline ::uint32_t Actor::Builder::getFirstVisibleChunkIndex() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); } inline void Actor::Builder::setFirstVisibleChunkIndex( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS, value); } inline ::uint32_t Actor::Reader::getVisibleChunkCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS); } inline ::uint32_t Actor::Builder::getVisibleChunkCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS); } inline void Actor::Builder::setVisibleChunkCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS, value); } inline ::uint32_t Actor::Reader::getFirstGraphNodeIndex() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<3>() * ::capnp::ELEMENTS); } inline ::uint32_t Actor::Builder::getFirstGraphNodeIndex() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<3>() * ::capnp::ELEMENTS); } inline void Actor::Builder::setFirstGraphNodeIndex( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<3>() * ::capnp::ELEMENTS, value); } inline ::uint32_t Actor::Reader::getGraphNodeCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<4>() * ::capnp::ELEMENTS); } inline ::uint32_t Actor::Builder::getGraphNodeCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<4>() * ::capnp::ELEMENTS); } inline void Actor::Builder::setGraphNodeCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<4>() * ::capnp::ELEMENTS, value); } inline ::uint32_t Actor::Reader::getLeafChunkCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<5>() * ::capnp::ELEMENTS); } inline ::uint32_t Actor::Builder::getLeafChunkCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<5>() * ::capnp::ELEMENTS); } inline void Actor::Builder::setLeafChunkCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<5>() * ::capnp::ELEMENTS, value); } inline bool FamilyGraph::Reader::hasIslandIds() const { return !_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline bool FamilyGraph::Builder::hasIslandIds() { return !_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader FamilyGraph::Reader::getIslandIds() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder FamilyGraph::Builder::getIslandIds() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline void FamilyGraph::Builder::setIslandIds( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); } inline void FamilyGraph::Builder::setIslandIds(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder FamilyGraph::Builder::initIslandIds(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), size); } inline void FamilyGraph::Builder::adoptIslandIds( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> FamilyGraph::Builder::disownIslandIds() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline bool FamilyGraph::Reader::hasDirtyNodeLinks() const { return !_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline bool FamilyGraph::Builder::hasDirtyNodeLinks() { return !_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader FamilyGraph::Reader::getDirtyNodeLinks() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder FamilyGraph::Builder::getDirtyNodeLinks() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline void FamilyGraph::Builder::setDirtyNodeLinks( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), value); } inline void FamilyGraph::Builder::setDirtyNodeLinks(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder FamilyGraph::Builder::initDirtyNodeLinks(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), size); } inline void FamilyGraph::Builder::adoptDirtyNodeLinks( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> FamilyGraph::Builder::disownDirtyNodeLinks() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline bool FamilyGraph::Reader::hasFirstDirtyNodeIndices() const { return !_reader.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS).isNull(); } inline bool FamilyGraph::Builder::hasFirstDirtyNodeIndices() { return !_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader FamilyGraph::Reader::getFirstDirtyNodeIndices() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder FamilyGraph::Builder::getFirstDirtyNodeIndices() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); } inline void FamilyGraph::Builder::setFirstDirtyNodeIndices( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), value); } inline void FamilyGraph::Builder::setFirstDirtyNodeIndices(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder FamilyGraph::Builder::initFirstDirtyNodeIndices(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), size); } inline void FamilyGraph::Builder::adoptFirstDirtyNodeIndices( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> FamilyGraph::Builder::disownFirstDirtyNodeIndices() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); } inline bool FamilyGraph::Reader::hasFastRoute() const { return !_reader.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS).isNull(); } inline bool FamilyGraph::Builder::hasFastRoute() { return !_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader FamilyGraph::Reader::getFastRoute() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder FamilyGraph::Builder::getFastRoute() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS)); } inline void FamilyGraph::Builder::setFastRoute( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), value); } inline void FamilyGraph::Builder::setFastRoute(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder FamilyGraph::Builder::initFastRoute(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), size); } inline void FamilyGraph::Builder::adoptFastRoute( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> FamilyGraph::Builder::disownFastRoute() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS)); } inline bool FamilyGraph::Reader::hasHopCounts() const { return !_reader.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS).isNull(); } inline bool FamilyGraph::Builder::hasHopCounts() { return !_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader FamilyGraph::Reader::getHopCounts() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder FamilyGraph::Builder::getHopCounts() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS)); } inline void FamilyGraph::Builder::setHopCounts( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS), value); } inline void FamilyGraph::Builder::setHopCounts(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder FamilyGraph::Builder::initHopCounts(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS), size); } inline void FamilyGraph::Builder::adoptHopCounts( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> FamilyGraph::Builder::disownHopCounts() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<4>() * ::capnp::POINTERS)); } inline bool FamilyGraph::Reader::hasIsEdgeRemoved() const { return !_reader.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS).isNull(); } inline bool FamilyGraph::Builder::hasIsEdgeRemoved() { return !_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS).isNull(); } inline ::capnp::Data::Reader FamilyGraph::Reader::getIsEdgeRemoved() const { return ::capnp::_::PointerHelpers< ::capnp::Data>::get(_reader.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS)); } inline ::capnp::Data::Builder FamilyGraph::Builder::getIsEdgeRemoved() { return ::capnp::_::PointerHelpers< ::capnp::Data>::get(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS)); } inline void FamilyGraph::Builder::setIsEdgeRemoved( ::capnp::Data::Reader value) { ::capnp::_::PointerHelpers< ::capnp::Data>::set(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS), value); } inline ::capnp::Data::Builder FamilyGraph::Builder::initIsEdgeRemoved(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::Data>::init(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS), size); } inline void FamilyGraph::Builder::adoptIsEdgeRemoved( ::capnp::Orphan< ::capnp::Data>&& value) { ::capnp::_::PointerHelpers< ::capnp::Data>::adopt(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::Data> FamilyGraph::Builder::disownIsEdgeRemoved() { return ::capnp::_::PointerHelpers< ::capnp::Data>::disown(_builder.getPointerField( ::capnp::bounded<5>() * ::capnp::POINTERS)); } inline bool FamilyGraph::Reader::hasIsNodeInDirtyList() const { return !_reader.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS).isNull(); } inline bool FamilyGraph::Builder::hasIsNodeInDirtyList() { return !_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS).isNull(); } inline ::capnp::Data::Reader FamilyGraph::Reader::getIsNodeInDirtyList() const { return ::capnp::_::PointerHelpers< ::capnp::Data>::get(_reader.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS)); } inline ::capnp::Data::Builder FamilyGraph::Builder::getIsNodeInDirtyList() { return ::capnp::_::PointerHelpers< ::capnp::Data>::get(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS)); } inline void FamilyGraph::Builder::setIsNodeInDirtyList( ::capnp::Data::Reader value) { ::capnp::_::PointerHelpers< ::capnp::Data>::set(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS), value); } inline ::capnp::Data::Builder FamilyGraph::Builder::initIsNodeInDirtyList(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::Data>::init(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS), size); } inline void FamilyGraph::Builder::adoptIsNodeInDirtyList( ::capnp::Orphan< ::capnp::Data>&& value) { ::capnp::_::PointerHelpers< ::capnp::Data>::adopt(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::Data> FamilyGraph::Builder::disownIsNodeInDirtyList() { return ::capnp::_::PointerHelpers< ::capnp::Data>::disown(_builder.getPointerField( ::capnp::bounded<6>() * ::capnp::POINTERS)); } inline ::uint32_t FamilyGraph::Reader::getNodeCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline ::uint32_t FamilyGraph::Builder::getNodeCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline void FamilyGraph::Builder::setNodeCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS, value); } inline ::Nv::Blast::Serialization::NvBlastDataBlock::Type NvBlastDataBlock::Reader::getDataType() const { return _reader.getDataField< ::Nv::Blast::Serialization::NvBlastDataBlock::Type>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline ::Nv::Blast::Serialization::NvBlastDataBlock::Type NvBlastDataBlock::Builder::getDataType() { return _builder.getDataField< ::Nv::Blast::Serialization::NvBlastDataBlock::Type>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline void NvBlastDataBlock::Builder::setDataType( ::Nv::Blast::Serialization::NvBlastDataBlock::Type value) { _builder.setDataField< ::Nv::Blast::Serialization::NvBlastDataBlock::Type>( ::capnp::bounded<0>() * ::capnp::ELEMENTS, value); } inline ::uint32_t NvBlastDataBlock::Reader::getFormatVersion() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); } inline ::uint32_t NvBlastDataBlock::Builder::getFormatVersion() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); } inline void NvBlastDataBlock::Builder::setFormatVersion( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS, value); } inline ::uint32_t NvBlastDataBlock::Reader::getSize() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS); } inline ::uint32_t NvBlastDataBlock::Builder::getSize() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS); } inline void NvBlastDataBlock::Builder::setSize( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS, value); } inline bool NvBlastChunk::Reader::hasCentroid() const { return !_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline bool NvBlastChunk::Builder::hasCentroid() { return !_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List<float>::Reader NvBlastChunk::Reader::getCentroid() const { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::get(_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline ::capnp::List<float>::Builder NvBlastChunk::Builder::getCentroid() { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::get(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline void NvBlastChunk::Builder::setCentroid( ::capnp::List<float>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); } inline void NvBlastChunk::Builder::setCentroid(::kj::ArrayPtr<const float> value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); } inline ::capnp::List<float>::Builder NvBlastChunk::Builder::initCentroid(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::init(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), size); } inline void NvBlastChunk::Builder::adoptCentroid( ::capnp::Orphan< ::capnp::List<float>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::adopt(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List<float>> NvBlastChunk::Builder::disownCentroid() { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::disown(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline float NvBlastChunk::Reader::getVolume() const { return _reader.getDataField<float>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline float NvBlastChunk::Builder::getVolume() { return _builder.getDataField<float>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline void NvBlastChunk::Builder::setVolume(float value) { _builder.setDataField<float>( ::capnp::bounded<0>() * ::capnp::ELEMENTS, value); } inline ::uint32_t NvBlastChunk::Reader::getParentChunkIndex() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); } inline ::uint32_t NvBlastChunk::Builder::getParentChunkIndex() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); } inline void NvBlastChunk::Builder::setParentChunkIndex( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS, value); } inline ::uint32_t NvBlastChunk::Reader::getFirstChildIndex() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS); } inline ::uint32_t NvBlastChunk::Builder::getFirstChildIndex() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS); } inline void NvBlastChunk::Builder::setFirstChildIndex( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<2>() * ::capnp::ELEMENTS, value); } inline ::uint32_t NvBlastChunk::Reader::getChildIndexStop() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<3>() * ::capnp::ELEMENTS); } inline ::uint32_t NvBlastChunk::Builder::getChildIndexStop() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<3>() * ::capnp::ELEMENTS); } inline void NvBlastChunk::Builder::setChildIndexStop( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<3>() * ::capnp::ELEMENTS, value); } inline ::uint32_t NvBlastChunk::Reader::getUserData() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<4>() * ::capnp::ELEMENTS); } inline ::uint32_t NvBlastChunk::Builder::getUserData() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<4>() * ::capnp::ELEMENTS); } inline void NvBlastChunk::Builder::setUserData( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<4>() * ::capnp::ELEMENTS, value); } inline bool NvBlastBond::Reader::hasNormal() const { return !_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline bool NvBlastBond::Builder::hasNormal() { return !_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List<float>::Reader NvBlastBond::Reader::getNormal() const { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::get(_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline ::capnp::List<float>::Builder NvBlastBond::Builder::getNormal() { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::get(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline void NvBlastBond::Builder::setNormal( ::capnp::List<float>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); } inline void NvBlastBond::Builder::setNormal(::kj::ArrayPtr<const float> value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); } inline ::capnp::List<float>::Builder NvBlastBond::Builder::initNormal(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::init(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), size); } inline void NvBlastBond::Builder::adoptNormal( ::capnp::Orphan< ::capnp::List<float>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::adopt(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List<float>> NvBlastBond::Builder::disownNormal() { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::disown(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline float NvBlastBond::Reader::getArea() const { return _reader.getDataField<float>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline float NvBlastBond::Builder::getArea() { return _builder.getDataField<float>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline void NvBlastBond::Builder::setArea(float value) { _builder.setDataField<float>( ::capnp::bounded<0>() * ::capnp::ELEMENTS, value); } inline bool NvBlastBond::Reader::hasCentroid() const { return !_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline bool NvBlastBond::Builder::hasCentroid() { return !_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List<float>::Reader NvBlastBond::Reader::getCentroid() const { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::get(_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline ::capnp::List<float>::Builder NvBlastBond::Builder::getCentroid() { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::get(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline void NvBlastBond::Builder::setCentroid( ::capnp::List<float>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::set(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), value); } inline void NvBlastBond::Builder::setCentroid(::kj::ArrayPtr<const float> value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::set(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), value); } inline ::capnp::List<float>::Builder NvBlastBond::Builder::initCentroid(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::init(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), size); } inline void NvBlastBond::Builder::adoptCentroid( ::capnp::Orphan< ::capnp::List<float>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List<float>>::adopt(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List<float>> NvBlastBond::Builder::disownCentroid() { return ::capnp::_::PointerHelpers< ::capnp::List<float>>::disown(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline ::uint32_t NvBlastBond::Reader::getUserData() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); } inline ::uint32_t NvBlastBond::Builder::getUserData() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); } inline void NvBlastBond::Builder::setUserData( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<1>() * ::capnp::ELEMENTS, value); } inline ::uint32_t NvBlastSupportGraph::Reader::getNodeCount() const { return _reader.getDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline ::uint32_t NvBlastSupportGraph::Builder::getNodeCount() { return _builder.getDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline void NvBlastSupportGraph::Builder::setNodeCount( ::uint32_t value) { _builder.setDataField< ::uint32_t>( ::capnp::bounded<0>() * ::capnp::ELEMENTS, value); } inline bool NvBlastSupportGraph::Reader::hasChunkIndices() const { return !_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline bool NvBlastSupportGraph::Builder::hasChunkIndices() { return !_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader NvBlastSupportGraph::Reader::getChunkIndices() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder NvBlastSupportGraph::Builder::getChunkIndices() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline void NvBlastSupportGraph::Builder::setChunkIndices( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); } inline void NvBlastSupportGraph::Builder::setChunkIndices(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder NvBlastSupportGraph::Builder::initChunkIndices(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), size); } inline void NvBlastSupportGraph::Builder::adoptChunkIndices( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> NvBlastSupportGraph::Builder::disownChunkIndices() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline bool NvBlastSupportGraph::Reader::hasAdjacencyPartition() const { return !_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline bool NvBlastSupportGraph::Builder::hasAdjacencyPartition() { return !_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader NvBlastSupportGraph::Reader::getAdjacencyPartition() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder NvBlastSupportGraph::Builder::getAdjacencyPartition() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline void NvBlastSupportGraph::Builder::setAdjacencyPartition( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), value); } inline void NvBlastSupportGraph::Builder::setAdjacencyPartition(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder NvBlastSupportGraph::Builder::initAdjacencyPartition(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), size); } inline void NvBlastSupportGraph::Builder::adoptAdjacencyPartition( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> NvBlastSupportGraph::Builder::disownAdjacencyPartition() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline bool NvBlastSupportGraph::Reader::hasAdjacentNodeIndices() const { return !_reader.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS).isNull(); } inline bool NvBlastSupportGraph::Builder::hasAdjacentNodeIndices() { return !_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader NvBlastSupportGraph::Reader::getAdjacentNodeIndices() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder NvBlastSupportGraph::Builder::getAdjacentNodeIndices() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); } inline void NvBlastSupportGraph::Builder::setAdjacentNodeIndices( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), value); } inline void NvBlastSupportGraph::Builder::setAdjacentNodeIndices(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder NvBlastSupportGraph::Builder::initAdjacentNodeIndices(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), size); } inline void NvBlastSupportGraph::Builder::adoptAdjacentNodeIndices( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> NvBlastSupportGraph::Builder::disownAdjacentNodeIndices() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<2>() * ::capnp::POINTERS)); } inline bool NvBlastSupportGraph::Reader::hasAdjacentBondIndices() const { return !_reader.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS).isNull(); } inline bool NvBlastSupportGraph::Builder::hasAdjacentBondIndices() { return !_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader NvBlastSupportGraph::Reader::getAdjacentBondIndices() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder NvBlastSupportGraph::Builder::getAdjacentBondIndices() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS)); } inline void NvBlastSupportGraph::Builder::setAdjacentBondIndices( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), value); } inline void NvBlastSupportGraph::Builder::setAdjacentBondIndices(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder NvBlastSupportGraph::Builder::initAdjacentBondIndices(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), size); } inline void NvBlastSupportGraph::Builder::adoptAdjacentBondIndices( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> NvBlastSupportGraph::Builder::disownAdjacentBondIndices() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<3>() * ::capnp::POINTERS)); } inline bool UUID::Reader::hasValue() const { return !_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline bool UUID::Builder::hasValue() { return !_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline ::capnp::Data::Reader UUID::Reader::getValue() const { return ::capnp::_::PointerHelpers< ::capnp::Data>::get(_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline ::capnp::Data::Builder UUID::Builder::getValue() { return ::capnp::_::PointerHelpers< ::capnp::Data>::get(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline void UUID::Builder::setValue( ::capnp::Data::Reader value) { ::capnp::_::PointerHelpers< ::capnp::Data>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); } inline ::capnp::Data::Builder UUID::Builder::initValue(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::Data>::init(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), size); } inline void UUID::Builder::adoptValue( ::capnp::Orphan< ::capnp::Data>&& value) { ::capnp::_::PointerHelpers< ::capnp::Data>::adopt(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::Data> UUID::Builder::disownValue() { return ::capnp::_::PointerHelpers< ::capnp::Data>::disown(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } } // namespace } // namespace } // namespace #endif // CAPNP_INCLUDED_9a4a58fac38375e0_
119,518
C
41.654889
141
0.682366
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/serialization/generated/NvBlastExtTkSerialization-capn.h
// Generated by Cap'n Proto compiler, DO NOT EDIT // source: NvBlastExtTkSerialization-capn #ifndef CAPNP_INCLUDED_affe4498f275ee58_ #define CAPNP_INCLUDED_affe4498f275ee58_ #include <capnp/generated-header-support.h> #if CAPNP_VERSION != 6001 #error "Version mismatch between generated code and library headers. You must use the same version of the Cap'n Proto compiler and library." #endif #include "NvBlastExtLlSerialization-capn.h" namespace capnp { namespace schemas { CAPNP_DECLARE_SCHEMA(ffd67c4b7067dde6); CAPNP_DECLARE_SCHEMA(b7dbad810488a897); CAPNP_DECLARE_SCHEMA(bf661e95794f2749); } // namespace schemas } // namespace capnp namespace Nv { namespace Blast { namespace Serialization { struct TkAsset { TkAsset() = delete; class Reader; class Builder; class Pipeline; struct _capnpPrivate { CAPNP_DECLARE_STRUCT_HEADER(ffd67c4b7067dde6, 0, 2) #if !CAPNP_LITE static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; } #endif // !CAPNP_LITE }; }; struct TkAssetJointDesc { TkAssetJointDesc() = delete; class Reader; class Builder; class Pipeline; struct _capnpPrivate { CAPNP_DECLARE_STRUCT_HEADER(b7dbad810488a897, 0, 2) #if !CAPNP_LITE static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; } #endif // !CAPNP_LITE }; }; struct NvVec3 { NvVec3() = delete; class Reader; class Builder; class Pipeline; struct _capnpPrivate { CAPNP_DECLARE_STRUCT_HEADER(bf661e95794f2749, 2, 0) #if !CAPNP_LITE static constexpr ::capnp::_::RawBrandedSchema const* brand() { return &schema->defaultBrand; } #endif // !CAPNP_LITE }; }; // ======================================================================================= class TkAsset::Reader { public: typedef TkAsset Reads; Reader() = default; inline explicit Reader(::capnp::_::StructReader base): _reader(base) {} inline ::capnp::MessageSize totalSize() const { return _reader.totalSize().asPublic(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return ::capnp::_::structString(_reader, *_capnpPrivate::brand()); } #endif // !CAPNP_LITE inline bool hasAssetLL() const; inline ::Nv::Blast::Serialization::Asset::Reader getAssetLL() const; inline bool hasJointDescs() const; inline ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>::Reader getJointDescs() const; private: ::capnp::_::StructReader _reader; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; template <typename, ::capnp::Kind> friend struct ::capnp::List; friend class ::capnp::MessageBuilder; friend class ::capnp::Orphanage; }; class TkAsset::Builder { public: typedef TkAsset Builds; Builder() = delete; // Deleted to discourage incorrect usage. // You can explicitly initialize to nullptr instead. inline Builder(decltype(nullptr)) {} inline explicit Builder(::capnp::_::StructBuilder base): _builder(base) {} inline operator Reader() const { return Reader(_builder.asReader()); } inline Reader asReader() const { return *this; } inline ::capnp::MessageSize totalSize() const { return asReader().totalSize(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return asReader().toString(); } #endif // !CAPNP_LITE inline bool hasAssetLL(); inline ::Nv::Blast::Serialization::Asset::Builder getAssetLL(); inline void setAssetLL( ::Nv::Blast::Serialization::Asset::Reader value); inline ::Nv::Blast::Serialization::Asset::Builder initAssetLL(); inline void adoptAssetLL(::capnp::Orphan< ::Nv::Blast::Serialization::Asset>&& value); inline ::capnp::Orphan< ::Nv::Blast::Serialization::Asset> disownAssetLL(); inline bool hasJointDescs(); inline ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>::Builder getJointDescs(); inline void setJointDescs( ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>::Reader value); inline ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>::Builder initJointDescs(unsigned int size); inline void adoptJointDescs(::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>>&& value); inline ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>> disownJointDescs(); private: ::capnp::_::StructBuilder _builder; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; friend class ::capnp::Orphanage; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; }; #if !CAPNP_LITE class TkAsset::Pipeline { public: typedef TkAsset Pipelines; inline Pipeline(decltype(nullptr)): _typeless(nullptr) {} inline explicit Pipeline(::capnp::AnyPointer::Pipeline&& typeless) : _typeless(kj::mv(typeless)) {} inline ::Nv::Blast::Serialization::Asset::Pipeline getAssetLL(); private: ::capnp::AnyPointer::Pipeline _typeless; friend class ::capnp::PipelineHook; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; }; #endif // !CAPNP_LITE class TkAssetJointDesc::Reader { public: typedef TkAssetJointDesc Reads; Reader() = default; inline explicit Reader(::capnp::_::StructReader base): _reader(base) {} inline ::capnp::MessageSize totalSize() const { return _reader.totalSize().asPublic(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return ::capnp::_::structString(_reader, *_capnpPrivate::brand()); } #endif // !CAPNP_LITE inline bool hasNodeIndices() const; inline ::capnp::List< ::uint32_t>::Reader getNodeIndices() const; inline bool hasAttachPositions() const; inline ::capnp::List< ::Nv::Blast::Serialization::NvVec3>::Reader getAttachPositions() const; private: ::capnp::_::StructReader _reader; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; template <typename, ::capnp::Kind> friend struct ::capnp::List; friend class ::capnp::MessageBuilder; friend class ::capnp::Orphanage; }; class TkAssetJointDesc::Builder { public: typedef TkAssetJointDesc Builds; Builder() = delete; // Deleted to discourage incorrect usage. // You can explicitly initialize to nullptr instead. inline Builder(decltype(nullptr)) {} inline explicit Builder(::capnp::_::StructBuilder base): _builder(base) {} inline operator Reader() const { return Reader(_builder.asReader()); } inline Reader asReader() const { return *this; } inline ::capnp::MessageSize totalSize() const { return asReader().totalSize(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return asReader().toString(); } #endif // !CAPNP_LITE inline bool hasNodeIndices(); inline ::capnp::List< ::uint32_t>::Builder getNodeIndices(); inline void setNodeIndices( ::capnp::List< ::uint32_t>::Reader value); inline void setNodeIndices(::kj::ArrayPtr<const ::uint32_t> value); inline ::capnp::List< ::uint32_t>::Builder initNodeIndices(unsigned int size); inline void adoptNodeIndices(::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value); inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> disownNodeIndices(); inline bool hasAttachPositions(); inline ::capnp::List< ::Nv::Blast::Serialization::NvVec3>::Builder getAttachPositions(); inline void setAttachPositions( ::capnp::List< ::Nv::Blast::Serialization::NvVec3>::Reader value); inline ::capnp::List< ::Nv::Blast::Serialization::NvVec3>::Builder initAttachPositions(unsigned int size); inline void adoptAttachPositions(::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::NvVec3>>&& value); inline ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::NvVec3>> disownAttachPositions(); private: ::capnp::_::StructBuilder _builder; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; friend class ::capnp::Orphanage; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; }; #if !CAPNP_LITE class TkAssetJointDesc::Pipeline { public: typedef TkAssetJointDesc Pipelines; inline Pipeline(decltype(nullptr)): _typeless(nullptr) {} inline explicit Pipeline(::capnp::AnyPointer::Pipeline&& typeless) : _typeless(kj::mv(typeless)) {} private: ::capnp::AnyPointer::Pipeline _typeless; friend class ::capnp::PipelineHook; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; }; #endif // !CAPNP_LITE class NvVec3::Reader { public: typedef NvVec3 Reads; Reader() = default; inline explicit Reader(::capnp::_::StructReader base): _reader(base) {} inline ::capnp::MessageSize totalSize() const { return _reader.totalSize().asPublic(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return ::capnp::_::structString(_reader, *_capnpPrivate::brand()); } #endif // !CAPNP_LITE inline float getX() const; inline float getY() const; inline float getZ() const; private: ::capnp::_::StructReader _reader; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; template <typename, ::capnp::Kind> friend struct ::capnp::List; friend class ::capnp::MessageBuilder; friend class ::capnp::Orphanage; }; class NvVec3::Builder { public: typedef NvVec3 Builds; Builder() = delete; // Deleted to discourage incorrect usage. // You can explicitly initialize to nullptr instead. inline Builder(decltype(nullptr)) {} inline explicit Builder(::capnp::_::StructBuilder base): _builder(base) {} inline operator Reader() const { return Reader(_builder.asReader()); } inline Reader asReader() const { return *this; } inline ::capnp::MessageSize totalSize() const { return asReader().totalSize(); } #if !CAPNP_LITE inline ::kj::StringTree toString() const { return asReader().toString(); } #endif // !CAPNP_LITE inline float getX(); inline void setX(float value); inline float getY(); inline void setY(float value); inline float getZ(); inline void setZ(float value); private: ::capnp::_::StructBuilder _builder; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; friend class ::capnp::Orphanage; template <typename, ::capnp::Kind> friend struct ::capnp::_::PointerHelpers; }; #if !CAPNP_LITE class NvVec3::Pipeline { public: typedef NvVec3 Pipelines; inline Pipeline(decltype(nullptr)): _typeless(nullptr) {} inline explicit Pipeline(::capnp::AnyPointer::Pipeline&& typeless) : _typeless(kj::mv(typeless)) {} private: ::capnp::AnyPointer::Pipeline _typeless; friend class ::capnp::PipelineHook; template <typename, ::capnp::Kind> friend struct ::capnp::ToDynamic_; }; #endif // !CAPNP_LITE // ======================================================================================= inline bool TkAsset::Reader::hasAssetLL() const { return !_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline bool TkAsset::Builder::hasAssetLL() { return !_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline ::Nv::Blast::Serialization::Asset::Reader TkAsset::Reader::getAssetLL() const { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::Asset>::get(_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline ::Nv::Blast::Serialization::Asset::Builder TkAsset::Builder::getAssetLL() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::Asset>::get(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } #if !CAPNP_LITE inline ::Nv::Blast::Serialization::Asset::Pipeline TkAsset::Pipeline::getAssetLL() { return ::Nv::Blast::Serialization::Asset::Pipeline(_typeless.getPointerField(0)); } #endif // !CAPNP_LITE inline void TkAsset::Builder::setAssetLL( ::Nv::Blast::Serialization::Asset::Reader value) { ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::Asset>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); } inline ::Nv::Blast::Serialization::Asset::Builder TkAsset::Builder::initAssetLL() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::Asset>::init(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline void TkAsset::Builder::adoptAssetLL( ::capnp::Orphan< ::Nv::Blast::Serialization::Asset>&& value) { ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::Asset>::adopt(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::Nv::Blast::Serialization::Asset> TkAsset::Builder::disownAssetLL() { return ::capnp::_::PointerHelpers< ::Nv::Blast::Serialization::Asset>::disown(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline bool TkAsset::Reader::hasJointDescs() const { return !_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline bool TkAsset::Builder::hasJointDescs() { return !_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>::Reader TkAsset::Reader::getJointDescs() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>>::get(_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>::Builder TkAsset::Builder::getJointDescs() { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>>::get(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline void TkAsset::Builder::setJointDescs( ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>>::set(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>::Builder TkAsset::Builder::initJointDescs(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>>::init(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), size); } inline void TkAsset::Builder::adoptJointDescs( ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>>::adopt(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>> TkAsset::Builder::disownJointDescs() { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::TkAssetJointDesc>>::disown(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline bool TkAssetJointDesc::Reader::hasNodeIndices() const { return !_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline bool TkAssetJointDesc::Builder::hasNodeIndices() { return !_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::uint32_t>::Reader TkAssetJointDesc::Reader::getNodeIndices() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_reader.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline ::capnp::List< ::uint32_t>::Builder TkAssetJointDesc::Builder::getNodeIndices() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::get(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline void TkAssetJointDesc::Builder::setNodeIndices( ::capnp::List< ::uint32_t>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); } inline void TkAssetJointDesc::Builder::setNodeIndices(::kj::ArrayPtr<const ::uint32_t> value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::set(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::uint32_t>::Builder TkAssetJointDesc::Builder::initNodeIndices(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::init(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), size); } inline void TkAssetJointDesc::Builder::adoptNodeIndices( ::capnp::Orphan< ::capnp::List< ::uint32_t>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::adopt(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::uint32_t>> TkAssetJointDesc::Builder::disownNodeIndices() { return ::capnp::_::PointerHelpers< ::capnp::List< ::uint32_t>>::disown(_builder.getPointerField( ::capnp::bounded<0>() * ::capnp::POINTERS)); } inline bool TkAssetJointDesc::Reader::hasAttachPositions() const { return !_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline bool TkAssetJointDesc::Builder::hasAttachPositions() { return !_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS).isNull(); } inline ::capnp::List< ::Nv::Blast::Serialization::NvVec3>::Reader TkAssetJointDesc::Reader::getAttachPositions() const { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvVec3>>::get(_reader.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline ::capnp::List< ::Nv::Blast::Serialization::NvVec3>::Builder TkAssetJointDesc::Builder::getAttachPositions() { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvVec3>>::get(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline void TkAssetJointDesc::Builder::setAttachPositions( ::capnp::List< ::Nv::Blast::Serialization::NvVec3>::Reader value) { ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvVec3>>::set(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), value); } inline ::capnp::List< ::Nv::Blast::Serialization::NvVec3>::Builder TkAssetJointDesc::Builder::initAttachPositions(unsigned int size) { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvVec3>>::init(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), size); } inline void TkAssetJointDesc::Builder::adoptAttachPositions( ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::NvVec3>>&& value) { ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvVec3>>::adopt(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS), kj::mv(value)); } inline ::capnp::Orphan< ::capnp::List< ::Nv::Blast::Serialization::NvVec3>> TkAssetJointDesc::Builder::disownAttachPositions() { return ::capnp::_::PointerHelpers< ::capnp::List< ::Nv::Blast::Serialization::NvVec3>>::disown(_builder.getPointerField( ::capnp::bounded<1>() * ::capnp::POINTERS)); } inline float NvVec3::Reader::getX() const { return _reader.getDataField<float>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline float NvVec3::Builder::getX() { return _builder.getDataField<float>( ::capnp::bounded<0>() * ::capnp::ELEMENTS); } inline void NvVec3::Builder::setX(float value) { _builder.setDataField<float>( ::capnp::bounded<0>() * ::capnp::ELEMENTS, value); } inline float NvVec3::Reader::getY() const { return _reader.getDataField<float>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); } inline float NvVec3::Builder::getY() { return _builder.getDataField<float>( ::capnp::bounded<1>() * ::capnp::ELEMENTS); } inline void NvVec3::Builder::setY(float value) { _builder.setDataField<float>( ::capnp::bounded<1>() * ::capnp::ELEMENTS, value); } inline float NvVec3::Reader::getZ() const { return _reader.getDataField<float>( ::capnp::bounded<2>() * ::capnp::ELEMENTS); } inline float NvVec3::Builder::getZ() { return _builder.getDataField<float>( ::capnp::bounded<2>() * ::capnp::ELEMENTS); } inline void NvVec3::Builder::setZ(float value) { _builder.setDataField<float>( ::capnp::bounded<2>() * ::capnp::ELEMENTS, value); } } // namespace } // namespace } // namespace #endif // CAPNP_INCLUDED_affe4498f275ee58_
20,761
C
37.448148
141
0.682193
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoringCommon/NvBlastExtAuthoringInternalCommon.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTINTERNALCOMMON_H #define NVBLASTINTERNALCOMMON_H #include "NvBlastExtAuthoringTypes.h" #include "NvBlastNvSharedHelpers.h" #include "NvBlastVolumeIntegrals.h" #include "NvVec2.h" #include "NvVec3.h" #include "NvPlane.h" #include "NvBounds3.h" #include "NvMath.h" #include <algorithm> namespace Nv { namespace Blast { /** Edge representation with index of parent facet */ struct EdgeWithParent { uint32_t s, e; // Starting and ending vertices uint32_t parent; // Parent facet index EdgeWithParent() : s(0), e(0), parent(0) {} EdgeWithParent(uint32_t s, uint32_t e, uint32_t p) : s(s), e(e), parent(p) {} }; /** Comparator for sorting edges according to parent facet number. */ struct EdgeComparator { bool operator()(const EdgeWithParent& a, const EdgeWithParent& b) const { if (a.parent == b.parent) { if (a.s == b.s) { return a.e < b.e; } else { return a.s < b.s; } } else { return a.parent < b.parent; } } }; inline bool operator<(const Edge& a, const Edge& b) { if (a.s == b.s) return a.e < b.e; else return a.s < b.s; } /** Vertex projection direction flag. */ enum ProjectionDirections { YZ_PLANE = 1 << 1, XY_PLANE = 1 << 2, ZX_PLANE = 1 << 3, // This is set when the dominant axis of the normal is negative // because when flattening to 2D the facet is viewed from the positive direction. // As a result, the winding order appears to flip if the normal is in the negative direction. OPPOSITE_WINDING = 1 << 4 }; /** Computes best direction to project points. */ NV_FORCE_INLINE ProjectionDirections getProjectionDirection(const nvidia::NvVec3& normal) { float maxv = std::max(std::abs(normal.x), std::max(std::abs(normal.y), std::abs(normal.z))); ProjectionDirections retVal; if (maxv == std::abs(normal.x)) { retVal = YZ_PLANE; if (normal.x < 0) retVal = (ProjectionDirections)((int)retVal | (int)OPPOSITE_WINDING); return retVal; } if (maxv == std::abs(normal.y)) { retVal = ZX_PLANE; if (normal.y > 0) retVal = (ProjectionDirections)((int)retVal | (int)OPPOSITE_WINDING); return retVal; } retVal = XY_PLANE; if (normal.z < 0) retVal = (ProjectionDirections)((int)retVal | (int)OPPOSITE_WINDING); return retVal; } /** Computes point projected on given axis aligned plane. */ NV_FORCE_INLINE nvidia::NvVec2 getProjectedPoint(const nvidia::NvVec3& point, ProjectionDirections dir) { if (dir & YZ_PLANE) { return nvidia::NvVec2(point.y, point.z); } if (dir & ZX_PLANE) { return nvidia::NvVec2(point.x, point.z); } return nvidia::NvVec2(point.x, point.y); } NV_FORCE_INLINE nvidia::NvVec2 getProjectedPoint(const NvcVec3& point, ProjectionDirections dir) { return getProjectedPoint((const nvidia::NvVec3&)point, dir); } /** Computes point projected on given axis aligned plane, this method is polygon-winding aware. */ NV_FORCE_INLINE nvidia::NvVec2 getProjectedPointWithWinding(const nvidia::NvVec3& point, ProjectionDirections dir) { if (dir & YZ_PLANE) { if (dir & OPPOSITE_WINDING) { return nvidia::NvVec2(point.z, point.y); } else return nvidia::NvVec2(point.y, point.z); } if (dir & ZX_PLANE) { if (dir & OPPOSITE_WINDING) { return nvidia::NvVec2(point.z, point.x); } return nvidia::NvVec2(point.x, point.z); } if (dir & OPPOSITE_WINDING) { return nvidia::NvVec2(point.y, point.x); } return nvidia::NvVec2(point.x, point.y); } #define MAXIMUM_EXTENT 1000 * 1000 * 1000 #define BBOX_TEST_EPS 1e-5f /** Test fattened bounding box intersetion. */ NV_INLINE bool weakBoundingBoxIntersection(const nvidia::NvBounds3& aBox, const nvidia::NvBounds3& bBox) { if (std::max(aBox.minimum.x, bBox.minimum.x) > std::min(aBox.maximum.x, bBox.maximum.x) + BBOX_TEST_EPS) return false; if (std::max(aBox.minimum.y, bBox.minimum.y) > std::min(aBox.maximum.y, bBox.maximum.y) + BBOX_TEST_EPS) return false; if (std::max(aBox.minimum.z, bBox.minimum.z) > std::min(aBox.maximum.z, bBox.maximum.z) + BBOX_TEST_EPS) return false; return true; } /** Test segment vs plane intersection. If segment intersects the plane true is returned. Point of intersection is written into 'result'. */ NV_INLINE bool getPlaneSegmentIntersection(const nvidia::NvPlane& pl, const nvidia::NvVec3& a, const nvidia::NvVec3& b, nvidia::NvVec3& result) { float div = (b - a).dot(pl.n); if (nvidia::NvAbs(div) < 0.0001f) { if (pl.contains(a)) { result = a; return true; } else { return false; } } float t = (-a.dot(pl.n) - pl.d) / div; if (t < 0.0f || t > 1.0f) { return false; } result = (b - a) * t + a; return true; } #define POS_COMPARISON_OFFSET 1e-5f #define NORM_COMPARISON_OFFSET 1e-3f /** Vertex comparator for vertex welding. */ template<bool splitUVs> struct VrtCompare { // This implements a "less than" function for vertices. // Vertices a and b are considered equivalent if !(a < b) && !(b < a) bool operator()(const Vertex& a, const Vertex& b) const { if (a.p.x + POS_COMPARISON_OFFSET < b.p.x) return true; if (a.p.x - POS_COMPARISON_OFFSET > b.p.x) return false; if (a.p.y + POS_COMPARISON_OFFSET < b.p.y) return true; if (a.p.y - POS_COMPARISON_OFFSET > b.p.y) return false; if (a.p.z + POS_COMPARISON_OFFSET < b.p.z) return true; if (a.p.z - POS_COMPARISON_OFFSET > b.p.z) return false; if (a.n.x + NORM_COMPARISON_OFFSET < b.n.x) return true; if (a.n.x - NORM_COMPARISON_OFFSET > b.n.x) return false; if (a.n.y + NORM_COMPARISON_OFFSET < b.n.y) return true; if (a.n.y - NORM_COMPARISON_OFFSET > b.n.y) return false; if (a.n.z + NORM_COMPARISON_OFFSET < b.n.z) return true; if (a.n.z - NORM_COMPARISON_OFFSET > b.n.z) return false; // This is not actually needed if (!splitUVs) if (!splitUVs) return false; if (a.uv[0].x + NORM_COMPARISON_OFFSET < b.uv[0].x) return true; if (a.uv[0].x - NORM_COMPARISON_OFFSET > b.uv[0].x) return false; if (a.uv[0].y + NORM_COMPARISON_OFFSET < b.uv[0].y) return true; if (a.uv[0].y - NORM_COMPARISON_OFFSET > b.uv[0].y) return false; // This is not actually needed return false; }; }; typedef VrtCompare<true> VrtComp; typedef VrtCompare<false> VrtCompNoUV; /** Vertex comparator for vertex welding (not accounts normal and uv parameters of vertice). */ struct VrtPositionComparator { bool operator()(const NvcVec3& a, const NvcVec3& b) const { if (a.x + POS_COMPARISON_OFFSET < b.x) return true; if (a.x - POS_COMPARISON_OFFSET > b.x) return false; if (a.y + POS_COMPARISON_OFFSET < b.y) return true; if (a.y - POS_COMPARISON_OFFSET > b.y) return false; if (a.z + POS_COMPARISON_OFFSET < b.z) return true; if (a.z - POS_COMPARISON_OFFSET > b.z) return false; return false; }; bool operator()(const Vertex& a, const Vertex& b) const { return operator()(a.p, b.p); }; }; NV_INLINE float calculateCollisionHullVolumeAndCentroid(NvcVec3& centroid, const CollisionHull& hull) { class CollisionHullQuery { public: CollisionHullQuery(const CollisionHull& hull) : m_hull(hull) {} size_t faceCount() const { return (size_t)m_hull.polygonDataCount; } size_t vertexCount(size_t faceIndex) const { return (size_t)m_hull.polygonData[faceIndex].vertexCount; } NvcVec3 vertex(size_t faceIndex, size_t vertexIndex) const { return m_hull.points[m_hull.indices[m_hull.polygonData[faceIndex].indexBase + vertexIndex]]; } private: const CollisionHull& m_hull; }; return calculateMeshVolumeAndCentroid<CollisionHullQuery>(centroid, hull); } } // namespace Blast } // namespace Nv #endif
9,905
C
30.150943
133
0.639475
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoringCommon/NvBlastExtAuthoringAcceleratorImpl.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastExtAuthoringAcceleratorImpl.h" #include "NvBlastExtAuthoringMesh.h" #include "NvBlastExtAuthoringInternalCommon.h" #include "NvBlastGlobals.h" #include "NvBlastNvSharedHelpers.h" #include "NvCMath.h" namespace Nv { namespace Blast { DummyAccelerator::DummyAccelerator(int32_t count) : m_count(count) { m_current = 0; } void DummyAccelerator::release() { NVBLAST_DELETE(this, DummyAccelerator); } void DummyAccelerator::setState(const Vertex* pos, const Edge* ed, const Facet& fc) { m_current = 0; NV_UNUSED(pos); NV_UNUSED(ed); NV_UNUSED(fc); } void DummyAccelerator::setState(const NvcBounds3* bound) { m_current = 0; NV_UNUSED(bound); } void DummyAccelerator::setState(const NvcVec3& point) { m_current = 0; NV_UNUSED(point); } int32_t DummyAccelerator::getNextFacet() { if (m_current < m_count) { ++m_current; return m_current - 1; } else return -1; } Grid::Grid(int32_t resolution) : m_resolution(resolution) { /** Set up 3d grid */ m_r3 = resolution * resolution * resolution; m_spatialMap.resize(resolution * resolution * resolution); } void Grid::release() { NVBLAST_DELETE(this, Grid); } void Grid::setMesh(const Mesh* m) { nvidia::NvBounds3 bd = toNvShared(m->getBoundingBox()); m_mappedFacetCount = m->getFacetCount(); bd.fattenFast(0.001f); m_spos = fromNvShared(bd.minimum); m_deltas = { m_resolution / bd.getDimensions().x, m_resolution / bd.getDimensions().y, m_resolution / bd.getDimensions().z }; for (int32_t i = 0; i < m_r3; ++i) m_spatialMap[i].clear(); const float ofs = 0.001f; for (uint32_t fc = 0; fc < m->getFacetCount(); ++fc) { NvcBounds3 cfc = *m->getFacetBound(fc); int32_t is = (int32_t)std::max(0.f, (cfc.minimum.x - m_spos.x - ofs) * m_deltas.x); int32_t ie = (int32_t)std::max(0.f, (cfc.maximum.x - m_spos.x + ofs) * m_deltas.x); int32_t js = (int32_t)std::max(0.f, (cfc.minimum.y - m_spos.y - ofs) * m_deltas.y); int32_t je = (int32_t)std::max(0.f, (cfc.maximum.y - m_spos.y + ofs) * m_deltas.y); int32_t ks = (int32_t)std::max(0.f, (cfc.minimum.z - m_spos.z - ofs) * m_deltas.z); int32_t ke = (int32_t)std::max(0.f, (cfc.maximum.z - m_spos.z + ofs) * m_deltas.z); for (int32_t i = is; i < m_resolution && i <= ie; ++i) { for (int32_t j = js; j < m_resolution && j <= je; ++j) { for (int32_t k = ks; k < m_resolution && k <= ke; ++k) { m_spatialMap[(i * m_resolution + j) * m_resolution + k].push_back(fc); } } } } } GridAccelerator::GridAccelerator(Grid* grd) { m_grid = grd; m_alreadyGotValue = 0; m_alreadyGotFlag.resize(1 << 12); m_cellList.resize(1 << 12); m_pointCmdDir = 0; } void GridAccelerator::release() { NVBLAST_DELETE(this, GridAccelerator); } void GridAccelerator::setState(const Vertex* pos, const Edge* ed, const Facet& fc) { nvidia::NvBounds3 cfc(nvidia::NvBounds3::empty()); for (uint32_t v = 0; v < fc.edgesCount; ++v) { cfc.include(toNvShared(pos[ed[fc.firstEdgeNumber + v].s].p)); cfc.include(toNvShared(pos[ed[fc.firstEdgeNumber + v].e].p)); } setState(&fromNvShared(cfc)); } void GridAccelerator::setState(const NvcBounds3* facetBounding) { m_alreadyGotValue++; m_iteratorCell = -1; m_iteratorFacet = -1; m_gotCells = 0; NvcBounds3 cfc = *facetBounding; int32_t is = (int32_t)std::max(0.f, (cfc.minimum.x - m_grid->m_spos.x - 0.001f) * m_grid->m_deltas.x); int32_t ie = (int32_t)std::max(0.f, (cfc.maximum.x - m_grid->m_spos.x + 0.001f) * m_grid->m_deltas.x); int32_t js = (int32_t)std::max(0.f, (cfc.minimum.y - m_grid->m_spos.y - 0.001f) * m_grid->m_deltas.y); int32_t je = (int32_t)std::max(0.f, (cfc.maximum.y - m_grid->m_spos.y + 0.001f) * m_grid->m_deltas.y); int32_t ks = (int32_t)std::max(0.f, (cfc.minimum.z - m_grid->m_spos.z - 0.001f) * m_grid->m_deltas.z); int32_t ke = (int32_t)std::max(0.f, (cfc.maximum.z - m_grid->m_spos.z + 0.001f) * m_grid->m_deltas.z); for (int32_t i = is; i < m_grid->m_resolution && i <= ie; ++i) { for (int32_t j = js; j < m_grid->m_resolution && j <= je; ++j) { for (int32_t k = ks; k < m_grid->m_resolution && k <= ke; ++k) { int32_t id = (i * m_grid->m_resolution + j) * m_grid->m_resolution + k; if (!m_grid->m_spatialMap[id].empty()) { m_cellList[m_gotCells++] = id; } } } } if (m_gotCells != 0) { m_iteratorFacet = 0; m_iteratorCell = m_cellList[m_gotCells - 1]; m_gotCells--; } } void GridAccelerator::setPointCmpDirection(int32_t d) { m_pointCmdDir = d; } void GridAccelerator::setState(const NvcVec3& point) { m_alreadyGotValue++; m_iteratorCell = -1; m_iteratorFacet = -1; m_gotCells = 0; int32_t is = (int32_t)std::max(0.f, (point.x - m_grid->m_spos.x - 0.001f) * m_grid->m_deltas.x); int32_t ie = (int32_t)std::max(0.f, (point.x - m_grid->m_spos.x + 0.001f) * m_grid->m_deltas.x); int32_t js = (int32_t)std::max(0.f, (point.y - m_grid->m_spos.y - 0.001f) * m_grid->m_deltas.y); int32_t je = (int32_t)std::max(0.f, (point.y - m_grid->m_spos.y + 0.001f) * m_grid->m_deltas.y); int32_t ks = 0; int32_t ke = m_grid->m_resolution; switch (m_pointCmdDir) { case 1: ks = (int32_t)std::max(0.f, (point.z - m_grid->m_spos.z - 0.001f) * m_grid->m_deltas.z); break; case -1: ke = (int32_t)std::max(0.f, (point.z - m_grid->m_spos.z + 0.001f) * m_grid->m_deltas.z); } for (int32_t i = is; i < m_grid->m_resolution && i <= ie; ++i) { for (int32_t j = js; j < m_grid->m_resolution && j <= je; ++j) { for (int32_t k = ks; k <= ke && k < m_grid->m_resolution; ++k) { int32_t id = (i * m_grid->m_resolution + j) * m_grid->m_resolution + k; if (!m_grid->m_spatialMap[id].empty()) { m_cellList[m_gotCells++] = id; } } } } if (m_gotCells != 0) { m_iteratorFacet = 0; m_iteratorCell = m_cellList[m_gotCells - 1]; m_gotCells--; } } int32_t GridAccelerator::getNextFacet() { int32_t facetId = -1; while (m_iteratorCell != -1) { if (m_iteratorFacet >= (int32_t)m_grid->m_spatialMap[m_iteratorCell].size()) { if (m_gotCells != 0) { m_iteratorCell = m_cellList[m_gotCells - 1]; m_gotCells--; m_iteratorFacet = 0; } else { m_iteratorCell = -1; break; } } if (m_alreadyGotFlag[m_grid->m_spatialMap[m_iteratorCell][m_iteratorFacet]] != m_alreadyGotValue) { facetId = m_grid->m_spatialMap[m_iteratorCell][m_iteratorFacet]; m_iteratorFacet++; break; } else { m_iteratorFacet++; } } if (facetId != -1) { m_alreadyGotFlag[facetId] = m_alreadyGotValue; } return facetId; } BBoxBasedAccelerator::BBoxBasedAccelerator(const Mesh* mesh, int32_t resolution) : m_resolution(resolution), m_alreadyGotValue(1) { m_bounds = mesh->getBoundingBox(); m_spatialMap.resize(resolution * resolution * resolution); m_cells.resize(resolution * resolution * resolution); int32_t currentCell = 0; NvcVec3 incr = (m_bounds.maximum - m_bounds.minimum) * (1.0f / m_resolution); for (int32_t z = 0; z < resolution; ++z) { for (int32_t y = 0; y < resolution; ++y) { for (int32_t x = 0; x < resolution; ++x) { m_cells[currentCell].minimum.x = m_bounds.minimum.x + x * incr.x; m_cells[currentCell].minimum.y = m_bounds.minimum.y + y * incr.y; m_cells[currentCell].minimum.z = m_bounds.minimum.z + z * incr.z; m_cells[currentCell].maximum.x = m_bounds.minimum.x + (x + 1) * incr.x; m_cells[currentCell].maximum.y = m_bounds.minimum.y + (y + 1) * incr.y; m_cells[currentCell].maximum.z = m_bounds.minimum.z + (z + 1) * incr.z; ++currentCell; } } } m_cellList.resize(1 << 16); m_gotCells = 0; buildAccelStructure(mesh->getVertices(), mesh->getEdges(), mesh->getFacetsBuffer(), mesh->getFacetCount()); } void BBoxBasedAccelerator::release() { NVBLAST_DELETE(this, BBoxBasedAccelerator); } BBoxBasedAccelerator::~BBoxBasedAccelerator() { m_resolution = 0; toNvShared(m_bounds).setEmpty(); m_spatialMap.clear(); m_cells.clear(); m_cellList.clear(); } int32_t BBoxBasedAccelerator::getNextFacet() { int32_t facetId = -1; while (m_iteratorCell != -1) { if (m_iteratorFacet >= (int32_t)m_spatialMap[m_iteratorCell].size()) { if (m_gotCells != 0) { m_iteratorCell = m_cellList[m_gotCells - 1]; m_gotCells--; m_iteratorFacet = 0; } else { m_iteratorCell = -1; break; } } if (m_alreadyGotFlag[m_spatialMap[m_iteratorCell][m_iteratorFacet]] != m_alreadyGotValue) { facetId = m_spatialMap[m_iteratorCell][m_iteratorFacet]; m_iteratorFacet++; break; } else { m_iteratorFacet++; } } if (facetId != -1) { m_alreadyGotFlag[facetId] = m_alreadyGotValue; } return facetId; } void BBoxBasedAccelerator::setState(const Vertex* pos, const Edge* ed, const Facet& fc) { nvidia::NvBounds3 cfc(nvidia::NvBounds3::empty()); for (uint32_t v = 0; v < fc.edgesCount; ++v) { cfc.include(toNvShared(pos[ed[fc.firstEdgeNumber + v].s].p)); cfc.include(toNvShared(pos[ed[fc.firstEdgeNumber + v].e].p)); } setState(&fromNvShared(cfc)); } void BBoxBasedAccelerator::setState(const NvcBounds3* facetBox) { m_alreadyGotValue++; m_iteratorCell = -1; m_iteratorFacet = -1; m_gotCells = 0; for (uint32_t i = 0; i < m_cells.size(); ++i) { if (weakBoundingBoxIntersection(toNvShared(m_cells[i]), *toNvShared(facetBox))) { if (!m_spatialMap[i].empty()) m_cellList[m_gotCells++] = i; } } if (m_gotCells != 0) { m_iteratorFacet = 0; m_iteratorCell = m_cellList[m_gotCells - 1]; m_gotCells--; } } void BBoxBasedAccelerator::setState(const NvcVec3& p) { m_alreadyGotValue++; m_iteratorCell = -1; m_iteratorFacet = -1; m_gotCells = 0; int32_t perSlice = m_resolution * m_resolution; for (uint32_t i = 0; i < m_cells.size(); ++i) { if (toNvShared(m_cells[i]).contains(toNvShared(p))) { int32_t xyCellId = i % perSlice; for (int32_t zCell = 0; zCell < m_resolution; ++zCell) { int32_t cell = zCell * perSlice + xyCellId; if (!m_spatialMap[cell].empty()) m_cellList[m_gotCells++] = cell; } } } if (m_gotCells != 0) { m_iteratorFacet = 0; m_iteratorCell = m_cellList[m_gotCells - 1]; m_gotCells--; } } void BBoxBasedAccelerator::buildAccelStructure(const Vertex* pos, const Edge* edges, const Facet* fc, int32_t facetCount) { for (int32_t facet = 0; facet < facetCount; ++facet) { nvidia::NvBounds3 bBox; bBox.setEmpty(); const Edge* edge = &edges[0] + fc->firstEdgeNumber; int32_t count = fc->edgesCount; for (int32_t ec = 0; ec < count; ++ec) { bBox.include(toNvShared(pos[edge->s].p)); bBox.include(toNvShared(pos[edge->e].p)); edge++; } for (uint32_t i = 0; i < m_cells.size(); ++i) { if (weakBoundingBoxIntersection(toNvShared(m_cells[i]), bBox)) { m_spatialMap[i].push_back(facet); } } fc++; } m_alreadyGotFlag.resize(facetCount, 0); } #define SWEEP_RESOLUTION 2048 void buildIndex(std::vector<SegmentToIndex>& segm, float offset, float mlt, std::vector<std::vector<uint32_t>>& blocks) { std::set<uint32_t> currentEnabled; uint32_t lastBlock = 0; for (uint32_t i = 0; i < segm.size(); ++i) { uint32_t currentBlock = (uint32_t)((segm[i].coord - offset) * mlt); if (currentBlock >= SWEEP_RESOLUTION) break; if (currentBlock != lastBlock) { for (uint32_t j = lastBlock + 1; j <= currentBlock; ++j) { for (auto id : currentEnabled) blocks[j].push_back(id); } lastBlock = currentBlock; } if (segm[i].end == false) { blocks[lastBlock].push_back(segm[i].index); currentEnabled.insert(segm[i].index); } else { currentEnabled.erase(segm[i].index); } } } SweepingAccelerator::SweepingAccelerator(const Nv::Blast::Mesh* in) { nvidia::NvBounds3 bnd; const Vertex* verts = in->getVertices(); const Edge* edges = in->getEdges(); m_facetCount = in->getFacetCount(); m_foundx.resize(m_facetCount, 0); m_foundy.resize(m_facetCount, 0); std::vector<SegmentToIndex> xevs; std::vector<SegmentToIndex> yevs; std::vector<SegmentToIndex> zevs; for (uint32_t i = 0; i < in->getFacetCount(); ++i) { const Facet* fc = in->getFacet(i); bnd.setEmpty(); for (uint32_t v = 0; v < fc->edgesCount; ++v) { bnd.include(toNvShared(verts[edges[v + fc->firstEdgeNumber].s].p)); } bnd.scaleFast(1.1f); xevs.push_back(SegmentToIndex(bnd.minimum.x, i, false)); xevs.push_back(SegmentToIndex(bnd.maximum.x, i, true)); yevs.push_back(SegmentToIndex(bnd.minimum.y, i, false)); yevs.push_back(SegmentToIndex(bnd.maximum.y, i, true)); zevs.push_back(SegmentToIndex(bnd.minimum.z, i, false)); zevs.push_back(SegmentToIndex(bnd.maximum.z, i, true)); } std::sort(xevs.begin(), xevs.end()); std::sort(yevs.begin(), yevs.end()); std::sort(zevs.begin(), zevs.end()); m_minimal.x = xevs[0].coord; m_minimal.y = yevs[0].coord; m_minimal.z = zevs[0].coord; m_maximal.x = xevs.back().coord; m_maximal.y = yevs.back().coord; m_maximal.z = zevs.back().coord; m_rescale = (m_maximal - m_minimal) * 1.01f; m_rescale.x = 1.0f / m_rescale.x * SWEEP_RESOLUTION; m_rescale.y = 1.0f / m_rescale.y * SWEEP_RESOLUTION; m_rescale.z = 1.0f / m_rescale.z * SWEEP_RESOLUTION; m_xSegm.resize(SWEEP_RESOLUTION); m_ySegm.resize(SWEEP_RESOLUTION); m_zSegm.resize(SWEEP_RESOLUTION); buildIndex(xevs, m_minimal.x, m_rescale.x, m_xSegm); buildIndex(yevs, m_minimal.y, m_rescale.y, m_ySegm); buildIndex(zevs, m_minimal.z, m_rescale.z, m_zSegm); m_iterId = 1; m_current = 0; } void SweepingAccelerator::release() { NVBLAST_DELETE(this, SweepingAccelerator); } void SweepingAccelerator::setState(const NvcBounds3* facetBounds) { m_current = 0; m_indices.clear(); nvidia::NvBounds3 bnd = *toNvShared(facetBounds); bnd.scaleFast(1.1f); uint32_t start = (uint32_t)((std::max(0.0f, bnd.minimum.x - m_minimal.x)) * m_rescale.x); uint32_t end = (uint32_t)((std::max(0.0f, bnd.maximum.x - m_minimal.x)) * m_rescale.x); for (uint32_t i = start; i <= end && i < SWEEP_RESOLUTION; ++i) { for (auto id : m_xSegm[i]) { m_foundx[id] = m_iterId; } } start = (uint32_t)((std::max(0.0f, bnd.minimum.y - m_minimal.y)) * m_rescale.y); end = (uint32_t)((std::max(0.0f, bnd.maximum.y - m_minimal.y)) * m_rescale.y); for (uint32_t i = start; i <= end && i < SWEEP_RESOLUTION; ++i) { for (auto id : m_ySegm[i]) { m_foundy[id] = m_iterId; } } start = (uint32_t)((std::max(0.0f, bnd.minimum.z - m_minimal.z)) * m_rescale.z); end = (uint32_t)((std::max(0.0f, bnd.maximum.z - m_minimal.z)) * m_rescale.z); for (uint32_t i = start; i <= end && i < SWEEP_RESOLUTION; ++i) { for (auto id : m_zSegm[i]) { if (m_foundy[id] == m_iterId && m_foundx[id] == m_iterId) { m_foundx[id] = m_iterId + 1; m_foundy[id] = m_iterId + 1; m_indices.push_back(id); } } } m_iterId += 2; } void SweepingAccelerator::setState(const Vertex* pos, const Edge* ed, const Facet& fc) { nvidia::NvBounds3 cfc(nvidia::NvBounds3::empty()); for (uint32_t v = 0; v < fc.edgesCount; ++v) { cfc.include(toNvShared(pos[ed[fc.firstEdgeNumber + v].s].p)); cfc.include(toNvShared(pos[ed[fc.firstEdgeNumber + v].e].p)); } setState(&fromNvShared(cfc)); } void SweepingAccelerator::setState(const NvcVec3& point) { m_indices.clear(); /*for (uint32_t i = 0; i < facetCount; ++i) { indices.push_back(i); }*/ uint32_t yIndex = (uint32_t)((point.y - m_minimal.y) * m_rescale.y); uint32_t xIndex = (uint32_t)((point.x - m_minimal.x) * m_rescale.x); for (uint32_t i = 0; i < m_xSegm[xIndex].size(); ++i) { m_foundx[m_xSegm[xIndex][i]] = m_iterId; } for (uint32_t i = 0; i < m_ySegm[yIndex].size(); ++i) { if (m_foundx[m_ySegm[yIndex][i]] == m_iterId) { m_indices.push_back(m_ySegm[yIndex][i]); } } m_iterId++; m_current = 0; NV_UNUSED(point); } int32_t SweepingAccelerator::getNextFacet() { if (static_cast<uint32_t>(m_current) < m_indices.size()) { ++m_current; return m_indices[m_current - 1]; } else return -1; } } // namespace Blast } // namespace Nv
19,981
C++
28.602963
129
0.563285
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoringCommon/NvBlastExtAuthoringAcceleratorImpl.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTEXTAUTHORINGACCELERATORIMPL_H #define NVBLASTEXTAUTHORINGACCELERATORIMPL_H #include <set> #include <vector> #include "NvBlastExtAuthoringAccelerator.h" namespace Nv { namespace Blast { class Mesh; /** Dummy accelerator iterates through all facets of mesh. */ class DummyAccelerator : public SpatialAccelerator { public: /** \param[in] count Mesh facets count for which accelerator should be built. */ DummyAccelerator(int32_t count); virtual void release() override; virtual void setState(const NvcBounds3* bounds) override; virtual void setState(const Vertex* pos, const Edge* ed, const Facet& fc) override; virtual void setState(const NvcVec3& point) override; virtual int32_t getNextFacet() override; virtual void setPointCmpDirection(int32_t dir) override { NV_UNUSED(dir); } private: int32_t m_count; int32_t m_current; }; struct SegmentToIndex { float coord; uint32_t index; bool end; SegmentToIndex(float c, uint32_t i, bool end) : coord(c), index(i), end(end) {} bool operator<(const SegmentToIndex& in) const { if (coord < in.coord) return true; if (coord > in.coord) return false; return end < in.end; } }; class Grid : public SpatialGrid { public: friend class GridAccelerator; Grid(int32_t resolution); virtual void release() override; virtual void setMesh(const Nv::Blast::Mesh* m) override; private: int32_t m_resolution; int32_t m_r3; int32_t m_mappedFacetCount; NvcVec3 m_spos; NvcVec3 m_deltas; std::vector< std::vector<int32_t> > m_spatialMap; }; class GridAccelerator : public SpatialAccelerator // Iterator to traverse the grid { public: GridAccelerator(Grid* grd); virtual void release() override; virtual void setState(const NvcBounds3* bounds) override; virtual void setState(const Vertex* pos, const Edge* ed, const Facet& fc) override; virtual void setState(const NvcVec3& point) override; virtual int32_t getNextFacet() override; virtual void setPointCmpDirection(int32_t dir) override; private: Grid* m_grid; // Iterator data std::vector<uint32_t> m_alreadyGotFlag; uint32_t m_alreadyGotValue; std::vector<int32_t> m_cellList; int32_t m_gotCells; int32_t m_iteratorCell; int32_t m_iteratorFacet; int32_t m_pointCmdDir; }; class SweepingAccelerator : public SpatialAccelerator { public: /** \param[in] count Mesh facets count for which accelerator should be built. */ SweepingAccelerator(const Nv::Blast::Mesh* in); virtual void release() override; virtual void setState(const Vertex* pos, const Edge* ed, const Facet& fc) override; virtual void setState(const NvcBounds3* bounds) override; virtual void setState(const NvcVec3& point) override; virtual int32_t getNextFacet() override; virtual void setPointCmpDirection(int32_t dir) override { NV_UNUSED(dir); } private: /* For fast point test. */ std::vector<std::vector<uint32_t> > m_xSegm; std::vector<std::vector<uint32_t> > m_ySegm; std::vector<std::vector<uint32_t> > m_zSegm; std::vector<uint32_t> m_indices; std::vector<uint32_t> m_foundx; std::vector<uint32_t> m_foundy; uint32_t m_iterId; int32_t m_current; uint32_t m_facetCount; NvcVec3 m_minimal; NvcVec3 m_maximal; NvcVec3 m_rescale; }; /** Accelerator which builds map from 3d grid to initial mesh facets. To find all facets which possibly intersect given one, it return all facets which are pointed by grid cells, which intersects with bounding box of given facet. To find all facets which possibly cover given point, all facets which are pointed by cells in column which contains given point are returned. */ class BBoxBasedAccelerator : public SpatialAccelerator { public: /** \param[in] mesh Mesh for which acceleration structure should be built. \param[in] resolution Resolution on 3d grid. */ BBoxBasedAccelerator(const Mesh* mesh, int32_t resolution); virtual ~BBoxBasedAccelerator(); virtual void release() override; virtual int32_t getNextFacet() override; virtual void setState(const Vertex* pos, const Edge* ed, const Facet& fc) override; virtual void setState(const NvcBounds3* bounds) override; virtual void setState(const NvcVec3& p) override; virtual void setPointCmpDirection(int32_t dir) override { NV_UNUSED(dir); } private: void buildAccelStructure(const Vertex* pos, const Edge* edges, const Facet* fc, int32_t facetCount); int32_t m_resolution; NvcBounds3 m_bounds; std::vector< std::vector<int32_t> > m_spatialMap; std::vector<NvcBounds3> m_cells; // Iterator data std::vector<uint32_t> m_alreadyGotFlag; uint32_t m_alreadyGotValue; std::vector<int32_t> m_cellList; int32_t m_gotCells; int32_t m_iteratorCell; int32_t m_iteratorFacet; }; } // namespace Blast } // namsepace Nv #endif // ifndef NVBLASTEXTAUTHORINGACCELERATORIMPL_H
7,801
C
35.12037
171
0.610563
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoringCommon/NvBlastExtAuthoringMeshImpl.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTAUTHORINGMESHIMPL_H #define NVBLASTAUTHORINGMESHIMPL_H #include "NvBlastExtAuthoringMesh.h" #include "NvBounds3.h" #include <vector> #include <map> #include <set> namespace Nv { namespace Blast { /** Class for internal mesh representation */ class MeshImpl : public Mesh { public: /** Constructs mesh object from array of triangles. \param[in] position Array of vertex positions \param[in] normals Array of vertex normals \param[in] uv Array of vertex uv coordinates \param[in] verticesCount Vertices count \param[in] indices Array of vertex indices. Indices contain vertex index triplets which form a mesh triangle. \param[in] indicesCount Indices count (should be equal to numberOfTriangles * 3) */ MeshImpl(const NvcVec3* position, const NvcVec3* normals, const NvcVec2* uv, uint32_t verticesCount, const uint32_t* indices, uint32_t indicesCount); /** Constructs mesh object from array of facets. \param[in] vertices Array of vertices \param[in] edges Array of edges \param[in] facets Array of facets \param[in] posCount Vertices count \param[in] edgesCount Edges count \param[in] facetsCount Facets count */ MeshImpl(const Vertex* vertices, const Edge* edges, const Facet* facets, uint32_t posCount, uint32_t edgesCount, uint32_t facetsCount); MeshImpl(const Vertex* vertices, uint32_t count); MeshImpl(const Vertex* vertices, uint32_t count, uint32_t* indices, uint32_t indexCount, void* materials, uint32_t materialStride); ~MeshImpl(); virtual void release() override; /** Return true if mesh is valid */ bool isValid() const override; /** Return pointer on vertices array */ Vertex* getVerticesWritable() override; /** Return pointer on edges array */ Edge* getEdgesWritable() override; /** Return pointer on facets array */ Facet* getFacetsBufferWritable() override; /** Return pointer on vertices array */ const Vertex* getVertices() const override; /** Return pointer on edges array */ const Edge* getEdges() const override; /** Return pointer on facets array */ const Facet* getFacetsBuffer() const override; /** Return writable pointer on specified facet */ Facet* getFacetWritable(int32_t facet) override; /** Return writable pointer on specified facet */ const Facet* getFacet(int32_t facet) const override; /** Return edges count */ uint32_t getEdgesCount() const override; /** Return vertices count */ uint32_t getVerticesCount() const override; /** Return facet count */ uint32_t getFacetCount() const override; /** Return reference on mesh bounding box. */ const NvcBounds3& getBoundingBox() const override; /** Return writable reference on mesh bounding box. */ NvcBounds3& getBoundingBoxWritable() override; /** Recalculate bounding box */ void recalculateBoundingBox() override; /** Compute mesh volume and centroid. Assumes mesh has outward normals and no holes. */ float getMeshVolumeAndCentroid(NvcVec3& centroid) const override; /** Set per-facet material id. */ void setMaterialId(const int32_t* materialIds) override; /** Replaces an material id on faces with a new one */ void replaceMaterialId(int32_t oldMaterialId, int32_t newMaterialId) override; /** Set per-facet smoothing group. */ void setSmoothingGroup(const int32_t* smoothingGroups) override; /** Calculate per-facet bounding boxes. */ virtual void calcPerFacetBounds() override; /** Get pointer on facet bounding box, if not calculated return nullptr. */ virtual const NvcBounds3* getFacetBound(uint32_t index) const override; private: std::vector<Vertex> mVertices; std::vector<Edge> mEdges; std::vector<Facet> mFacets; nvidia::NvBounds3 mBounds; std::vector<nvidia::NvBounds3> mPerFacetBounds; }; } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTAUTHORINGMESHIMPL_H
6,150
C
30.22335
153
0.657724
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoringCommon/NvBlastExtAuthoringMeshImpl.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #define _CRT_SECURE_NO_WARNINGS #include "NvBlastExtAuthoringMeshImpl.h" #include "NvBlastExtAuthoringTypes.h" #include <NvBlastAssert.h> #include "NvMath.h" #include <NvBlastNvSharedHelpers.h> #include <NvBlastVolumeIntegrals.h> #include <cmath> #include <string.h> #include <vector> #include <algorithm> namespace Nv { namespace Blast { MeshImpl::MeshImpl(const NvcVec3* position, const NvcVec3* normals, const NvcVec2* uv, uint32_t verticesCount, const uint32_t* indices, uint32_t indicesCount) { mVertices.resize(verticesCount); for (uint32_t i = 0; i < mVertices.size(); ++i) { mVertices[i].p = position[i]; } if (normals != 0) { for (uint32_t i = 0; i < mVertices.size(); ++i) { mVertices[i].n = normals[i]; } } else { for (uint32_t i = 0; i < mVertices.size(); ++i) { mVertices[i].n = {0, 0, 0}; } } if (uv != 0) { for (uint32_t i = 0; i < mVertices.size(); ++i) { mVertices[i].uv[0] = uv[i]; } } else { for (uint32_t i = 0; i < mVertices.size(); ++i) { mVertices[i].uv[0] = {0, 0}; } } mEdges.resize(indicesCount); mFacets.resize(indicesCount / 3); int32_t facetId = 0; for (uint32_t i = 0; i < indicesCount; i += 3) { mEdges[i].s = indices[i]; mEdges[i].e = indices[i + 1]; mEdges[i + 1].s = indices[i + 1]; mEdges[i + 1].e = indices[i + 2]; mEdges[i + 2].s = indices[i + 2]; mEdges[i + 2].e = indices[i]; mFacets[facetId].firstEdgeNumber = i; mFacets[facetId].edgesCount = 3; mFacets[facetId].materialId = 0; //Unassigned for now mFacets[facetId].smoothingGroup = -1; facetId++; } recalculateBoundingBox(); } MeshImpl::MeshImpl(const Vertex* vertices, const Edge* edges, const Facet* facets, uint32_t posCount, uint32_t edgesCount, uint32_t facetsCount) { mVertices.resize(posCount); mEdges.resize(edgesCount); mFacets.resize(facetsCount); memcpy(mVertices.data(), vertices, sizeof(Vertex) * posCount); memcpy(mEdges.data(), edges, sizeof(Edge) * edgesCount); memcpy(mFacets.data(), facets, sizeof(Facet) * facetsCount); recalculateBoundingBox(); } MeshImpl::MeshImpl(const Vertex* vertices, uint32_t count) { mVertices = std::vector<Vertex>(vertices, vertices + count); mEdges.resize(count); mFacets.resize(count / 3); uint32_t vp = 0; for (uint32_t i = 0; i < count; i += 3) { mEdges[i].s = vp; mEdges[i].e = vp + 1; mEdges[i + 1].s = vp + 1; mEdges[i + 1].e = vp + 2; mEdges[i + 2].s = vp + 2; mEdges[i + 2].e = vp; vp += 3; } for (uint32_t i = 0; i < count / 3; ++i) { mFacets[i].edgesCount = 3; mFacets[i].firstEdgeNumber = i * 3; } recalculateBoundingBox(); } MeshImpl::MeshImpl(const Vertex* vertices, uint32_t count, uint32_t* indices, uint32_t indexCount, void* materials, uint32_t materialStride) { mVertices = std::vector<Vertex>(vertices, vertices + count); mEdges.resize(indexCount); mFacets.resize(indexCount / 3); for (uint32_t i = 0; i < indexCount; i += 3) { mEdges[i].s = indices[i]; mEdges[i].e = indices[i + 1]; mEdges[i + 1].s = indices[i + 1]; mEdges[i + 1].e = indices[i + 2]; mEdges[i + 2].s = indices[i + 2]; mEdges[i + 2].e = indices[i]; } for (uint32_t i = 0; i < indexCount / 3; ++i) { mFacets[i].edgesCount = 3; mFacets[i].firstEdgeNumber = i * 3; mFacets[i].userData = 0; if (materials != nullptr) { mFacets[i].materialId = *(uint32_t*)((uint8_t*)materials + i * materialStride); } } recalculateBoundingBox(); } float MeshImpl::getMeshVolumeAndCentroid(NvcVec3& centroid) const { class MeshImplQuery { public: MeshImplQuery(const MeshImpl& mesh) : m_mesh(mesh) {} size_t faceCount() const { return (size_t)m_mesh.getFacetCount(); } size_t vertexCount(size_t faceIndex) const { return (size_t)m_mesh.getFacet((int32_t)faceIndex)->edgesCount; } NvcVec3 vertex(size_t faceIndex, size_t vertexIndex) const { const Nv::Blast::Facet* facet = m_mesh.getFacet(faceIndex); return m_mesh.getVertices()[m_mesh.getEdges()[facet->firstEdgeNumber + vertexIndex].s].p; } const MeshImpl& m_mesh; }; return calculateMeshVolumeAndCentroid<MeshImplQuery>(centroid, *this); } uint32_t MeshImpl::getFacetCount() const { return static_cast<uint32_t>(mFacets.size()); } Vertex* MeshImpl::getVerticesWritable() { return mVertices.data(); } Edge* MeshImpl::getEdgesWritable() { return mEdges.data(); } const Vertex* MeshImpl::getVertices() const { return mVertices.data(); } const Edge* MeshImpl::getEdges() const { return mEdges.data(); } uint32_t MeshImpl::getEdgesCount() const { return static_cast<uint32_t>(mEdges.size()); } uint32_t MeshImpl::getVerticesCount() const { return static_cast<uint32_t>(mVertices.size()); } Facet* MeshImpl::getFacetsBufferWritable() { return mFacets.data(); } const Facet* MeshImpl::getFacetsBuffer() const { return mFacets.data(); } Facet* MeshImpl::getFacetWritable(int32_t facet) { return &mFacets[facet]; } const Facet* MeshImpl::getFacet(int32_t facet) const { return &mFacets[facet]; } MeshImpl::~MeshImpl() { } void MeshImpl::release() { delete this; } const NvcBounds3& MeshImpl::getBoundingBox() const { return fromNvShared(mBounds); } NvcBounds3& MeshImpl::getBoundingBoxWritable() { return fromNvShared(mBounds); } void MeshImpl::recalculateBoundingBox() { mBounds.setEmpty(); for (uint32_t i = 0; i < mVertices.size(); ++i) { mBounds.include(toNvShared(mVertices[i].p)); } calcPerFacetBounds(); } const NvcBounds3* MeshImpl::getFacetBound(uint32_t index) const { if (mPerFacetBounds.empty()) { return nullptr; } return &fromNvShared(mPerFacetBounds[index]); } void MeshImpl::calcPerFacetBounds() { mPerFacetBounds.resize(mFacets.size()); for (uint32_t i = 0; i < mFacets.size(); ++i) { auto& fb = mPerFacetBounds[i]; fb.setEmpty(); for (uint32_t v = 0; v < mFacets[i].edgesCount; ++v) { fb.include(toNvShared(mVertices[mEdges[mFacets[i].firstEdgeNumber + v].s].p)); fb.include(toNvShared(mVertices[mEdges[mFacets[i].firstEdgeNumber + v].e].p)); } } } void MeshImpl::setMaterialId(const int32_t* materialId) { if (materialId != nullptr) { for (uint32_t i = 0; i < mFacets.size(); ++i) { mFacets[i].materialId = *materialId; ++materialId; } } } bool MeshImpl::isValid() const { return mVertices.size() > 0 && mEdges.size() > 0 && mFacets.size() > 0; } void MeshImpl::replaceMaterialId(int32_t oldMaterialId, int32_t newMaterialId) { for (uint32_t i = 0; i < mFacets.size(); ++i) { if (mFacets[i].materialId == oldMaterialId) { mFacets[i].materialId = newMaterialId; } } } void MeshImpl::setSmoothingGroup(const int32_t* smoothingGroups) { if (smoothingGroups != nullptr) { for (uint32_t i = 0; i < mFacets.size(); ++i) { mFacets[i].smoothingGroup = *smoothingGroups; ++smoothingGroups; } } } } // namespace Blast } // namespace Nv
9,218
C++
25.34
144
0.622152
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkActorImpl.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvPreprocessor.h" #include "NvBlastTkFrameworkImpl.h" #include "NvBlastTkActorImpl.h" #include "NvBlastTkGroupImpl.h" #include "NvBlastTkAssetImpl.h" #include "NvBlastTkFamilyImpl.h" #include "NvBlastTkJointImpl.h" #include "NvBlast.h" #include "NvBlastAssert.h" #include "NvBlastMemory.h" namespace Nv { namespace Blast { TkActorImpl* TkActorImpl::create(const TkActorDesc& desc) { const TkAssetImpl* asset = static_cast<const TkAssetImpl*>(desc.asset); TkFamilyImpl* family = TkFamilyImpl::create(asset); NvBlastFamily* familyLL = family->getFamilyLLInternal(); Array<char>::type scratch((uint32_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(familyLL, logLL)); NvBlastActor* actorLL = NvBlastFamilyCreateFirstActor(familyLL, &desc, scratch.begin(), logLL); if (actorLL == nullptr) { NVBLAST_LOG_ERROR("TkActorImpl::create: low-level actor could not be created."); return nullptr; } TkActorImpl* actor = family->addActor(actorLL); if (actor != nullptr) { // Add internal joints const uint32_t internalJointCount = asset->getJointDescCountInternal(); const TkAssetJointDesc* jointDescs = asset->getJointDescsInternal(); const NvBlastSupportGraph graph = asset->getGraph(); TkJointImpl* joints = family->getInternalJoints(); for (uint32_t jointNum = 0; jointNum < internalJointCount; ++jointNum) { const TkAssetJointDesc& assetJointDesc = jointDescs[jointNum]; NVBLAST_ASSERT(assetJointDesc.nodeIndices[0] < graph.nodeCount && assetJointDesc.nodeIndices[1] < graph.nodeCount); TkJointDesc jointDesc; jointDesc.families[0] = jointDesc.families[1] = family; jointDesc.chunkIndices[0] = graph.chunkIndices[assetJointDesc.nodeIndices[0]]; jointDesc.chunkIndices[1] = graph.chunkIndices[assetJointDesc.nodeIndices[1]]; jointDesc.attachPositions[0] = assetJointDesc.attachPositions[0]; jointDesc.attachPositions[1] = assetJointDesc.attachPositions[1]; TkJointImpl* joint = new (joints + jointNum) TkJointImpl(jointDesc, family); actor->addJoint(joint->m_links[0]); } // Mark as damaged to trigger first split call. It could be the case that asset is already split into few actors initially. actor->markAsDamaged(); } return actor; } //////// Member functions //////// TkActorImpl::TkActorImpl() : m_actorLL(nullptr) , m_family(nullptr) , m_group(nullptr) , m_groupJobIndex(invalidIndex<uint32_t>()) , m_flags(0) , m_jointCount(0) { #if NV_PROFILE NvBlastTimersReset(&m_timers); #endif } TkActorImpl::~TkActorImpl() { } void TkActorImpl::release() { // Disassoaciate all joints // Copy joint array for safety against implementation of joint->setActor TkJointImpl** joints = reinterpret_cast<TkJointImpl**>(NvBlastAlloca(sizeof(TkJointImpl*)*getJointCountInternal())); TkJointImpl** stop = joints + getJointCountInternal(); TkJointImpl** jointHandle = joints; for (JointIt j(*this); (bool)j; ++j) { *jointHandle++ = *j; } jointHandle = joints; while (jointHandle < stop) { NVBLAST_ASSERT(*jointHandle != nullptr); NVBLAST_ASSERT((*jointHandle)->getDataInternal().actors[0] == this || (*jointHandle)->getDataInternal().actors[1] == this); (*jointHandle++)->setActors(nullptr, nullptr); } NVBLAST_ASSERT(getJointCountInternal() == 0); if (m_group != nullptr) { m_group->removeActor(*this); } if (m_actorLL != nullptr) { NvBlastActorDeactivate(m_actorLL, logLL); } if (m_family != nullptr) { m_family->removeActor(this); // Make sure we dispatch any remaining events when this family is emptied, since it will no longer be done by any group if (m_family->getActorCountInternal() == 0) { m_family->getQueue().dispatch(); } } } const NvBlastActor* TkActorImpl::getActorLL() const { return m_actorLL; } TkFamily& TkActorImpl::getFamily() const { return getFamilyImpl(); } uint32_t TkActorImpl::getIndex() const { return getIndexInternal(); } TkGroup* TkActorImpl::getGroup() const { return getGroupImpl(); } TkGroup* TkActorImpl::removeFromGroup() { if (m_group == nullptr) { NVBLAST_LOG_WARNING("TkActorImpl::removeFromGroup: actor not in a group."); return nullptr; } if (m_group->isProcessing()) { NVBLAST_LOG_ERROR("TkActorImpl::removeFromGroup: cannot alter Group while processing."); return nullptr; } TkGroup* group = m_group; return m_group->removeActor(*this) ? group : nullptr; } NvBlastFamily* TkActorImpl::getFamilyLL() const { return m_family->getFamilyLLInternal(); } const TkAsset* TkActorImpl::getAsset() const { return m_family->getAssetImpl(); } uint32_t TkActorImpl::getVisibleChunkCount() const { return NvBlastActorGetVisibleChunkCount(m_actorLL, logLL); } uint32_t TkActorImpl::getVisibleChunkIndices(uint32_t* visibleChunkIndices, uint32_t visibleChunkIndicesSize) const { return NvBlastActorGetVisibleChunkIndices(visibleChunkIndices, visibleChunkIndicesSize, m_actorLL, logLL); } uint32_t TkActorImpl::getGraphNodeCount() const { return NvBlastActorGetGraphNodeCount(m_actorLL, logLL); } uint32_t TkActorImpl::getGraphNodeIndices(uint32_t* graphNodeIndices, uint32_t graphNodeIndicesSize) const { return NvBlastActorGetGraphNodeIndices(graphNodeIndices, graphNodeIndicesSize, m_actorLL, logLL); } const float* TkActorImpl::getBondHealths() const { return NvBlastActorGetBondHealths(m_actorLL, logLL); } uint32_t TkActorImpl::getSplitMaxActorCount() const { return NvBlastActorGetMaxActorCountForSplit(m_actorLL, logLL); } bool TkActorImpl::isDamaged() const { NVBLAST_ASSERT(!m_flags.isSet(TkActorFlag::DAMAGED) || (m_flags.isSet(TkActorFlag::DAMAGED) && m_flags.isSet(TkActorFlag::PENDING))); return m_flags.isSet(TkActorFlag::DAMAGED); } void TkActorImpl::markAsDamaged() { m_flags |= TkActorFlag::DAMAGED; makePending(); } void TkActorImpl::makePending() { if (m_group != nullptr && !isPending()) { m_group->enqueue(this); } m_flags |= TkActorFlag::PENDING; } TkActorImpl::operator Nv::Blast::TkActorData() const { TkActorData data = { m_family, userData, getIndex() }; return data; } void TkActorImpl::damage(const NvBlastDamageProgram& program, const void* programParams) { BLAST_PROFILE_SCOPE_L("TkActor::damage"); if (m_group == nullptr) { NVBLAST_LOG_WARNING("TkActor::damage: actor is not in a group, cannot fracture."); return; } if (m_group->isProcessing()) { NVBLAST_LOG_WARNING("TkActor::damage: group is being processed, cannot fracture this actor."); return; } if (NvBlastActorCanFracture(m_actorLL, logLL)) { m_damageBuffer.pushBack(DamageData{ program, programParams}); makePending(); } } void TkActorImpl::generateFracture(NvBlastFractureBuffers* commands, const NvBlastDamageProgram& program, const void* programParams) const { BLAST_PROFILE_SCOPE_L("TkActor::generateFracture"); if (m_group && m_group->isProcessing()) { NVBLAST_LOG_WARNING("TkActor::generateFracture: group is being processed, cannot fracture this actor."); return; } // const context, must make m_timers mutable otherwise NvBlastActorGenerateFracture(commands, m_actorLL, program, programParams, logLL, const_cast<NvBlastTimers*>(&m_timers)); } void TkActorImpl::applyFracture(NvBlastFractureBuffers* eventBuffers, const NvBlastFractureBuffers* commands) { BLAST_PROFILE_SCOPE_L("TkActor::applyFracture"); if (m_group && m_group->isProcessing()) { NVBLAST_LOG_WARNING("TkActor::applyFracture: group is being processed, cannot fracture this actor."); return; } NvBlastActorApplyFracture(eventBuffers, m_actorLL, commands, logLL, &m_timers); if (commands->chunkFractureCount > 0 || commands->bondFractureCount > 0) { markAsDamaged(); TkFractureCommands* fevt = getFamilyImpl().getQueue().allocData<TkFractureCommands>(); fevt->tkActorData = *this; fevt->buffers = *commands; getFamilyImpl().getQueue().addEvent(fevt); getFamilyImpl().getQueue().dispatch(); } } uint32_t TkActorImpl::getJointCount() const { return getJointCountInternal(); } uint32_t TkActorImpl::getJoints(TkJoint** joints, uint32_t jointsSize) const { uint32_t jointsWritten = 0; for (JointIt j(*this); (bool)j && jointsWritten < jointsSize; ++j) { joints[jointsWritten++] = *j; } return jointsWritten; } bool TkActorImpl::hasExternalBonds() const { return NvBlastActorHasExternalBonds(m_actorLL, logLL); } } // namespace Blast } // namespace Nv
10,590
C++
27.394102
138
0.694523
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkGUID.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTTKGUID_H #define NVBLASTTKGUID_H #include "NvPreprocessor.h" #if NV_WINDOWS_FAMILY #include <rpc.h> #else //#include <uuid/uuid.h> #include "NvBlastTime.h" #endif #include "NsHash.h" namespace Nv { namespace Blast { #if NV_WINDOWS_FAMILY NV_INLINE NvBlastID TkGenerateGUID(void* ptr) { NV_UNUSED(ptr); NV_COMPILE_TIME_ASSERT(sizeof(UUID) == sizeof(NvBlastID)); NvBlastID guid; UuidCreate(reinterpret_cast<UUID*>(&guid)); return guid; } #else NV_INLINE NvBlastID TkGenerateGUID(void* ptr) { // NV_COMPILE_TIME_ASSERT(sizeof(uuid_t) == sizeof(NvBlastID)); Time time; NvBlastID guid; // uuid_generate_random(reinterpret_cast<uuid_t&>(guid)); *reinterpret_cast<uint64_t*>(guid.data) = reinterpret_cast<uintptr_t>(ptr); *reinterpret_cast<int64_t*>(guid.data + 8) = time.getLastTickCount(); return guid; } #endif /** Compares two NvBlastIDs. \param[in] id1 A pointer to the first id to compare. \param[in] id2 A pointer to the second id to compare. \return true iff ids are equal. */ NV_INLINE bool TkGUIDsEqual(const NvBlastID* id1, const NvBlastID* id2) { return !memcmp(id1, id2, sizeof(NvBlastID)); } /** Clears an NvBlastID (sets all of its fields to zero). \param[out] id A pointer to the ID to clear. */ NV_INLINE void TkGUIDReset(NvBlastID* id) { memset(id, 0, sizeof(NvBlastID)); } /** Tests an NvBlastID to determine if it's zeroed. After calling TkGUIDReset on an ID, passing it to this function will return a value of true. \param[in] id A pointer to the ID to test. */ NV_INLINE bool TkGUIDIsZero(const NvBlastID* id) { return *reinterpret_cast<const uint64_t*>(&id->data[0]) == 0 && *reinterpret_cast<const uint64_t*>(&id->data[8]) == 0; } } // namespace Blast } // namespace Nv namespace nvidia { namespace shdfnd { // hash specialization for NvBlastID template <> struct Hash<NvBlastID> { uint32_t operator()(const NvBlastID& k) const { // "DJB" string hash uint32_t h = 5381; for (uint32_t i = 0; i < sizeof(k.data) / sizeof(k.data[0]); ++i) h = ((h << 5) + h) ^ uint32_t(k.data[i]); return h; } bool equal(const NvBlastID& k0, const NvBlastID& k1) const { return Nv::Blast::TkGUIDsEqual(&k0, &k1); } }; } // namespace shdfnd } // namespace nvidia #endif // #ifndef NVBLASTTKGUID_H
3,944
C
25.655405
122
0.704615
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkTaskImpl.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTTKTASKIMPL_H #define NVBLASTTKTASKIMPL_H #include "NvBlast.h" #include "NvBlastTkFrameworkImpl.h" #include "NvBlastTkEventQueue.h" #include "NvBlastArray.h" #include <atomic> #include <mutex> #include <condition_variable> #include "NvBlastAssert.h" #include "NvBlastTkGroup.h" // TkGroupStats namespace Nv { namespace Blast { class TkGroupImpl; class TkActorImpl; class TkFamilyImpl; /** Transient structure describing a job and its results. */ struct TkWorkerJob { TkActorImpl* m_tkActor; //!< the actor to process TkActorImpl** m_newActors; //!< list of child actors created by splitting uint32_t m_newActorsCount; //!< the number of child actors created }; /** A list of equally sized memory blocks sharable between tasks. */ template<typename T> class SharedBlock { public: SharedBlock() : m_numElementsPerBlock(0), m_numBlocks(0), m_buffer(nullptr) {} /** Allocates one large memory block of elementsPerBlock*numBlocks elements. */ void allocate(uint32_t elementsPerBlock, uint32_t numBlocks) { NVBLAST_ASSERT(elementsPerBlock > 0 && numBlocks > 0); m_buffer = reinterpret_cast<T*>(NVBLAST_ALLOC_NAMED(elementsPerBlock*numBlocks*sizeof(T), "SharedBlock")); m_numElementsPerBlock = elementsPerBlock; m_numBlocks = numBlocks; } /** Returns the pointer to the first element of a block of numElementsPerBlock() elements. */ T* getBlock(uint32_t id) { NVBLAST_ASSERT(id < m_numBlocks || 0 == m_numElementsPerBlock); return &m_buffer[id*m_numElementsPerBlock]; } /** The number of elements available per block. */ uint32_t numElementsPerBlock() const { return m_numElementsPerBlock; } /** Frees the whole memory block. */ void release() { m_numBlocks = 0; m_numElementsPerBlock = 0; NVBLAST_FREE(m_buffer); m_buffer = nullptr; } private: uint32_t m_numElementsPerBlock; //!< elements available in one block uint32_t m_numBlocks; //!< number of virtual blocks available T* m_buffer; //!< contiguous memory for all blocks }; /** A preallocated, shared array from which can be allocated from in tasks. Intended to be used when the maximum amount of data (e.g. for a family) is known in advance. No further allocations take place on exhaustion. Exhaustion asserts in debug builds and overflows otherwise. */ template<typename T> class SharedBuffer { public: SharedBuffer() : m_capacity(0), m_used(0), m_buffer(nullptr) {} /** Atomically gets a pointer to the first element of an array of n elements. */ T* reserve(size_t n) { NVBLAST_ASSERT(m_used + n <= m_capacity); size_t start = m_used.fetch_add(n); return &m_buffer[start]; } /** Preallocates memory for capacity elements. */ void allocate(size_t capacity) { NVBLAST_ASSERT(m_buffer == nullptr); m_buffer = reinterpret_cast<T*>(NVBLAST_ALLOC_NAMED(capacity*sizeof(T), "SplitMemory")); m_capacity = capacity; } /** Preserves the memory allocated but resets to reserve from the beginning of the array. */ void reset() { m_used = 0; } /** Frees the preallocated array. */ void release() { NVBLAST_ASSERT(m_buffer != nullptr); NVBLAST_FREE(m_buffer); m_buffer = nullptr; m_capacity = m_used = 0; } private: size_t m_capacity; //!< available elements in the buffer std::atomic<size_t> m_used; //!< used elements in the buffer T* m_buffer; //!< the memory containing T's }; /** Allocates from a preallocated, externally owned memory block initialized with. When blocks run out of space, new ones are allocated and owned by this class. */ template<typename T> class LocalBuffer { public: /** Returns the pointer to the first element of an array of n elements. Allocates a new block of memory when exhausted, its size being the larger of n and capacity set with initialize(). */ T* allocate(size_t n) { if (m_used + n > m_capacity) { allocateNewBlock(n > m_capacity ? n : m_capacity); } size_t index = m_used; m_used += n; return &m_currentBlock[index]; } /** Release the additionally allocated memory blocks. The externally owned memory block remains untouched. */ void clear() { for (void* block : m_memoryBlocks) { NVBLAST_FREE(block); } m_memoryBlocks.clear(); } /** Set the externally owned memory block to start allocating from, with a size of capacity elements. */ void initialize(T* block, size_t capacity) { m_currentBlock = block; m_capacity = capacity; m_used = 0; } private: /** Allocates space for capacity elements. */ void allocateNewBlock(size_t capacity) { BLAST_PROFILE_SCOPE_L("Local Buffer allocation"); m_capacity = capacity; m_currentBlock = static_cast<T*>(NVBLAST_ALLOC_NAMED(capacity*sizeof(T), "Blast LocalBuffer")); m_memoryBlocks.pushBack(m_currentBlock); m_used = 0; } InlineArray<void*, 4>::type m_memoryBlocks; //!< storage for memory blocks T* m_currentBlock; //!< memory block used to allocate from size_t m_used; //!< elements used in current block size_t m_capacity; //!< elements available in current block }; /** Holds the memory used by TkWorker for each family in each group. */ class SharedMemory { public: SharedMemory() : m_eventsMemory(0), m_eventsCount(0), m_refCount(0) {} /** Reserves n entries from preallocated memory. */ NvBlastActor** reserveNewActors(size_t n) { return m_newActorBuffers.reserve(n); } /** Reserves n entries from preallocated memory. */ TkActor** reserveNewTkActors(size_t n) { return m_newTkActorBuffers.reserve(n); } /** Allocates buffers to hold */ void allocate(TkFamilyImpl&); /** Resets the internal buffers to reserve from their beginning. Preserves the allocated memory. */ void reset() { m_newActorBuffers.reset(); m_newTkActorBuffers.reset(); } /** Increments the reference count. */ void addReference() { m_refCount++; } /** Increments the reference count by n. */ void addReference(size_t n) { m_refCount += n; } /** Decrements the reference count. Returns true if the count reached zero. */ bool removeReference() { m_refCount--; return !isUsed(); } /** Checks if the reference count is not zero. */ bool isUsed() { return m_refCount > 0; } /** Release the internal buffers' memory. */ void release() { m_newActorBuffers.release(); m_newTkActorBuffers.release(); } TkEventQueue m_events; //!< event queue shared across a group's actors of the same family uint32_t m_eventsMemory; //!< expected memory size for event data uint32_t m_eventsCount; //!< expected number of events private: size_t m_refCount; //!< helper for usage and releasing memory SharedBuffer<NvBlastActor*> m_newActorBuffers; //!< memory for splitting SharedBuffer<TkActor*> m_newTkActorBuffers; //!< memory for split events }; /** Thread worker fracturing and splitting actors sequentially. The list of actual jobs is provided by the group owning this worker. */ class TkWorker final : public TkGroupWorker { public: TkWorker() : m_id(~(uint32_t)0), m_group(nullptr), m_isBusy(false) {} void process(uint32_t jobID); void initialize(); void process(TkWorkerJob& job); uint32_t m_id; //!< this worker's id TkGroupImpl* m_group; //!< the group owning this worker LocalBuffer<NvBlastChunkFractureData> m_chunkBuffer; //!< memory manager for chunk event data LocalBuffer<NvBlastBondFractureData> m_bondBuffer; //!< memory manager for bonds event data void* m_splitScratch; NvBlastFractureBuffers m_tempBuffer; bool m_isBusy; #if NV_PROFILE TkGroupStats m_stats; #endif }; } } #endif // NVBLASTTKTASKIMPL_H
10,422
C
26.9437
122
0.627807
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkFamilyImpl.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastTkFrameworkImpl.h" #include "NvBlastTkFamilyImpl.h" #include "NvBlastTkGroupImpl.h" #include "NvBlastTkAssetImpl.h" #include "NvBlastTkActorImpl.h" #include "NvBlastTkJointImpl.h" #include "NvBlastIndexFns.h" #include "NvBlastMemory.h" namespace Nv { namespace Blast { //////// Static data //////// NVBLASTTK_DEFINE_TYPE_IDENTIFIABLE(Family); //////// Member functions //////// TkFamilyImpl::TkFamilyImpl() : m_familyLL(nullptr), m_internalJointCount(0), m_asset(nullptr) { } TkFamilyImpl::TkFamilyImpl(const NvBlastID& id) : TkFamilyType(id), m_familyLL(nullptr), m_internalJointCount(0), m_asset(nullptr) { } TkFamilyImpl::~TkFamilyImpl() { if (m_familyLL != nullptr) { uint32_t familyActorCount = NvBlastFamilyGetActorCount(m_familyLL, logLL); if (familyActorCount != 0) { NVBLAST_LOG_WARNING("TkFamilyImpl::~TkFamilyImpl(): family actor count is not 0."); } NVBLAST_FREE(m_familyLL); } } void TkFamilyImpl::release() { for (TkActorImpl& actor : m_actors) { if (actor.isActive()) { actor.release(); } } m_actors.clear(); NVBLAST_DELETE(this, TkFamilyImpl); } const NvBlastFamily* TkFamilyImpl::getFamilyLL() const { return m_familyLL; } TkActorImpl* TkFamilyImpl::addActor(NvBlastActor* actorLL) { TkActorImpl* actor = getActorByActorLL(actorLL); NVBLAST_ASSERT(actor); actor->m_actorLL = actorLL; actor->m_family = this; return actor; } void TkFamilyImpl::removeActor(TkActorImpl* actor) { NVBLAST_ASSERT(actor != nullptr && actor->m_family == this); //actor->m_family = nullptr; actor->m_actorLL = nullptr; } uint32_t TkFamilyImpl::getActorCount() const { return getActorCountInternal(); } uint32_t TkFamilyImpl::getActors(TkActor** buffer, uint32_t bufferSize, uint32_t indexStart /*= 0*/) const { uint32_t actorCount = getActorCount(); if (actorCount <= indexStart) { NVBLAST_LOG_WARNING("TkFamilyImpl::getActors: indexStart beyond end of actor list."); return 0; } actorCount -= indexStart; if (actorCount > bufferSize) { actorCount = static_cast<uint32_t>(bufferSize); } uint32_t index = 0; for (const TkActorImpl& actor : m_actors) { if (actor.isActive()) { if (index >= indexStart) { if ((index - indexStart) >= actorCount) { break; } else { *buffer++ = const_cast<TkActorImpl*>(&actor); } } index++; } } return actorCount; } NV_INLINE bool areLLActorsEqual(const NvBlastActor* actor0, const NvBlastActor* actor1, Array<uint32_t>::type& scratch) { if (NvBlastActorGetGraphNodeCount(actor0, logLL) != NvBlastActorGetGraphNodeCount(actor1, logLL)) { return false; } const uint32_t chunkCount = NvBlastActorGetVisibleChunkCount(actor0, logLL); if (chunkCount != NvBlastActorGetVisibleChunkCount(actor1, logLL)) { return false; } scratch.resize(chunkCount * 2); NvBlastActorGetVisibleChunkIndices(scratch.begin(), chunkCount, actor0, logLL); NvBlastActorGetVisibleChunkIndices(scratch.begin() + chunkCount, chunkCount, actor1, logLL); return memcmp(scratch.begin(), scratch.begin() + chunkCount, chunkCount * sizeof(uint32_t)) == 0; } void TkFamilyImpl::reinitialize(const NvBlastFamily* newFamily, TkGroup* group) { NVBLAST_ASSERT(newFamily); #if NV_ENABLE_ASSERTS NvBlastID id0 = NvBlastFamilyGetAssetID(m_familyLL, logLL); NvBlastID id1 = NvBlastFamilyGetAssetID(newFamily, logLL); NVBLAST_ASSERT(TkGUIDsEqual(&id0, &id1)); #endif NVBLAST_ASSERT(NvBlastFamilyGetSize(m_familyLL, logLL) == NvBlastFamilyGetSize(newFamily, logLL)); // alloc and init new family const uint32_t blockSize = NvBlastFamilyGetSize(newFamily, logLL); NvBlastFamily* newFamilyCopy = (NvBlastFamily*)NVBLAST_ALLOC_NAMED(blockSize, "TkFamilyImpl::reinitialize"); memcpy(newFamilyCopy, newFamily, blockSize); NvBlastFamilySetAsset(newFamilyCopy, m_asset->getAssetLL(), logLL); // get actors from new family Array<NvBlastActor*>::type newLLActors(NvBlastFamilyGetActorCount(newFamilyCopy, logLL)); uint32_t actorCount = NvBlastFamilyGetActors(newLLActors.begin(), newLLActors.size(), newFamilyCopy, logLL); // reset actor families to nullptr (we use it as a flag later) for (TkActorImpl& actor : m_actors) { if (actor.isActive()) { actor.m_family = nullptr; } } // prepare split event with new actors auto newActorsSplitEvent = getQueue().allocData<TkSplitEvent>(); Array<TkActor*>::type children(actorCount); children.resizeUninitialized(0); newActorsSplitEvent->children = children.begin(); // scratch Array<uint32_t>::type scratch(m_asset->getChunkCount()); for (uint32_t i = 0; i < actorCount; ++i) { NvBlastActor* newLLActor = newLLActors[i]; uint32_t actorIndex = NvBlastActorGetIndex(newLLActor, logLL); TkActorImpl& tkActor = *getActorByIndex(actorIndex); tkActor.m_family = this; if (!tkActor.isActive() || !areLLActorsEqual(newLLActor, tkActor.m_actorLL, scratch)) { if (tkActor.isActive()) { auto removeSplitEvent = getQueue().allocData<TkSplitEvent>(); removeSplitEvent->parentData.family = this; removeSplitEvent->numChildren = 0; removeSplitEvent->parentData.userData = tkActor.userData; removeSplitEvent->parentData.index = tkActor.getIndex(); getQueue().addEvent(removeSplitEvent); } tkActor.m_actorLL = newLLActor; // switch groups TkGroupImpl* prevGroup = tkActor.m_group; if (prevGroup != group) { if (prevGroup) { prevGroup->removeActor(tkActor); } if (group) { group->addActor(tkActor); } } children.pushBack(&tkActor); } else { tkActor.m_actorLL = newLLActor; } } // if m_family is still nullptr for an active actor -> remove it. It doesn't exist in new family. for (TkActorImpl& tkActor : m_actors) { if (tkActor.isActive() && tkActor.m_family == nullptr) { tkActor.m_family = this; if (tkActor.m_group) { tkActor.m_group->removeActor(tkActor); } auto removeSplitEvent = getQueue().allocData<TkSplitEvent>(); removeSplitEvent->parentData.family = this; removeSplitEvent->numChildren = 0; removeSplitEvent->parentData.userData = tkActor.userData; removeSplitEvent->parentData.index = tkActor.getIndex(); getQueue().addEvent(removeSplitEvent); tkActor.m_actorLL = nullptr; } } // add split event with all new actors newActorsSplitEvent->parentData.family = this; newActorsSplitEvent->parentData.userData = 0; newActorsSplitEvent->parentData.index = invalidIndex<uint32_t>(); newActorsSplitEvent->numChildren = children.size(); if (newActorsSplitEvent->numChildren > 0) { getQueue().addEvent(newActorsSplitEvent); } // replace family NVBLAST_FREE(m_familyLL); m_familyLL = newFamilyCopy; // update joints for (TkActorImpl& tkActor : m_actors) { if (!tkActor.m_jointList.isEmpty()) { updateJoints(&tkActor); } } getQueue().dispatch(); } TkActorImpl* TkFamilyImpl::getActorByChunk(uint32_t chunk) { if (chunk >= NvBlastAssetGetChunkCount(m_asset->getAssetLLInternal(), logLL)) { NVBLAST_LOG_WARNING("TkFamilyImpl::getActorByChunk: invalid chunk index. Returning NULL."); return nullptr; } NvBlastActor* actorLL = NvBlastFamilyGetChunkActor(m_familyLL, chunk, logLL); return actorLL ? getActorByActorLL(actorLL) : nullptr; } void TkFamilyImpl::applyFractureInternal(const NvBlastFractureBuffers* commands) { NvBlastSupportGraph graph = getAsset()->getGraph(); // apply bond fracture commands on relevant actors { TkActorImpl* currActor = nullptr; NvBlastBondFractureData* bondFractures = commands->bondFractures; uint32_t bondFracturesCount = 0; auto applyFracture = [&]() { if (bondFracturesCount > 0) { if (currActor != nullptr && currActor->isActive()) { NvBlastFractureBuffers newCommands; newCommands.bondFractures = bondFractures; newCommands.bondFractureCount = bondFracturesCount; newCommands.chunkFractures = nullptr; newCommands.chunkFractureCount = 0; currActor->applyFracture(nullptr, &newCommands); } bondFractures += bondFracturesCount; bondFracturesCount = 0; } }; for (uint32_t i = 0; i < commands->bondFractureCount; ++i, ++bondFracturesCount) { const NvBlastBondFractureData& command = commands->bondFractures[i]; uint32_t chunk0 = graph.chunkIndices[command.nodeIndex0]; uint32_t chunk1 = graph.chunkIndices[command.nodeIndex1]; TkActorImpl* actor0 = getActorByChunk(chunk0); TkActorImpl* actor1 = getActorByChunk(chunk1); if (actor0 != actor1) { // skipping this event, bond already broken actor0 = nullptr; } if (actor0 != currActor) { applyFracture(); currActor = actor0; } } if (bondFracturesCount > 0) { applyFracture(); } } // apply chunk fracture commands on relevant actors { TkActorImpl* currActor = nullptr; NvBlastChunkFractureData* chunkFractures = commands->chunkFractures; uint32_t chunkFracturesCount = 0; auto applyFracture = [&]() { if (chunkFracturesCount > 0) { if (currActor != nullptr && currActor->isActive()) { NvBlastFractureBuffers newCommands; newCommands.bondFractures = nullptr; newCommands.bondFractureCount = 0; newCommands.chunkFractures = chunkFractures; newCommands.chunkFractureCount = chunkFracturesCount; currActor->applyFracture(nullptr, &newCommands); } chunkFractures += chunkFracturesCount; chunkFracturesCount = 0; } }; for (uint32_t i = 0; i < commands->chunkFractureCount; ++i, ++chunkFracturesCount) { const NvBlastChunkFractureData& command = commands->chunkFractures[i]; TkActorImpl* actor = getActorByChunk(command.chunkIndex); if (actor != currActor) { applyFracture(); currActor = actor; } } if (chunkFracturesCount > 0) { applyFracture(); } } } void TkFamilyImpl::updateJoints(TkActorImpl* actor, TkEventQueue* alternateQueue) { // Copy joint array for safety against implementation of joint->setActor TkJointImpl** joints = reinterpret_cast<TkJointImpl**>(NvBlastAlloca(sizeof(TkJointImpl*)*actor->getJointCountInternal())); TkJointImpl** stop = joints + actor->getJointCountInternal(); TkJointImpl** jointHandle = joints; for (TkActorImpl::JointIt j(*actor); (bool)j; ++j) { *jointHandle++ = *j; } jointHandle = joints; while (jointHandle < stop) { TkJointImpl* joint = *jointHandle++; const TkJointData& data = joint->getDataInternal(); TkActorImpl* actor0 = data.actors[0] != nullptr ? static_cast<TkActorImpl&>(*data.actors[0]).getFamilyImpl().getActorByChunk(data.chunkIndices[0]) : nullptr; TkActorImpl* actor1 = data.actors[1] != nullptr ? static_cast<TkActorImpl&>(*data.actors[1]).getFamilyImpl().getActorByChunk(data.chunkIndices[1]) : nullptr; joint->setActors(actor0, actor1, alternateQueue); } } const TkAsset* TkFamilyImpl::getAsset() const { return m_asset; } //////// Static functions //////// TkFamilyImpl* TkFamilyImpl::create(const TkAssetImpl* asset) { TkFamilyImpl* family = NVBLAST_NEW(TkFamilyImpl); family->m_asset = asset; void* mem = NVBLAST_ALLOC_NAMED(NvBlastAssetGetFamilyMemorySize(asset->getAssetLL(), logLL), "TkFamilyImpl::create"); family->m_familyLL = NvBlastAssetCreateFamily(mem, asset->getAssetLL(), logLL); //family->addListener(*TkFrameworkImpl::get()); if (family->m_familyLL == nullptr) { NVBLAST_LOG_ERROR("TkFamilyImpl::create: low-level family could not be created."); family->release(); return nullptr; } uint32_t maxActorCount = NvBlastFamilyGetMaxActorCount(family->m_familyLL, logLL); family->m_actors.resize(maxActorCount); family->m_internalJointBuffer.resize(asset->getJointDescCountInternal() * sizeof(TkJointImpl), 0); family->m_internalJointCount = asset->getJointDescCountInternal(); return family; } TkJointImpl** TkFamilyImpl::createExternalJointHandle(const NvBlastID& otherFamilyID, uint32_t chunkIndex0, uint32_t chunkIndex1) { JointSet* jointSet; const FamilyIDMap::Entry* jointSetIndexEntry = m_familyIDMap.find(otherFamilyID); uint32_t otherFamilyIndex; if (jointSetIndexEntry != nullptr) { otherFamilyIndex = jointSetIndexEntry->second; jointSet = m_jointSets[otherFamilyIndex]; } else { jointSet = NVBLAST_NEW(JointSet); NVBLAST_CHECK_ERROR(jointSet != nullptr, "TkFamilyImpl::addExternalJoint: failed to create joint set for other family ID.", return nullptr); jointSet->m_familyID = otherFamilyID; otherFamilyIndex = m_jointSets.size(); m_familyIDMap[otherFamilyID] = otherFamilyIndex; m_jointSets.pushBack(jointSet); } const ExternalJointKey key(chunkIndex0, chunkIndex1); const bool jointExists = jointSet->m_joints.find(key) != nullptr; NVBLAST_CHECK_WARNING(!jointExists, "TkFamilyImpl::addExternalJoint: joint already added.", return nullptr); return &jointSet->m_joints[key]; } bool TkFamilyImpl::deleteExternalJointHandle(TkJointImpl*& joint, const NvBlastID& otherFamilyID, uint32_t chunkIndex0, uint32_t chunkIndex1) { const FamilyIDMap::Entry* jointSetIndexEntry = m_familyIDMap.find(otherFamilyID); if (jointSetIndexEntry != nullptr) { const uint32_t jointSetIndex = jointSetIndexEntry->second; ExternalJointKey jointKey = ExternalJointKey(chunkIndex0, chunkIndex1); const HashMap<ExternalJointKey, TkJointImpl*>::type::Entry* e = m_jointSets[jointSetIndex]->m_joints.find(jointKey); if (e != nullptr) { joint = e->second; // Return value that was stored m_jointSets[jointSetIndex]->m_joints.erase(jointKey); // Delete the joint set if it is empty if (m_jointSets[jointSetIndex]->m_joints.size() == 0) { NVBLAST_DELETE(m_jointSets[jointSetIndex], JointSet); m_jointSets.replaceWithLast(jointSetIndex); m_familyIDMap.erase(otherFamilyID); if (jointSetIndex < m_jointSets.size()) { m_familyIDMap[m_jointSets[jointSetIndex]->m_familyID] = jointSetIndex; } } return true; } } return false; } TkJointImpl* TkFamilyImpl::findExternalJoint(const TkFamilyImpl* otherFamily, ExternalJointKey key) const { const FamilyIDMap::Entry* jointSetIndexEntry = m_familyIDMap.find(getFamilyID(otherFamily)); if (jointSetIndexEntry != nullptr) { const HashMap<ExternalJointKey, TkJointImpl*>::type::Entry* e = m_jointSets[jointSetIndexEntry->second]->m_joints.find(key); if (e != nullptr) { return e->second; } } return nullptr; } } // namespace Blast } // namespace Nv
18,252
C++
31.711469
148
0.633739
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkTask.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastGlobals.h" #include "NvBlastTkTask.h" #include "NvCpuDispatcher.h" #include "NvBlastTkGroup.h" using namespace Nv::Blast; uint32_t TkGroupTaskManagerImpl::process(uint32_t workerCount) { NVBLAST_CHECK_WARNING(m_group != nullptr, "TkGroupTaskManager::process cannot process, no group set.", return 0); NVBLAST_CHECK_WARNING(m_sync.isDone(), "TkGroupTaskManager::process group is already being processed.", return 0); // at least one task must start, even when dispatcher has none specified uint32_t dispatcherThreads = m_taskManager.getCpuDispatcher()->getWorkerCount(); dispatcherThreads = dispatcherThreads > 0 ? dispatcherThreads : 1; // not expecting an arbitrary amount of tasks uint32_t availableTasks = TASKS_MAX_COUNT; // use workerCount tasks, unless dispatcher has less threads or less tasks are available uint32_t requestedTasks = workerCount > 0 ? workerCount : dispatcherThreads; requestedTasks = requestedTasks > dispatcherThreads ? dispatcherThreads : requestedTasks; requestedTasks = requestedTasks > availableTasks ? availableTasks : requestedTasks; // ensure the group has enough memory allocated for concurrent processing m_group->setWorkerCount(requestedTasks); // check if there is work to do uint32_t jobCount = m_group->startProcess(); if (jobCount) { // don't start more tasks than jobs are available requestedTasks = requestedTasks > jobCount ? jobCount : requestedTasks; // common counter for all tasks m_counter.reset(jobCount); // set to busy state m_sync.setCount(requestedTasks); // set up tasks for (uint32_t i = 0; i < requestedTasks; i++) { m_tasks[i].setup(m_group, &m_counter, &m_sync); m_tasks[i].setContinuation(m_taskManager, nullptr); m_tasks[i].removeReference(); } return requestedTasks; } // there was no work to be done return 0; } bool TkGroupTaskManagerImpl::wait(bool block) { if (block && !m_sync.isDone()) { m_sync.wait(); } if (m_sync.isDone()) { return m_group->endProcess(); } return false; } void TkGroupTaskManagerImpl::setGroup(TkGroup* group) { NVBLAST_CHECK_WARNING(m_sync.isDone(), "TkGroupTaskManager::setGroup trying to change group while processing.", return); m_group = group; } TkGroupTaskManager* TkGroupTaskManager::create(nvidia::task::NvTaskManager& taskManager, TkGroup* group) { return NVBLAST_NEW(TkGroupTaskManagerImpl) (taskManager, group); } void TkGroupTaskManagerImpl::release() { NVBLAST_CHECK_WARNING(m_sync.isDone(), "TkGroupTaskManager::release group is still being processed.", return); NVBLAST_DELETE(this, TkGroupTaskManagerImpl); }
4,386
C++
35.558333
124
0.719562
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkGroupImpl.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvPreprocessor.h" #include "NvBlastAssert.h" #include "NvBlast.h" #include "NvBlastTkFrameworkImpl.h" #include "NvBlastTkGroupImpl.h" #include "NvBlastTkActorImpl.h" #include "NvBlastTkFamilyImpl.h" #include "NvBlastTkAssetImpl.h" #include "NvBlastTkTaskImpl.h" #undef max #undef min #include <algorithm> using namespace nvidia; namespace Nv { namespace Blast { //////// Static data //////// NVBLASTTK_DEFINE_TYPE_IDENTIFIABLE(Group); //////// Member functions //////// TkGroupImpl::TkGroupImpl() : m_actorCount(0), m_isProcessing(false) { #if NV_PROFILE memset(&m_stats, 0, sizeof(TkGroupStats)); #endif } TkGroupImpl::~TkGroupImpl() { NVBLAST_ASSERT(getActorCount() == 0); NVBLAST_ASSERT(m_sharedMemory.size() == 0); } void TkGroupImpl::release() { if (isProcessing()) { // abort all processing? NVBLAST_LOG_ERROR("TkGroup::release: cannot release Group while processing."); NVBLAST_ALWAYS_ASSERT_MESSAGE("TkGroup::release: cannot release Group while processing."); return; } for (auto it = m_sharedMemory.getIterator(); !it.done(); ++it) { TkFamilyImpl* family = it->first; for (TkActorImpl& actor : family->getActorsInternal()) { if (actor.m_group == this) { removeActorInternal(actor); } } SharedMemory* mem = it->second; mem->release(); NVBLAST_DELETE(mem, SharedMemory); } m_sharedMemory.clear(); m_bondTempDataBlock.release(); m_chunkTempDataBlock.release(); m_bondEventDataBlock.release(); m_chunkEventDataBlock.release(); m_splitScratchBlock.release(); NVBLAST_DELETE(this, TkGroupImpl); } void TkGroupImpl::addActorsInternal(TkActorImpl** actors, uint32_t numActors) { for (uint32_t i = 0; i < numActors; i++) { addActorInternal(*actors[i]); } } void TkGroupImpl::addActorInternal(TkActorImpl& tkActor) { NVBLAST_ASSERT(tkActor.getGroup() == nullptr); tkActor.m_group = this; m_actorCount++; } bool TkGroupImpl::addActor(TkActor& actor) { TkActorImpl& tkActor = static_cast<TkActorImpl&>(actor); if (tkActor.getGroup() != nullptr) { NVBLAST_LOG_ERROR("TkGroup::addActor: actor already belongs to a Group. Remove from current group first."); return false; } if (isProcessing()) { NVBLAST_LOG_ERROR("TkGroup::addActor: cannot alter Group while processing."); return false; } // mark the actor that it now belongs to this group addActorInternal(tkActor); // actors that were fractured already or have damage requested // must be enqueued to be processed if (tkActor.isPending()) { enqueue(&tkActor); } TkFamilyImpl& family = tkActor.getFamilyImpl(); SharedMemory* mem = m_sharedMemory[&family]; if (mem == nullptr) { // the actor belongs to a family not involved in this group yet // shared memory must be allocated and temporary buffers adjusted accordingly BLAST_PROFILE_ZONE_BEGIN("family memory"); mem = NVBLAST_NEW(SharedMemory); mem->allocate(family); m_sharedMemory[&family] = mem; BLAST_PROFILE_ZONE_END("family memory"); BLAST_PROFILE_ZONE_BEGIN("group memory"); const uint32_t workerCount = m_workers.size(); NvBlastLog theLog = logLL; // this group's tasks will use one temporary buffer each, which is of max size of, for all families involved const size_t requiredScratch = NvBlastActorGetRequiredScratchForSplit(tkActor.getActorLL(), theLog); if (static_cast<size_t>(m_splitScratchBlock.numElementsPerBlock()) < requiredScratch) { m_splitScratchBlock.release(); m_splitScratchBlock.allocate(static_cast<uint32_t>(requiredScratch), workerCount); } // generate and apply fracture may create an entry for each bond const uint32_t bondCount = NvBlastAssetGetBondCount(tkActor.getAsset()->getAssetLL(), theLog); if (m_bondTempDataBlock.numElementsPerBlock() < bondCount) { m_bondTempDataBlock.release(); m_bondTempDataBlock.allocate(bondCount, workerCount); m_bondEventDataBlock.release(); m_bondEventDataBlock.allocate(bondCount, workerCount); } // apply fracture may create an entry for each lower-support chunk const uint32_t graphNodeCount = NvBlastAssetGetSupportGraph(tkActor.getAsset()->getAssetLL(), theLog).nodeCount; const uint32_t subsupportChunkCount = NvBlastAssetGetChunkCount(tkActor.getAsset()->getAssetLL(), theLog) - NvBlastAssetGetFirstSubsupportChunkIndex(tkActor.getAsset()->getAssetLL(), theLog); const uint32_t chunkCount = graphNodeCount + subsupportChunkCount; if (m_chunkTempDataBlock.numElementsPerBlock() < chunkCount) { m_chunkTempDataBlock.release(); m_chunkTempDataBlock.allocate(chunkCount, workerCount); m_chunkEventDataBlock.release(); m_chunkEventDataBlock.allocate(chunkCount, workerCount); } BLAST_PROFILE_ZONE_END("group memory"); } mem->addReference(); return true; } uint32_t TkGroupImpl::getActors(TkActor** buffer, uint32_t bufferSize, uint32_t indexStart /* = 0 */) const { BLAST_PROFILE_SCOPE_L("TkGroup::getActors"); uint32_t actorCount = m_actorCount; if (actorCount <= indexStart) { NVBLAST_LOG_WARNING("TkGroup::getActors: indexStart beyond end of actor list."); return 0; } actorCount -= indexStart; if (actorCount > bufferSize) { actorCount = bufferSize; } uint32_t index = 0; bool done = false; for (auto it = const_cast<TkGroupImpl*>(this)->m_sharedMemory.getIterator(); !it.done();++it) { TkFamilyImpl* fam = it->first; for (TkActorImpl& actor : fam->getActorsInternal()) { if (actor.m_group == this) { NVBLAST_ASSERT(actor.isActive()); if (index >= indexStart) { *buffer++ = &actor; } index++; done = (index - indexStart) >= actorCount; } if (done) break; } if (done) break; } return actorCount; } void TkGroupImpl::removeActorInternal(TkActorImpl& tkActor) { NVBLAST_ASSERT(tkActor.m_group == this); tkActor.m_group = nullptr; m_actorCount--; } void TkGroupImpl::releaseSharedMemory(TkFamilyImpl* fam, SharedMemory* mem) { NVBLAST_ASSERT(mem != nullptr && m_sharedMemory[fam] == mem); mem->release(); m_sharedMemory.erase(fam); NVBLAST_DELETE(mem, SharedMemory); } bool TkGroupImpl::removeActor(TkActor& actor) { TkActorImpl& tkActor = static_cast<TkActorImpl&>(actor); if (tkActor.getGroup() != this) { NVBLAST_LOG_ERROR("TkGroup::removeActor: actor does not belong to this Group."); return false; } if (isProcessing()) { NVBLAST_LOG_ERROR("TkGroup::removeActor: cannot alter Group while processing."); return false; } removeActorInternal(tkActor); // pending actors must be removed from the job queue as well if(tkActor.isPending()) { uint32_t index = tkActor.m_groupJobIndex; tkActor.m_groupJobIndex = invalidIndex<uint32_t>(); if (index < m_jobs.size()) { m_jobs.replaceWithLast(index); if (index < m_jobs.size()) { NVBLAST_ASSERT(m_jobs[index].m_tkActor->m_groupJobIndex == m_jobs.size()); NVBLAST_ASSERT(m_jobs[index].m_tkActor->isPending()); m_jobs[index].m_tkActor->m_groupJobIndex = index; } } } // if the actor is the last of its family in this group // the group-family memory can be released TkFamilyImpl* family = &tkActor.getFamilyImpl(); SharedMemory* mem = getSharedMemory(family); if (mem->removeReference()) { releaseSharedMemory(family, mem); } return true; } TkGroupImpl* TkGroupImpl::create(const TkGroupDesc& desc) { TkGroupImpl* group = NVBLAST_NEW(TkGroupImpl); group->setWorkerCount(desc.workerCount); return group; } void TkGroupImpl::setWorkerCount(uint32_t workerCount) { if (isProcessing()) { NVBLAST_LOG_WARNING("TkGroup::setWorkerCount: Group is still processing, call TkGroup::endProcess first."); return; } if (workerCount == 0) { NVBLAST_LOG_WARNING("TkGroup: attempting to create a Group with 0 workers. Forced to 1."); workerCount = 1; } if (workerCount != m_workers.size()) { m_workers.resize(workerCount); uint32_t workerId = 0; for (auto& worker : m_workers) { worker.m_id = workerId++; worker.m_group = this; } const uint32_t bondCount = m_bondTempDataBlock.numElementsPerBlock(); if (bondCount > 0) { m_bondTempDataBlock.release(); m_bondTempDataBlock.allocate(bondCount, workerCount); m_bondEventDataBlock.release(); m_bondEventDataBlock.allocate(bondCount, workerCount); } const uint32_t chunkCount = m_chunkTempDataBlock.numElementsPerBlock(); if (chunkCount > 0) { m_chunkTempDataBlock.release(); m_chunkTempDataBlock.allocate(chunkCount, workerCount); m_chunkEventDataBlock.release(); m_chunkEventDataBlock.allocate(chunkCount, workerCount); } const uint32_t scratchSize = m_splitScratchBlock.numElementsPerBlock(); if (scratchSize > 0) { m_splitScratchBlock.release(); m_splitScratchBlock.allocate(scratchSize, workerCount); } } } NV_INLINE uint32_t TkGroupImpl::getWorkerCount() const { return m_workers.size(); } uint32_t TkGroupImpl::startProcess() { BLAST_PROFILE_SCOPE_L("TkGroup::startProcess"); if (!setProcessing(true)) { NVBLAST_LOG_WARNING("TkGroup::process: Group is still processing, call TkGroup::endProcess first."); return 0; } if (m_jobs.size() > 0) { BLAST_PROFILE_ZONE_BEGIN("task setup"); BLAST_PROFILE_ZONE_BEGIN("setup job queue"); for (const auto& job : m_jobs) { const TkActorImpl* a = job.m_tkActor; SharedMemory* mem = getSharedMemory(&a->getFamilyImpl()); const uint32_t damageCount = a->m_damageBuffer.size(); // applyFracture'd actor do not necessarily have damage queued NVBLAST_ASSERT(damageCount > 0 || a->m_flags.isSet(TkActorFlag::DAMAGED)); // no reason to be here without these NVBLAST_ASSERT(a->m_flags.isSet(TkActorFlag::PENDING)); NVBLAST_ASSERT(a->m_group == this); // collect the amount of event payload memory to preallocate for TkWorkers mem->m_eventsMemory += damageCount * (sizeof(TkFractureCommands) + sizeof(TkFractureEvents)) + sizeof(TkSplitEvent); // collect the amount of event entries to preallocate for TkWorkers // (two TkFracture* events per damage plus one TkSplitEvent) mem->m_eventsCount += 2 * damageCount + 1; } BLAST_PROFILE_ZONE_END("setup job queue"); BLAST_PROFILE_ZONE_BEGIN("memory protect"); for (auto it = m_sharedMemory.getIterator(); !it.done(); ++it) { // preallocate the event memory for TkWorkers SharedMemory* mem = it->second; mem->m_events.reserveData(mem->m_eventsMemory); mem->m_events.reserveEvents(mem->m_eventsCount); // these counters are not used anymore // reset them immediately for next time mem->m_eventsCount = 0; mem->m_eventsMemory = 0; // switch to parallel mode mem->m_events.protect(true); } BLAST_PROFILE_ZONE_END("memory protect"); BLAST_PROFILE_ZONE_END("task setup"); for (auto&worker : m_workers) { worker.initialize(); } return m_jobs.size(); } else { bool success = setProcessing(false); NVBLAST_ASSERT(success); NV_UNUSED(success); return 0; } } bool TkGroupImpl::endProcess() { if (isProcessing()) { BLAST_PROFILE_SCOPE_L("TkGroupImpl::endProcess"); if (m_jobs.size() > 0) { #if NV_PROFILE BLAST_PROFILE_ZONE_BEGIN("accumulate timers"); NvBlastTimers accumulated; NvBlastTimersReset(&accumulated); uint32_t jobCount = 0; int64_t workerTime = 0; for (TkWorker& worker : m_workers) { accumulated += worker.m_stats.timers; jobCount += worker.m_stats.processedActorsCount; workerTime += worker.m_stats.workerTime; } m_stats.timers = accumulated; m_stats.processedActorsCount = jobCount; m_stats.workerTime = workerTime; BLAST_PROFILE_ZONE_END("accumulate timers"); #endif BLAST_PROFILE_ZONE_BEGIN("job update"); for (auto& j : m_jobs) { if (j.m_newActorsCount) { TkFamilyImpl* fam = &j.m_tkActor->getFamilyImpl(); SharedMemory* mem = getSharedMemory(fam); // as LL is implemented, where newActorsCount the parent is always deleted removeActorInternal(*j.m_tkActor); mem->removeReference(); addActorsInternal(j.m_newActors, j.m_newActorsCount); mem->addReference(j.m_newActorsCount); // Update joints mem->m_events.protect(false); // allow allocations again BLAST_PROFILE_ZONE_BEGIN("updateJoints"); fam->updateJoints(j.m_tkActor, &mem->m_events); BLAST_PROFILE_ZONE_END("updateJoints"); } // virtually dequeue the actor // the queue itself is cleared right after this loop j.m_tkActor->m_flags.clear(TkActorFlag::PENDING); j.m_tkActor->m_groupJobIndex = invalidIndex<uint32_t>(); BLAST_PROFILE_ZONE_BEGIN("damageBuffer.clear"); j.m_tkActor->m_damageBuffer.clear(); BLAST_PROFILE_ZONE_END("damageBuffer.clear"); } m_jobs.clear(); BLAST_PROFILE_ZONE_END("job update"); BLAST_PROFILE_ZONE_BEGIN("event dispatch"); for (auto it = m_sharedMemory.getIterator(); !it.done(); ++it) { BLAST_PROFILE_SCOPE_L("event dispatch"); TkFamilyImpl* family = it->first; SharedMemory* mem = it->second; NVBLAST_ASSERT(family != nullptr); NVBLAST_ASSERT(mem != nullptr && mem->isUsed()); // where no actor of a family has split, // its group/family event queue has not been // unprotected in the jobs loop above mem->m_events.protect(false); family->getQueue().dispatch(mem->m_events); mem->m_events.reset(); mem->reset(); } BLAST_PROFILE_ZONE_END("event dispatch"); BLAST_PROFILE_ZONE_BEGIN("event memory release"); for (auto& worker : m_workers) { worker.m_bondBuffer.clear(); worker.m_chunkBuffer.clear(); } BLAST_PROFILE_ZONE_END("event memory release"); } bool success = setProcessing(false); NVBLAST_ASSERT(success); return success; } return false; } bool TkGroupImpl::setProcessing(bool value) { bool expected = !value; return m_isProcessing.compare_exchange_strong(expected, value); } void TkGroupImpl::enqueue(TkActorImpl* tkActor) { NVBLAST_ASSERT(tkActor->getGroupImpl() != nullptr); NVBLAST_ASSERT(tkActor->getGroupImpl() == this); NVBLAST_ASSERT(isInvalidIndex(tkActor->m_groupJobIndex)); NVBLAST_ASSERT(isProcessing() == false); #if NV_DEBUG for (TkWorkerJob& j : m_jobs) { NVBLAST_ASSERT(j.m_tkActor != tkActor); } #endif tkActor->m_groupJobIndex = m_jobs.size(); TkWorkerJob& j = m_jobs.insert(); j.m_tkActor = tkActor; } TkGroupWorker* TkGroupImpl::acquireWorker() { BLAST_PROFILE_SCOPE_L("TkGroupImpl::acquireWorker"); std::unique_lock<std::mutex> lk(m_workerMtx); for (auto& worker:m_workers) { if (!worker.m_isBusy) { worker.m_isBusy = true; return &worker; } } return nullptr; } void TkGroupImpl::returnWorker(TkGroupWorker* worker) { BLAST_PROFILE_SCOPE_L("TkGroupImpl::returnWorker"); std::unique_lock<std::mutex> lk(m_workerMtx); auto w = static_cast<TkWorker*>(worker); NVBLAST_CHECK_WARNING(w->m_group == this, "TkGroup::returnWorker worker does not belong to this group.", return); w->m_isBusy = false; } } // namespace Blast } // namespace Nv
19,115
C++
30.082927
128
0.611405
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkTaskImpl.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastTime.h" #include "NvBlastTkTaskImpl.h" #include "NvBlastTkFamilyImpl.h" #include "NvBlastTkAssetImpl.h" #include "NvBlastTkGroupImpl.h" using namespace Nv::Blast; void SharedMemory::allocate(TkFamilyImpl& tkFamily) { NVBLAST_ASSERT(m_refCount == 0); const NvBlastAsset* assetLL = tkFamily.getAsset()->getAssetLL(); // at most leafChunkCount actors can be created within a family // tasks will grab their portion out of these memory blocks uint32_t leafChunkCount = NvBlastAssetGetLeafChunkCount(assetLL, logLL); m_newActorBuffers.allocate(2 * leafChunkCount); // GWD-167 workaround (2*) m_newTkActorBuffers.allocate(leafChunkCount); } /** Creates a TkEvent::FractureCommand according to the input buffer for tkActor into events queue using the LocalBuffers to store the actual event data. */ NV_FORCE_INLINE void reportFractureCommands( const NvBlastFractureBuffers& buffer, LocalBuffer<NvBlastBondFractureData>& bondBuffer, LocalBuffer<NvBlastChunkFractureData>& chunkBuffer, TkEventQueue& events, const TkActorImpl* tkActor) { NvBlastBondFractureData* bdata = nullptr; if (buffer.bondFractureCount > 0) { bdata = bondBuffer.allocate(buffer.bondFractureCount); memcpy(bdata, buffer.bondFractures, sizeof(NvBlastBondFractureData)*buffer.bondFractureCount); } NvBlastChunkFractureData* cdata = nullptr; if (buffer.chunkFractureCount > 0) { cdata = chunkBuffer.allocate(buffer.chunkFractureCount); memcpy(cdata, buffer.chunkFractures, sizeof(NvBlastChunkFractureData)*buffer.chunkFractureCount); } TkFractureCommands* fevt = events.allocData<TkFractureCommands>(); fevt->tkActorData = *tkActor; fevt->buffers = { buffer.bondFractureCount, buffer.chunkFractureCount, bdata, cdata }; events.addEvent(fevt); } /** Creates a TkEvent::FractureEvent according to the input buffer for tkActor into events queue using the LocalBuffers to store the actual event data. */ NV_FORCE_INLINE void reportFractureEvents( const NvBlastFractureBuffers& buffer, LocalBuffer<NvBlastBondFractureData>& bondBuffer, LocalBuffer<NvBlastChunkFractureData>& chunkBuffer, TkEventQueue& events, const TkActorImpl* tkActor) { uint32_t result[4] = { 0,0,0,0 }; NvBlastBondFractureData* bdata = nullptr; if (buffer.bondFractureCount > 0) { bdata = bondBuffer.allocate(buffer.bondFractureCount); for (uint32_t b = 0; b < buffer.bondFractureCount; ++b) { bdata[b] = buffer.bondFractures[b]; result[buffer.bondFractures[b].health > 0 ? 0 : 1]++; } } NvBlastChunkFractureData* cdata = nullptr; if (buffer.chunkFractureCount > 0) { cdata = chunkBuffer.allocate(buffer.chunkFractureCount); for (uint32_t c = 0; c < buffer.chunkFractureCount; ++c) { cdata[c] = buffer.chunkFractures[c]; result[buffer.chunkFractures[c].health > 0 ? 2 : 3]++; } } TkFractureEvents* fevt = events.allocData<TkFractureEvents>(); fevt->tkActorData = *tkActor; fevt->buffers = { buffer.bondFractureCount, buffer.chunkFractureCount, bdata, cdata }; fevt->bondsDamaged = result[0]; fevt->bondsBroken = result[1]; fevt->chunksDamaged = result[2]; fevt->chunksBroken = result[3]; events.addEvent(fevt); } void TkWorker::initialize() { // temporary memory used to generate and apply fractures // it must fit for the largest family involved in the group that owns this worker NvBlastBondFractureData* bondFractureData = m_group->m_bondTempDataBlock.getBlock(m_id); uint32_t bondFractureCount = m_group->m_bondTempDataBlock.numElementsPerBlock(); NvBlastChunkFractureData* chunkFractureData = m_group->m_chunkTempDataBlock.getBlock(m_id); uint32_t chunkFractureCount = m_group->m_chunkTempDataBlock.numElementsPerBlock(); m_tempBuffer = { bondFractureCount, chunkFractureCount, bondFractureData, chunkFractureData }; // temporary memory used to split the actor // large enough for the largest family involved m_splitScratch = m_group->m_splitScratchBlock.getBlock(m_id); // to avoid unnecessary allocations, preallocated memory exists to fit all chunks and bonds taking damage once // where multiple damage occurs, more memory will be allocated on demand (this may thwart other threads doing the same) m_bondBuffer.initialize(m_group->m_bondEventDataBlock.getBlock(m_id), m_group->m_bondEventDataBlock.numElementsPerBlock()); m_chunkBuffer.initialize(m_group->m_chunkEventDataBlock.getBlock(m_id), m_group->m_chunkEventDataBlock.numElementsPerBlock()); #if NV_PROFILE NvBlastTimersReset(&m_stats.timers); m_stats.processedActorsCount = 0; #endif } void TkWorker::process(TkWorkerJob& j) { NvBlastTimers* timers = nullptr; BLAST_PROFILE_SCOPE_M("TkActor"); TkActorImpl* tkActor = j.m_tkActor; const uint32_t tkActorIndex = tkActor->getIndex(); NvBlastActor* actorLL = tkActor->getActorLLInternal(); TkFamilyImpl& family = tkActor->getFamilyImpl(); SharedMemory* mem = m_group->getSharedMemory(&family); TkEventQueue& events = mem->m_events; NVBLAST_ASSERT(tkActor->getGroupImpl() == m_group); NVBLAST_ASSERT(tkActor->m_flags.isSet(TkActorFlag::PENDING)); #if NV_PROFILE timers = &m_stats.timers; *timers += tkActor->m_timers; NvBlastTimersReset(&tkActor->m_timers); m_stats.processedActorsCount++; #endif // generate and apply fracture for all damage requested on this actor // and queue events accordingly for (const auto& damage : tkActor->m_damageBuffer) { NvBlastFractureBuffers commandBuffer = m_tempBuffer; BLAST_PROFILE_ZONE_BEGIN("Material"); NvBlastActorGenerateFracture(&commandBuffer, actorLL, damage.program, damage.programParams, logLL, timers); BLAST_PROFILE_ZONE_END("Material"); if (commandBuffer.chunkFractureCount > 0 || commandBuffer.bondFractureCount > 0) { BLAST_PROFILE_SCOPE_M("Fill Command Events"); reportFractureCommands(commandBuffer, m_bondBuffer, m_chunkBuffer, events, tkActor); } NvBlastFractureBuffers eventBuffer = m_tempBuffer; BLAST_PROFILE_ZONE_BEGIN("Fracture"); NvBlastActorApplyFracture(&eventBuffer, actorLL, &commandBuffer, logLL, timers); BLAST_PROFILE_ZONE_END("Fracture"); if (eventBuffer.chunkFractureCount > 0 || eventBuffer.bondFractureCount > 0) { BLAST_PROFILE_SCOPE_M("Fill Fracture Events"); tkActor->m_flags |= (TkActorFlag::DAMAGED); reportFractureEvents(eventBuffer, m_bondBuffer, m_chunkBuffer, events, tkActor); } } // split the actor, which could have been damaged directly though the TkActor's fracture functions // i.e. it did not have damage queued for the above loop NvBlastActorSplitEvent splitEvent = { nullptr, nullptr }; if (tkActor->isDamaged()) { BLAST_PROFILE_ZONE_BEGIN("Split Memory"); uint32_t maxActorCount = NvBlastActorGetMaxActorCountForSplit(actorLL, logLL); splitEvent.newActors = mem->reserveNewActors(maxActorCount); BLAST_PROFILE_ZONE_END("Split Memory"); BLAST_PROFILE_ZONE_BEGIN("Split"); j.m_newActorsCount = NvBlastActorSplit(&splitEvent, actorLL, maxActorCount, m_splitScratch, logLL, timers); BLAST_PROFILE_ZONE_END("Split"); tkActor->m_flags.clear(TkActorFlag::DAMAGED); } else { j.m_newActorsCount = 0; } // update the TkActor according to the LL split results and queue events accordingly if (j.m_newActorsCount > 0) { NVBLAST_ASSERT(splitEvent.deletedActor == tkActor->getActorLL()); BLAST_PROFILE_ZONE_BEGIN("memory new actors"); auto tkSplitEvent = events.allocData<TkSplitEvent>(); tkSplitEvent->children = mem->reserveNewTkActors(j.m_newActorsCount); tkSplitEvent->numChildren = j.m_newActorsCount; tkSplitEvent->parentData.family = &family; tkSplitEvent->parentData.userData = tkActor->userData; tkSplitEvent->parentData.index = tkActorIndex; family.removeActor(tkActor); BLAST_PROFILE_ZONE_END("memory new actors"); BLAST_PROFILE_ZONE_BEGIN("create new actors"); for (uint32_t i = 0; i < j.m_newActorsCount; ++i) { TkActorImpl* newActor = family.addActor(splitEvent.newActors[i]); tkSplitEvent->children[i] = newActor; } j.m_newActors = reinterpret_cast<TkActorImpl**>(tkSplitEvent->children); BLAST_PROFILE_ZONE_END("create new actors"); BLAST_PROFILE_ZONE_BEGIN("split event"); events.addEvent(tkSplitEvent); BLAST_PROFILE_ZONE_END("split event"); } j.m_tkActor->m_flags.clear(TkActorFlag::PENDING); } void TkWorker::process(uint32_t jobID) { TkWorkerJob& j = m_group->m_jobs[jobID]; process(j); }
10,597
C++
38.107011
130
0.70888
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkActorImpl.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTTKACTORIMPL_H #define NVBLASTTKACTORIMPL_H #include "NvBlastTkCommon.h" #include "NvBlastAssert.h" #include "NvBlastDLink.h" #include "NvBlastIteratorBase.h" #include "NvBlastTkJointImpl.h" #include "NvBlast.h" #include "NvBlastTkActor.h" #include "NvFlags.h" namespace Nv { namespace Blast { // Forward declarations: class TkGroupImpl; class TkFamilyImpl; class TkAssetImpl; class TkJointImpl; /** Struct-enum for actor status flags, used in TkGroup processing. */ struct TkActorFlag { enum Enum { DAMAGED = (1 << 0), //!< The actor had fractures applied successfully and will take the split step. PENDING = (1 << 1), //!< The actor will be processed when its group executes, used to update job queues when moving group. }; }; /** Implementation of TkActor. */ class TkActorImpl : public TkActor { public: TkActorImpl(); ~TkActorImpl(); // Begin TkActor virtual const NvBlastActor* getActorLL() const override; virtual TkFamily& getFamily() const override; virtual uint32_t getIndex() const override; virtual TkGroup* getGroup() const override; virtual TkGroup* removeFromGroup() override; virtual const TkAsset* getAsset() const override; virtual uint32_t getVisibleChunkCount() const override; virtual uint32_t getVisibleChunkIndices(uint32_t* visibleChunkIndices, uint32_t visibleChunkIndicesSize) const override; virtual uint32_t getGraphNodeCount() const override; virtual uint32_t getGraphNodeIndices(uint32_t* graphNodeIndices, uint32_t graphNodeIndicesSize) const override; virtual const float* getBondHealths() const override; virtual uint32_t getSplitMaxActorCount() const override; virtual void damage(const NvBlastDamageProgram& program, const void* programParams) override; virtual bool isPending() const override; virtual void generateFracture(NvBlastFractureBuffers* commands, const NvBlastDamageProgram& program, const void* programParams) const override; virtual void applyFracture(NvBlastFractureBuffers* eventBuffers, const NvBlastFractureBuffers* commands) override; virtual uint32_t getJointCount() const override; virtual uint32_t getJoints(TkJoint** joints, uint32_t jointsSize) const override; virtual bool hasExternalBonds() const override; // End TkActor // Begin TkObject virtual void release() override; // End TkObject // Public methods /** Factory create method. \param[in] desc Actor descriptor set by the user. \return a pointer to a new TkActorImpl object if successful, NULL otherwise. */ static TkActorImpl* create(const TkActorDesc& desc); /** TkActorImpl objects are created in an array within a TkFamilyImpl. Actors may become 'inactive' without their memory being freed. If inactive, the actor should be treated as if it has been released. \return the active status of this TkActorImpl. */ bool isActive() const; /** Utility to return the low-level family to which the low-level actor belongs. \return a pointer to the NvBlastFamily to which the low-level actor belongs. */ NvBlastFamily* getFamilyLL() const; /** Utility to access the TkFamily to which this actor belongs. \return a reference to the TkFamilyImpl to which this TkActorImpl belongs. */ TkFamilyImpl& getFamilyImpl() const; /** \return the index of this actor with its TkFamilyImpl. */ uint32_t getIndexInternal() const; /** Access to the group to which this actor belongs, if any. \return a pointer to the TkGroupImpl to which this TkActorImpl belongs, if any. If this actor is not in a group, this function returns NULL. */ TkGroupImpl* getGroupImpl() const; /** Access to the low-level actor associated with this TkActorImpl. \return a pointer to the NvBlastActor associated with this TkActorImpl. If this actor is inactive (see isActive), this function returns NULL. */ NvBlastActor* getActorLLInternal() const; /** \return the number of TkJointImpl objects that reference this actor. */ uint32_t getJointCountInternal() const; /** Joint iterator. Usage: Given a TkActorImpl a, for (TkActorImpl::JointIt i(a); (bool)i; ++i) { TkJointImpl* joint = (TkJointImpl*)i; // ... } */ class JointIt : public DList::It { public: /** Constructed from an actor. */ JointIt(const TkActorImpl& actor, Direction dir = Forward); /** Current joint. */ TkJointImpl* operator * () const; }; /** Implicit converter to TkActorData for events. */ operator Nv::Blast::TkActorData() const; private: /** Functions to raise or check 'damaged' state: this actor will take the split step. 'damaged' actors automatically become 'pending' also. */ void markAsDamaged(); bool isDamaged() const; /** Raise actor to 'pending' state: this actor will be processed when its group executes next. Enqueues the actor in its group's job list if a group is set. Otherwise the group will enqueue the actor when it is added. */ void makePending(); /** Functions to add or remove an internal reference to a joint. (Joints and actors mutually reference each other.) */ void addJoint(TkJointLink& jointLink); void removeJoint(TkJointLink& jointLink); struct DamageData { NvBlastDamageProgram program; const void* programParams; }; // Data NvBlastActor* m_actorLL; //!< The low-level actor associated with this actor TkFamilyImpl* m_family; //!< The TkFamilyImpl to which this actor belongs TkGroupImpl* m_group; //!< The TkGroupImpl (if any) to which this actor belongs uint32_t m_groupJobIndex; //!< The index of this actor's job within its group's job list nvidia::NvFlags<TkActorFlag::Enum, char> m_flags; //!< Status flags for this actor Array<DamageData>::type m_damageBuffer; //!< Buffered damage input uint32_t m_jointCount; //!< The number of joints referenced in m_jointList DList m_jointList; //!< A doubly-linked list of joint references //#if NV_PROFILE NvBlastTimers m_timers; //!< If profiling, each actor stores timing data //#endif friend class TkWorker; // m_damageBuffer and m_flags friend class TkGroupImpl; friend class TkFamilyImpl; friend class TkJointImpl; friend class TkFrameworkImpl; }; //////// TkActorImpl inline methods //////// NV_INLINE TkFamilyImpl& TkActorImpl::getFamilyImpl() const { NVBLAST_ASSERT(m_family != nullptr); return *m_family; } NV_INLINE uint32_t TkActorImpl::getIndexInternal() const { NVBLAST_ASSERT(isActive()); return NvBlastActorGetIndex(m_actorLL, logLL); } NV_INLINE NvBlastActor* TkActorImpl::getActorLLInternal() const { return m_actorLL; } NV_INLINE uint32_t TkActorImpl::getJointCountInternal() const { return m_jointCount; } NV_INLINE TkGroupImpl* TkActorImpl::getGroupImpl() const { return m_group; } NV_INLINE bool TkActorImpl::isActive() const { return m_actorLL != nullptr; } NV_INLINE bool TkActorImpl::isPending() const { return m_flags.isSet(TkActorFlag::PENDING); } NV_INLINE void TkActorImpl::addJoint(TkJointLink& jointLink) { NVBLAST_ASSERT(m_jointList.isSolitary(jointLink)); m_jointList.insertHead(jointLink); ++m_jointCount; } NV_INLINE void TkActorImpl::removeJoint(TkJointLink& jointLink) { NVBLAST_ASSERT(!m_jointList.isSolitary(jointLink)); NVBLAST_ASSERT(m_jointCount > 0); if (m_jointCount > 0) { --m_jointCount; m_jointList.remove(jointLink); } } //////// TkActorImpl::JointIt methods //////// NV_INLINE TkActorImpl::JointIt::JointIt(const TkActorImpl& actor, Direction dir) : DList::It(actor.m_jointList, dir) {} NV_INLINE TkJointImpl* TkActorImpl::JointIt::operator * () const { const DLink* link = (const DLink*)(*this); return reinterpret_cast<const TkJointLink*>(link)->m_joint; } } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTTKACTORIMPL_H
10,565
C
29.894737
162
0.654614
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkCommon.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTTKCOMMON_H #define NVBLASTTKCOMMON_H #include "NvBlastGlobals.h" #include "NvBlastTkGUID.h" // Macro to define standard object classes. An intermediate class is defined which holds common implementations. #define NVBLASTTK_IMPL_DECLARE(_name) \ class Tk##_name##Type : public Tk##_name \ { \ public: \ /* Blank constructor generates a new NvBlastID and informs framework */ \ Tk##_name##Type() \ { \ memset(&m_ID, 0, sizeof(NvBlastID)); \ setID(TkGenerateGUID(this)); \ TkFrameworkImpl::get()->onCreate(*this); \ } \ \ /* This constructor takes an existing NvBlastID and informs framework */ \ Tk##_name##Type(const NvBlastID& id) \ { \ memset(&m_ID, 0, sizeof(NvBlastID)); \ setID(id); \ TkFrameworkImpl::get()->onCreate(*this); \ } \ \ /* Destructor informs framework */ \ ~Tk##_name##Type() { TkFrameworkImpl::get()->onDestroy(*this); } \ \ /* Begin TkIdentifiable */ \ virtual void setID(const NvBlastID& id) override \ { \ /* Inform framework of ID change */ \ TkFrameworkImpl::get()->onIDChange(*this, m_ID, id); \ m_ID = id; \ } \ virtual const NvBlastID& getID() const override { return getIDInternal(); } \ virtual const TkType& getType() const override { return s_type; } \ /* End TkIdentifiable */ \ \ /* Begin public API */ \ \ /* Inline method for internal access to NvBlastID */ \ const NvBlastID& getIDInternal() const { return m_ID; } \ \ /* End public API */ \ \ /* Static type information */ \ static TkTypeImpl s_type; \ \ private: \ NvBlastID m_ID; /* NvBlastID for a TkIdentifiable object */ \ }; \ \ /* Derive object implementation from common implementation class above */ \ class Tk##_name##Impl final : public Tk##_name##Type // Macro to declare standard object interfaces, enums, etc. #define NVBLASTTK_IMPL_DEFINE_IDENTIFIABLE(_id0, _id1, _id2, _id3) \ /* Begin TkObject */ \ virtual void release() override; \ /* End TkObject */ \ \ /* Enums */ \ \ /* Generate a ClassID enum used to identify this TkIdentifiable. */ \ enum { ClassID = NVBLAST_FOURCC(_id0, _id1, _id2, _id3) } // Macro to define class type data #define NVBLASTTK_DEFINE_TYPE_IDENTIFIABLE(_name) \ TkTypeImpl Tk##_name##Type::s_type("Tk" #_name, Tk##_name##Impl::ClassID, 0) #endif // ifndef NVBLASTTKCOMMON_H
6,979
C
64.233644
113
0.391317
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkAssetImpl.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastTkFrameworkImpl.h" #include "NvBlastTkAssetImpl.h" #include "NvBlastTkFamilyImpl.h" #include "NvBlast.h" #include "NvBlastMemory.h" namespace Nv { namespace Blast { //////// Static data //////// NVBLASTTK_DEFINE_TYPE_IDENTIFIABLE(Asset); //////// Member functions //////// TkAssetImpl::TkAssetImpl() : m_assetLL(nullptr), m_ownsAsset(false) { } TkAssetImpl::TkAssetImpl(const NvBlastID& id) : TkAssetType(id), m_assetLL(nullptr), m_ownsAsset(false) { } TkAssetImpl::~TkAssetImpl() { if (m_assetLL != nullptr && m_ownsAsset) { NVBLAST_FREE(m_assetLL); } } const NvBlastAsset* TkAssetImpl::getAssetLL() const { return getAssetLLInternal(); } uint32_t TkAssetImpl::getChunkCount() const { return NvBlastAssetGetChunkCount(m_assetLL, logLL); } uint32_t TkAssetImpl::getLeafChunkCount() const { return NvBlastAssetGetLeafChunkCount(m_assetLL, logLL); } uint32_t TkAssetImpl::getBondCount() const { return NvBlastAssetGetBondCount(m_assetLL, logLL); } const NvBlastChunk* TkAssetImpl::getChunks() const { return NvBlastAssetGetChunks(m_assetLL, logLL); } const NvBlastBond* TkAssetImpl::getBonds() const { return NvBlastAssetGetBonds(m_assetLL, logLL); } const NvBlastSupportGraph TkAssetImpl::getGraph() const { return NvBlastAssetGetSupportGraph(m_assetLL, logLL); } uint32_t TkAssetImpl::getDataSize() const { return NvBlastAssetGetSize(m_assetLL, logLL); } uint32_t TkAssetImpl::getJointDescCount() const { return getJointDescCountInternal(); } const TkAssetJointDesc* TkAssetImpl::getJointDescs() const { return getJointDescsInternal(); } void TkAssetImpl::release() { const TkType& tkType = TkFamilyImpl::s_type; const uint32_t num = TkFrameworkImpl::get()->getObjectCount(tkType); if (num) { Array<TkIdentifiable*>::type dependents(num); TkFrameworkImpl::get()->getObjects(dependents.begin(), dependents.size(), tkType); for (TkObject* o : dependents) { TkFamilyImpl* f = static_cast<TkFamilyImpl*>(o); if (f->getAssetImpl() == this) { f->release(); } } } NVBLAST_DELETE(this, TkAssetImpl); } //////// Static functions //////// TkAssetImpl* TkAssetImpl::create(const TkAssetDesc& desc) { TkAssetImpl* asset = NVBLAST_NEW(TkAssetImpl); Array<char>::type scratch((uint32_t)NvBlastGetRequiredScratchForCreateAsset(&desc, logLL)); void* mem = NVBLAST_ALLOC_NAMED(NvBlastGetAssetMemorySize(&desc, logLL), "TkAssetImpl::create"); asset->m_assetLL = NvBlastCreateAsset(mem, &desc, scratch.begin(), logLL); if (asset->m_assetLL == nullptr) { NVBLAST_LOG_ERROR("TkAssetImpl::create: low-level asset could not be created."); asset->release(); return nullptr; } if (desc.bondFlags != nullptr) { for (uint32_t bondN = 0; bondN < desc.bondCount; ++bondN) { if (0 != (desc.bondFlags[bondN] & TkAssetDesc::BondJointed)) { const NvBlastBondDesc& bondDesc = desc.bondDescs[bondN]; const uint32_t c0 = bondDesc.chunkIndices[0]; const uint32_t c1 = bondDesc.chunkIndices[1]; if (c0 >= desc.chunkCount || c1 >= desc.chunkCount) { NVBLAST_LOG_WARNING("TkAssetImpl::create: joint flag set for badly described bond. No joint descriptor created."); continue; } if (!asset->addJointDesc(c0, c1)) { NVBLAST_LOG_WARNING("TkAssetImpl::create: no bond corresponds to the user-described bond indices. No joint descriptor created."); } } } } asset->m_ownsAsset = true; // asset->setID(NvBlastAssetGetID(asset->m_assetLL, logLL)); // Keeping LL and Tk IDs distinct return asset; } TkAssetImpl* TkAssetImpl::create(const NvBlastAsset* assetLL, Nv::Blast::TkAssetJointDesc* jointDescs, uint32_t jointDescCount, bool ownsAsset) { TkAssetImpl* asset = NVBLAST_NEW(TkAssetImpl); //NOTE: Why are we passing in a const NvBlastAsset* and then discarding the const? asset->m_assetLL = const_cast<NvBlastAsset*>(assetLL); if (asset->m_assetLL == nullptr) { NVBLAST_LOG_ERROR("TkAssetImpl::create: low-level asset could not be created."); asset->release(); return nullptr; } asset->m_ownsAsset = ownsAsset; asset->setID(NvBlastAssetGetID(asset->m_assetLL, logLL)); asset->m_jointDescs.resize(jointDescCount); for (uint32_t i = 0; i < asset->m_jointDescs.size(); ++i) { asset->m_jointDescs[i] = jointDescs[i]; } return asset; } bool TkAssetImpl::addJointDesc(uint32_t chunkIndex0, uint32_t chunkIndex1) { if (m_assetLL == nullptr) { return false; } const uint32_t upperSupportChunkCount = NvBlastAssetGetFirstSubsupportChunkIndex(m_assetLL, logLL); if (chunkIndex0 >= upperSupportChunkCount || chunkIndex1 >= upperSupportChunkCount) { return false; } const uint32_t* chunkToGraphNodeMap = NvBlastAssetGetChunkToGraphNodeMap(m_assetLL, logLL); const uint32_t node0 = chunkToGraphNodeMap[chunkIndex0]; const uint32_t node1 = chunkToGraphNodeMap[chunkIndex1]; const NvBlastSupportGraph graph = NvBlastAssetGetSupportGraph(m_assetLL, logLL); if (node0 >= graph.nodeCount || node1 >= graph.nodeCount) { return false; } // Find bond index // Iterate through all neighbors of node0 chunk uint32_t bondIndex = 0xFFFFFFFF; for (uint32_t i = graph.adjacencyPartition[node0]; i < graph.adjacencyPartition[node0 + 1]; i++) { if (graph.adjacentNodeIndices[i] == node1) { bondIndex = graph.adjacentBondIndices[i]; break; } } if (bondIndex >= NvBlastAssetGetBondCount(m_assetLL, logLL)) { return false; } const NvBlastBond& bond = NvBlastAssetGetBonds(m_assetLL, logLL)[bondIndex]; TkAssetJointDesc jointDesc; jointDesc.attachPositions[0] = jointDesc.attachPositions[1] = nvidia::NvVec3(bond.centroid[0], bond.centroid[1], bond.centroid[2]); jointDesc.nodeIndices[0] = node0; jointDesc.nodeIndices[1] = node1; m_jointDescs.pushBack(jointDesc); return true; } } // namespace Blast } // namespace Nv
8,002
C++
27.996377
150
0.673957
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkTypeImpl.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTTKTYPEIMPL_H #define NVBLASTTKTYPEIMPL_H #include "NvPreprocessor.h" #include "NvBlastTkType.h" namespace Nv { namespace Blast { /** Implementation of TkType, storing class information for TkIdentifiable-derived classes. */ class TkTypeImpl : public TkType { public: TkTypeImpl(const char* typeName, uint32_t typeID, uint32_t version); // Begin TkType virtual const char* getName() const override { return getNameInternal(); } virtual uint32_t getVersion() const override { return getVersionInternal(); } // End TkType // Public methods /** Access to the class name. \return a C string pointer to the class name. */ const char* getNameInternal() const; /** Access to the data format version for the class. \return the data format version. */ uint32_t getVersionInternal() const; /** Access to a unique identifier for the class (set using the NVBLASTTK_IMPL_DEFINE_IDENTIFIABLE macro). \return the class's unique identifier. */ uint32_t getID() const; /** Access to a runtime-unique small index for the class. \return the index for the class. */ uint32_t getIndex() const; /** \return whether or not the index has been set (see setIndex) to a valid value. */ bool indexIsValid() const; private: enum { InvalidIndex = 0xFFFFFFFF }; /** Sets the type index. \param[in] index The index to set. */ void setIndex(uint32_t index); const char* m_name; //!< The name of the class, set by the constructor. uint32_t m_ID; //!< The unique identifier for the class, set by the constructor. uint32_t m_version; //!< The data format version for the class, set by the constructor. uint32_t m_index; //!< The index set for this class, set using setIndex(). friend class TkFrameworkImpl; }; //////// TkTypeImpl inline methods //////// NV_INLINE TkTypeImpl::TkTypeImpl(const char* typeName, uint32_t typeID, uint32_t version) : m_name(typeName) , m_ID(typeID) , m_version(version) , m_index((uint32_t)InvalidIndex) { } NV_INLINE const char* TkTypeImpl::getNameInternal() const { return m_name; } NV_INLINE uint32_t TkTypeImpl::getVersionInternal() const { return m_version; } NV_INLINE uint32_t TkTypeImpl::getID() const { return m_ID; } NV_INLINE uint32_t TkTypeImpl::getIndex() const { return m_index; } NV_INLINE bool TkTypeImpl::indexIsValid() const { return m_index != (uint32_t)InvalidIndex; } NV_INLINE void TkTypeImpl::setIndex(uint32_t index) { m_index = index; } } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTTKTYPEIMPL_H
4,415
C
26.428571
110
0.68154
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkFrameworkImpl.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTTKFRAMEWORKIMPL_H #define NVBLASTTKFRAMEWORKIMPL_H #include "NvBlastTkFramework.h" #include "NvBlastInternalProfiler.h" #include "NvBlastTkCommon.h" #include "NvBlastArray.h" #include "NvBlastHashMap.h" #include "NvBlastHashSet.h" namespace Nv { namespace Blast { // Forward declarations class TkTypeImpl; class TkJointImpl; /** Implementation of TkFramework */ class TkFrameworkImpl : public TkFramework { public: TkFrameworkImpl(); ~TkFrameworkImpl(); // Begin TkFramework virtual void release() override; virtual const TkType* getType(TkTypeIndex::Enum typeIndex) const override; virtual TkIdentifiable* findObjectByID(const NvBlastID& id) const override; virtual uint32_t getObjectCount(const TkType& type) const override; virtual uint32_t getObjects(TkIdentifiable** buffer, uint32_t bufferSize, const TkType& type, uint32_t indexStart = 0) const override; virtual bool reorderAssetDescChunks(NvBlastChunkDesc* chunkDescs, uint32_t chunkCount, NvBlastBondDesc* bondDescs, uint32_t bondCount, uint32_t* chunkReorderMap = nullptr, bool keepBondNormalChunkOrder = false) const override; virtual bool ensureAssetExactSupportCoverage(NvBlastChunkDesc* chunkDescs, uint32_t chunkCount) const override; virtual TkAsset* createAsset(const TkAssetDesc& desc) override; virtual TkAsset* createAsset(const NvBlastAsset* assetLL, Nv::Blast::TkAssetJointDesc* jointDescs = nullptr, uint32_t jointDescCount = 0, bool ownsAsset = false) override; virtual TkGroup* createGroup(const TkGroupDesc& desc) override; virtual TkActor* createActor(const TkActorDesc& desc) override; virtual TkJoint* createJoint(const TkJointDesc& desc) override; // End TkFramework // Public methods /** To be called by any TkIdentifiable object when it is created, so the framework can track it. */ void onCreate(TkIdentifiable& object); /** To be called by any TkIdentifiable object when it is deleted, so the framework can stop tracking it. */ void onDestroy(TkIdentifiable& object); /** Special onCreate method for joints, since they are not TkIdentifiable. */ void onCreate(TkJointImpl& joint); /** Special onDestroy method for joints, since they are not TkIdentifiable. */ void onDestroy(TkJointImpl& joint); /** Must be called whenever a TkIdentifiable object's ID is changed, so that the framework can associate the new ID with it. */ void onIDChange(TkIdentifiable& object, const NvBlastID& IDPrev, const NvBlastID& IDCurr); /** Internal (non-virtual) method to find a TkIdentifiable object based upon its NvBlastID. */ TkIdentifiable* findObjectByIDInternal(const NvBlastID& id) const; // Access to singleton /** Retrieve the global singleton. */ static TkFrameworkImpl* get(); /** Set the global singleton, if it's not already set, or set it to NULL. Returns true iff successful. */ static bool set(TkFrameworkImpl* framework); private: // Enums enum { ClassID = NVBLAST_FOURCC('T', 'K', 'F', 'W') }; //!< TkFramework identifier token, used in serialization // Static data static TkFrameworkImpl* s_framework; //!< Global (singleton) object pointer // Types InlineArray<const TkTypeImpl*, TkTypeIndex::TypeCount>::type m_types; //!< TkIdentifiable static type data HashMap<uint32_t, uint32_t>::type m_typeIDToIndex; //!< Map to type data keyed by ClassID // Objects and object names HashMap<NvBlastID, TkIdentifiable*>::type m_IDToObject; //!< Map to all TkIdentifiable objects, keyed by NvBlastID InlineArray<Array<TkIdentifiable*>::type, TkTypeIndex::TypeCount>::type m_objects; //!< Catalog of all TkIdentifiable objects, grouped by type. (Revisit implementation.) // Track external joints (to do: make this a pool) HashSet<TkJointImpl*>::type m_joints; //!< All internal joints }; //////// TkFrameworkImpl inline methods //////// NV_INLINE TkIdentifiable* TkFrameworkImpl::findObjectByIDInternal(const NvBlastID& id) const { const auto entry = m_IDToObject.find(id); if (entry == nullptr) { return nullptr; } return entry->second; } } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTTKFRAMEWORKIMPL_H
6,653
C
40.074074
253
0.650534
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkTaskManager.cpp
// // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. #include "NvTask.h" #include "NvTaskDefine.h" #include "NvCpuDispatcher.h" #include "NvGpuDispatcher.h" #include "NvErrorCallback.h" #include "NvBlastGlobals.h" #include "NvBlastAssert.h" #include "NvBlastAtomic.h" #include "NvBlastAllocator.h" #include "NvBlastArray.h" #include "NvBlastHashMap.h" #include <mutex> using namespace nvidia; using namespace nvidia::task; namespace Nv { namespace Blast { class MutexScopedLock { std::mutex& mMutex; NV_NOCOPY(MutexScopedLock) public: NV_INLINE MutexScopedLock(std::mutex& mutex) : mMutex(mutex) { mMutex.lock(); } NV_INLINE ~MutexScopedLock() { mMutex.unlock(); } }; #define LOCK() MutexScopedLock __lock__(mMutex) constexpr int EOL = -1; typedef HashMap<const char *, NvTaskID>::type NvBlastTkTaskNameToIDMap; struct NvBlastTkTaskDepTableRow { NvTaskID mTaskID; int mNextDep; }; typedef Array<NvBlastTkTaskDepTableRow>::type NvBlastTkTaskDepTable; struct NvTaskAccess : public NvTask { void setTaskID(NvTaskID taskID) { mTaskID = taskID; } void setTm(NvTaskManager* tm) { mTm = tm; } }; NvTaskAccess& ACCESS(NvTask& task) { return reinterpret_cast<NvTaskAccess&>(task); } NvTaskAccess* ACCESS(NvTask* task) { return reinterpret_cast<NvTaskAccess*>(task); } struct NvLightCpuTaskAccess : public NvLightCpuTask { bool atomicIncrementRefCount() { return Nv::Blast::atomicIncrement(&mRefCount); } bool atomicDecrementRefCount() { return Nv::Blast::atomicDecrement(&mRefCount); } }; NvLightCpuTaskAccess& ACCESS(NvLightCpuTask& task) { return reinterpret_cast<NvLightCpuTaskAccess&>(task); } class NvBlastTkTaskTableRow { public: NvBlastTkTaskTableRow() : mRefCount( 1 ), mStartDep(EOL), mLastDep(EOL) {} void addDependency( NvBlastTkTaskDepTable& depTable, NvTaskID taskID ) { int newDep = int(depTable.size()); NvBlastTkTaskDepTableRow row; row.mTaskID = taskID; row.mNextDep = EOL; depTable.pushBack( row ); if( mLastDep == EOL ) { mStartDep = mLastDep = newDep; } else { depTable[ uint32_t(mLastDep) ].mNextDep = newDep; mLastDep = newDep; } } NvTask * mTask; volatile int mRefCount; NvTaskType::Enum mType; int mStartDep; int mLastDep; }; typedef Array<NvBlastTkTaskTableRow>::type NvTaskTable; /* Implementation of NvTaskManager abstract API */ class NvBlastTkTaskManager : public NvTaskManager { NV_NOCOPY(NvBlastTkTaskManager) public: NvBlastTkTaskManager(NvErrorCallback& , NvCpuDispatcher*, NvGpuDispatcher*); ~NvBlastTkTaskManager(); void setCpuDispatcher( NvCpuDispatcher& ref ) { mCpuDispatcher = &ref; } NvCpuDispatcher* getCpuDispatcher() const { return mCpuDispatcher; } void setGpuDispatcher( NvGpuDispatcher& ref ) { mGpuDispatcher = &ref; } NvGpuDispatcher* getGpuDispatcher() const { return mGpuDispatcher; } void resetDependencies(); void startSimulation(); void stopSimulation(); void taskCompleted( NvTask& task ); NvTaskID getNamedTask( const char *name ); NvTaskID submitNamedTask( NvTask *task, const char *name, NvTaskType::Enum type = NvTaskType::TT_CPU ); NvTaskID submitUnnamedTask( NvTask& task, NvTaskType::Enum type = NvTaskType::TT_CPU ); NvTask* getTaskFromID( NvTaskID ); bool dispatchTask( NvTaskID taskID, bool gpuGroupStart ); bool resolveRow( NvTaskID taskID, bool gpuGroupStart ); void release(); void finishBefore( NvTask& task, NvTaskID taskID ); void startAfter( NvTask& task, NvTaskID taskID ); void addReference( NvTaskID taskID ); void decrReference( NvTaskID taskID ); int32_t getReference( NvTaskID taskID ) const; void decrReference( NvLightCpuTask& lighttask ); void addReference( NvLightCpuTask& lighttask ); void emitStartEvent(NvBaseTask& basetask, uint32_t threadId); void emitStopEvent(NvBaseTask& basetask, uint32_t threadId); NvErrorCallback& mErrorCallback; NvCpuDispatcher* mCpuDispatcher; NvGpuDispatcher* mGpuDispatcher; NvBlastTkTaskNameToIDMap mName2IDmap; volatile int mPendingTasks; std::mutex mMutex; NvBlastTkTaskDepTable mDepTable; NvTaskTable mTaskTable; Array<NvTaskID>::type mStartDispatch; }; NvBlastTkTaskManager::NvBlastTkTaskManager(NvErrorCallback& errorCallback, NvCpuDispatcher* cpuDispatcher, NvGpuDispatcher* gpuDispatcher) : mErrorCallback (errorCallback) , mCpuDispatcher( cpuDispatcher ) , mGpuDispatcher( gpuDispatcher ) , mPendingTasks( 0 ) , mDepTable(NV_DEBUG_EXP("NvBlastTkTaskDepTable")) , mTaskTable(NV_DEBUG_EXP("NvTaskTable")) , mStartDispatch(NV_DEBUG_EXP("StartDispatch")) { } NvBlastTkTaskManager::~NvBlastTkTaskManager() { } void NvBlastTkTaskManager::release() { NVBLAST_DELETE(this, NvBlastTkTaskManager); } void NvBlastTkTaskManager::decrReference(NvLightCpuTask& lighttask) { /* This does not need a lock! */ if (!ACCESS(lighttask).atomicDecrementRefCount()) { NVBLAST_ASSERT(mCpuDispatcher); if (mCpuDispatcher) { mCpuDispatcher->submitTask(lighttask); } else { lighttask.release(); } } } void NvBlastTkTaskManager::addReference(NvLightCpuTask& lighttask) { /* This does not need a lock! */ ACCESS(lighttask).atomicIncrementRefCount(); } void NvBlastTkTaskManager::emitStartEvent(NvBaseTask& basetask, uint32_t threadId) { NvBaseTask* tmp = &basetask; NV_UNUSED(tmp); NV_UNUSED(threadId); /* This does not need a lock! */ #if NV_SUPPORT_NVTASK_PROFILING && NV_PROFILE //NV_COMPILE_TIME_ASSERT(sizeof(NvProfileEventId::mEventId) == sizeof(NvBaseTask::mEventID)); if (NvBlastGlobalGetProfilerCallback()) NvBlastGlobalGetProfilerCallback()->zoneStart(basetask.getName(), true, 0); #endif } void NvBlastTkTaskManager::emitStopEvent(NvBaseTask& basetask, uint32_t threadId) { NvBaseTask* tmp = &basetask; NV_UNUSED(tmp); NV_UNUSED(threadId); /* This does not need a lock! */ if (NvBlastGlobalGetProfilerCallback()) NvBlastGlobalGetProfilerCallback()->zoneEnd(nullptr, basetask.getName(), true, 0); #if NV_SUPPORT_NVTASK_PROFILING && NV_PROFILE //NV_COMPILE_TIME_ASSERT(sizeof(NvProfileEventId::mEventId) == sizeof(NvBaseTask::mEventID)); #endif } /* * Called by the owner (Scene) at the start of every frame, before * asking for tasks to be submitted. */ void NvBlastTkTaskManager::resetDependencies() { NVBLAST_ASSERT( !mPendingTasks ); // only valid if you don't resubmit named tasks, this is true for the SDK NVBLAST_ASSERT( mCpuDispatcher ); mTaskTable.clear(); mDepTable.clear(); mName2IDmap.clear(); mPendingTasks = 0; } /* * Called by the owner (Scene) to start simulating the task graph. * Dispatch all tasks with refCount == 1 */ void NvBlastTkTaskManager::startSimulation() { NVBLAST_ASSERT( mCpuDispatcher ); if( mGpuDispatcher ) { mGpuDispatcher->startSimulation(); } /* Handle empty task graph */ if( mPendingTasks == 0 ) { return; } bool gpuDispatch = false; for( NvTaskID i = 0 ; i < mTaskTable.size() ; i++ ) { if( mTaskTable[ i ].mType == NvTaskType::TT_COMPLETED ) { continue; } if( !Nv::Blast::atomicDecrement( &mTaskTable[ i ].mRefCount ) ) { mStartDispatch.pushBack(i); } } for( uint32_t i=0; i<mStartDispatch.size(); ++i) { gpuDispatch |= dispatchTask( mStartDispatch[i], gpuDispatch ); } //mStartDispatch.resize(0); mStartDispatch.forceSize_Unsafe(0); if( mGpuDispatcher && gpuDispatch ) { mGpuDispatcher->finishGroup(); } } void NvBlastTkTaskManager::stopSimulation() { if( mGpuDispatcher ) { mGpuDispatcher->stopSimulation(); } } NvTaskID NvBlastTkTaskManager::getNamedTask( const char *name ) { const NvBlastTkTaskNameToIDMap::Entry *ret; { LOCK(); ret = mName2IDmap.find( name ); } if( ret ) { return ret->second; } else { // create named entry in task table, without a task return submitNamedTask( NULL, name, NvTaskType::TT_NOT_PRESENT ); } } NvTask* NvBlastTkTaskManager::getTaskFromID( NvTaskID id ) { LOCK(); // todo: reader lock necessary? return mTaskTable[ id ].mTask; } /* If called at runtime, must be thread-safe */ NvTaskID NvBlastTkTaskManager::submitNamedTask( NvTask *task, const char *name, NvTaskType::Enum type ) { if( task ) { ACCESS(task)->setTm(this); task->submitted(); } LOCK(); const NvBlastTkTaskNameToIDMap::Entry *ret = mName2IDmap.find( name ); if( ret ) { NvTaskID prereg = ret->second; if( task ) { /* name was registered for us by a dependent task */ NVBLAST_ASSERT( !mTaskTable[ prereg ].mTask ); NVBLAST_ASSERT( mTaskTable[ prereg ].mType == NvTaskType::TT_NOT_PRESENT ); mTaskTable[ prereg ].mTask = task; mTaskTable[ prereg ].mType = type; ACCESS(task)->setTaskID(prereg); } return prereg; } else { Nv::Blast::atomicIncrement(&mPendingTasks); NvTaskID id = static_cast<NvTaskID>(mTaskTable.size()); mName2IDmap[ name ] = id; if( task ) { ACCESS(task)->setTaskID(id); } NvBlastTkTaskTableRow r; r.mTask = task; r.mType = type; mTaskTable.pushBack(r); return id; } } /* * Add an unnamed task to the task table */ NvTaskID NvBlastTkTaskManager::submitUnnamedTask( NvTask& task, NvTaskType::Enum type ) { Nv::Blast::atomicIncrement(&mPendingTasks); ACCESS(task).setTm(this); task.submitted(); LOCK(); ACCESS(task).setTaskID(static_cast<NvTaskID>(mTaskTable.size())); NvBlastTkTaskTableRow r; r.mTask = &task; r.mType = type; mTaskTable.pushBack(r); return task.getTaskID(); } /* Called by worker threads (or cooperating application threads) when a * NvTask has completed. Propogate depdenencies, decrementing all * referenced tasks' refCounts. If any of those reach zero, activate * those tasks. */ void NvBlastTkTaskManager::taskCompleted( NvTask& task ) { LOCK(); if( resolveRow( task.getTaskID(), false ) ) { mGpuDispatcher->finishGroup(); } } /* ================== Private Functions ======================= */ /* * Add a dependency to force 'task' to complete before the * referenced 'taskID' is allowed to be dispatched. */ void NvBlastTkTaskManager::finishBefore( NvTask& task, NvTaskID taskID ) { LOCK(); NVBLAST_ASSERT( mTaskTable[ taskID ].mType != NvTaskType::TT_COMPLETED ); mTaskTable[ task.getTaskID() ].addDependency( mDepTable, taskID ); Nv::Blast::atomicIncrement( &mTaskTable[ taskID ].mRefCount ); } /* * Add a dependency to force 'task' to wait for the referenced 'taskID' * to complete before it is allowed to be dispatched. */ void NvBlastTkTaskManager::startAfter( NvTask& task, NvTaskID taskID ) { LOCK(); NVBLAST_ASSERT( mTaskTable[ taskID ].mType != NvTaskType::TT_COMPLETED ); mTaskTable[ taskID ].addDependency( mDepTable, task.getTaskID() ); Nv::Blast::atomicIncrement( &mTaskTable[ task.getTaskID() ].mRefCount ); } void NvBlastTkTaskManager::addReference( NvTaskID taskID ) { LOCK(); Nv::Blast::atomicIncrement( &mTaskTable[ taskID ].mRefCount ); } /* * Remove one reference count from a task. Must be done here to make it thread safe. */ void NvBlastTkTaskManager::decrReference( NvTaskID taskID ) { LOCK(); if( !Nv::Blast::atomicDecrement( &mTaskTable[ taskID ].mRefCount ) ) { if( dispatchTask( taskID, false ) ) { mGpuDispatcher->finishGroup(); } } } int32_t NvBlastTkTaskManager::getReference(NvTaskID taskID) const { return mTaskTable[ taskID ].mRefCount; } /* * A task has completed, decrement all dependencies and submit tasks * that are ready to run. Signal simulation end if ther are no more * pending tasks. */ bool NvBlastTkTaskManager::resolveRow( NvTaskID taskID, bool gpuGroupStart ) { int depRow = mTaskTable[ taskID ].mStartDep; while( depRow != EOL ) { NvBlastTkTaskDepTableRow& row = mDepTable[ uint32_t(depRow) ]; NvBlastTkTaskTableRow& dtt = mTaskTable[ row.mTaskID ]; if( !Nv::Blast::atomicDecrement( &dtt.mRefCount ) ) { gpuGroupStart |= dispatchTask( row.mTaskID, gpuGroupStart ); } depRow = row.mNextDep; } Nv::Blast::atomicDecrement( &mPendingTasks ); return gpuGroupStart; } /* * Submit a ready task to its appropriate dispatcher. */ bool NvBlastTkTaskManager::dispatchTask( NvTaskID taskID, bool gpuGroupStart ) { LOCK(); // todo: reader lock necessary? NvBlastTkTaskTableRow& tt = mTaskTable[ taskID ]; // prevent re-submission if( tt.mType == NvTaskType::TT_COMPLETED ) { mErrorCallback.reportError(NvErrorCode::eDEBUG_WARNING, "NvTask dispatched twice", __FILE__, __LINE__); return false; } switch ( tt.mType ) { case NvTaskType::TT_CPU: mCpuDispatcher->submitTask( *tt.mTask ); break; case NvTaskType::TT_GPU: #if NV_WINDOWS_FAMILY if( mGpuDispatcher ) { if( !gpuGroupStart ) { mGpuDispatcher->startGroup(); } mGpuDispatcher->submitTask( *tt.mTask ); gpuGroupStart = true; } else #endif { mErrorCallback.reportError(NvErrorCode::eDEBUG_WARNING, "No GPU dispatcher", __FILE__, __LINE__); } break; case NvTaskType::TT_NOT_PRESENT: /* No task registered with this taskID, resolve its dependencies */ NVBLAST_ASSERT(!tt.mTask); gpuGroupStart |= resolveRow( taskID, gpuGroupStart ); break; case NvTaskType::TT_COMPLETED: default: mErrorCallback.reportError(NvErrorCode::eDEBUG_WARNING, "Unknown task type", __FILE__, __LINE__); gpuGroupStart |= resolveRow( taskID, gpuGroupStart ); break; } tt.mType = NvTaskType::TT_COMPLETED; return gpuGroupStart; } } // namespace Blast } // namespace Nv // Implement NvTaskManager factory namespace nvidia { namespace task { NvTaskManager* NvTaskManager::createTaskManager(NvErrorCallback& errorCallback, NvCpuDispatcher* cpuDispatcher, NvGpuDispatcher* gpuDispatcher) { return NVBLAST_NEW(Nv::Blast::NvBlastTkTaskManager)(errorCallback, cpuDispatcher, gpuDispatcher); } } }
16,566
C++
27.51463
143
0.664433
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkGroupImpl.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTTKGROUPIMPL_H #define NVBLASTTKGROUPIMPL_H #include "NvBlastTkTaskImpl.h" #include "NvBlastTkGroup.h" #include "NvBlastTkTypeImpl.h" namespace Nv { namespace Blast { class TkActorImpl; class TkFamilyImpl; NVBLASTTK_IMPL_DECLARE(Group) { ~TkGroupImpl(); public: TkGroupImpl(); NVBLASTTK_IMPL_DEFINE_IDENTIFIABLE('G', 'R', 'P', '\0'); static TkGroupImpl* create(const TkGroupDesc& desc); // Begin TkGroup virtual bool addActor(TkActor& actor) override; virtual uint32_t getActorCount() const override; virtual uint32_t getActors(TkActor** buffer, uint32_t bufferSize, uint32_t indexStart = 0) const override; virtual uint32_t startProcess() override; virtual bool endProcess() override; virtual void getStats(TkGroupStats& stats) const override; virtual void setWorkerCount(uint32_t workerCount) override; virtual uint32_t getWorkerCount() const override; virtual TkGroupWorker* acquireWorker() override; virtual void returnWorker(TkGroupWorker*) override; // End TkGroup // TkGroupImpl API /** Remove the actor from this group if the actor actually belongs to it and the group is not processing. \param[in] actor The TkActor to remove. \return true if removing succeeded, false otherwise */ bool removeActor(TkActor& actor); /** Add the actor to this group's job queue. It is the caller's responsibility to add an actor only once. This condition is checked in debug builds. */ void enqueue(TkActorImpl* tkActor); /** Atomically check if this group is processing actors. @see setProcessing() \return true between startProcess() and endProcess() calls, false otherwise */ bool isProcessing() const; private: /** Atomically set the processing state. This function checks for the current state before changing it. @see isProcessing() \param[in] value the value of the new state \return true if the new state could be set, false otherwise */ bool setProcessing(bool value); /** Get the group-family shared memory for the specified family. To be used when the memory is expected to already exist. */ SharedMemory* getSharedMemory(TkFamilyImpl* family); void releaseSharedMemory(TkFamilyImpl* fam, SharedMemory* mem); // functions to add/remove actors _without_ group-family memory management void addActorInternal(TkActorImpl& tkActor); void addActorsInternal(TkActorImpl** actors, uint32_t numActors); void removeActorInternal(TkActorImpl& tkActor); uint32_t m_actorCount; //!< number of actors in this group HashMap<TkFamilyImpl*, SharedMemory*>::type m_sharedMemory; //!< memory sharable by actors in the same family in this group // it is assumed no more than the asset's number of bond and chunks fracture commands are produced SharedBlock<NvBlastChunkFractureData> m_chunkTempDataBlock; //!< chunk data for damage/fracture SharedBlock<NvBlastBondFractureData> m_bondTempDataBlock; //!< bond data for damage/fracture SharedBlock<NvBlastChunkFractureData> m_chunkEventDataBlock; //!< initial memory block for event data SharedBlock<NvBlastBondFractureData> m_bondEventDataBlock; //!< initial memory block for event data SharedBlock<char> m_splitScratchBlock; //!< split scratch memory std::atomic<bool> m_isProcessing; //!< true while workers are processing Array<TkWorker>::type m_workers; //!< this group's workers Array<TkWorkerJob>::type m_jobs; //!< this group's process jobs //#if NV_PROFILE TkGroupStats m_stats; //!< accumulated group's worker stats //#endif std::mutex m_workerMtx; friend class TkWorker; }; NV_INLINE bool TkGroupImpl::isProcessing() const { return m_isProcessing.load(); } NV_INLINE void TkGroupImpl::getStats(TkGroupStats& stats) const { #if NV_PROFILE memcpy(&stats, &m_stats, sizeof(TkGroupStats)); #else NV_UNUSED(stats); #endif } NV_INLINE uint32_t TkGroupImpl::getActorCount() const { return m_actorCount; } NV_INLINE SharedMemory* TkGroupImpl::getSharedMemory(TkFamilyImpl* family) { SharedMemory* mem = m_sharedMemory[family]; NVBLAST_ASSERT(mem != nullptr); return mem; } NV_FORCE_INLINE void operator +=(NvBlastTimers& lhs, const NvBlastTimers& rhs) { lhs.material += rhs.material; lhs.fracture += rhs.fracture; lhs.island += rhs.fracture; lhs.partition += rhs.partition; lhs.visibility += rhs.visibility; } } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTTKGROUPIMPL_H
6,776
C
33.93299
139
0.664994
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkFrameworkImpl.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastAssert.h" #include "NvBlastTkFrameworkImpl.h" #include "NvBlastTkAssetImpl.h" #include "NvBlastTkFamilyImpl.h" #include "NvBlastTkGroupImpl.h" #include "NvBlastTkActorImpl.h" #include "NvBlastTkJointImpl.h" #include "NvBlastTkTypeImpl.h" #include "NvBlastGlobals.h" #include <algorithm> using namespace nvidia; using namespace nvidia::shdfnd; NV_INLINE bool operator < (const NvBlastID& id1, const NvBlastID& id2) { return memcmp(&id1, &id2, sizeof(NvBlastID)) < 0; } namespace Nv { namespace Blast { //////// Local definitions //////// // Map type ID to static type data #define NVBLASTTK_REGISTER_TYPE(_name) \ if (!Tk##_name##Impl::s_type.indexIsValid()) \ { \ Tk##_name##Impl::s_type.setIndex(TkTypeIndex::_name); \ } \ m_types[TkTypeIndex::_name] = &Tk##_name##Impl::s_type; \ m_typeIDToIndex[Tk##_name##Impl::s_type.getID()] = TkTypeIndex::_name #define NVBLASTTK_RELEASE_TYPE(_name) \ { \ TkTypeImpl& type = Tk##_name##Impl::s_type; \ auto& toRelease = m_objects[type.getIndex()]; \ for (TkObject* obj : toRelease) \ { \ obj->release(); \ } \ } //////// TkFrameworkImpl static variables //////// TkFrameworkImpl* TkFrameworkImpl::s_framework = nullptr; //////// TkFrameworkImpl static function //////// TkFrameworkImpl* TkFrameworkImpl::get() { return s_framework; } bool TkFrameworkImpl::set(TkFrameworkImpl* framework) { if (s_framework != nullptr) { if (framework != nullptr) { NVBLAST_LOG_ERROR("TkFrameworkImpl::set: framework already set. Pass NULL to this function to destroy framework."); return false; } NVBLAST_DELETE(s_framework, TkFrameworkImpl); } s_framework = framework; return true; } //////// TkFrameworkImpl methods //////// TkFrameworkImpl::TkFrameworkImpl() : TkFramework() { // Register types m_types.resize(TkTypeIndex::TypeCount); m_objects.resize(TkTypeIndex::TypeCount); NVBLASTTK_REGISTER_TYPE(Asset); NVBLASTTK_REGISTER_TYPE(Family); NVBLASTTK_REGISTER_TYPE(Group); } TkFrameworkImpl::~TkFrameworkImpl() { } void TkFrameworkImpl::release() { // Special release of joints, which are not TkIdentifiable: Array<TkJointImpl*>::type joints; // Since the EraseIterator is not exposed joints.reserve(m_joints.size()); for (auto j = m_joints.getIterator(); !j.done(); ++j) { joints.pushBack(*j); } for (uint32_t i = 0; i < joints.size(); ++i) { joints[i]->release(); } NVBLAST_ASSERT(m_joints.size() == 0); joints.reset(); // Since we will be deleting the allocator NVBLASTTK_RELEASE_TYPE(Group); NVBLASTTK_RELEASE_TYPE(Asset); set(nullptr); } const TkType* TkFrameworkImpl::getType(TkTypeIndex::Enum typeIndex) const { if (typeIndex < 0 || typeIndex >= TkTypeIndex::TypeCount) { NVBLAST_LOG_WARNING("TkFrameworkImpl::getType: invalid typeIndex."); return nullptr; } return m_types[typeIndex]; } TkIdentifiable* TkFrameworkImpl::findObjectByID(const NvBlastID& id) const { TkIdentifiable* object = findObjectByIDInternal(id); if (object == nullptr) { NVBLAST_LOG_WARNING("TkFrameworkImpl::findObjectByID: object not found."); } return object; } uint32_t TkFrameworkImpl::getObjectCount(const TkType& type) const { const uint32_t index = static_cast<const TkTypeImpl&>(type).getIndex(); if (index >= m_objects.size()) { NVBLAST_LOG_ERROR("TkFrameworkImpl::getObjectCount: BlastTk object type unrecognized."); return 0; } return m_objects[index].size(); } uint32_t TkFrameworkImpl::getObjects(TkIdentifiable** buffer, uint32_t bufferSize, const TkType& type, uint32_t indexStart /* = 0 */) const { const uint32_t index = static_cast<const TkTypeImpl&>(type).getIndex(); if (index >= m_objects.size()) { NVBLAST_LOG_ERROR("TkFrameworkImpl::getObjectCount: BlastTk object type unrecognized."); return 0; } const auto& objectArray = m_objects[index]; uint32_t objectCount = objectArray.size(); if (objectCount <= indexStart) { NVBLAST_LOG_WARNING("TkFrameworkImpl::getObjects: indexStart beyond end of object list."); return 0; } objectCount -= indexStart; if (objectCount > bufferSize) { objectCount = bufferSize; } memcpy(buffer, objectArray.begin() + indexStart, objectCount * sizeof(TkObject*)); return objectCount; } bool TkFrameworkImpl::reorderAssetDescChunks(NvBlastChunkDesc* chunkDescs, uint32_t chunkCount, NvBlastBondDesc* bondDescs, uint32_t bondCount, uint32_t* chunkReorderMap /*= nullptr*/, bool keepBondNormalChunkOrder /*= false*/) const { uint32_t* map = chunkReorderMap != nullptr ? chunkReorderMap : static_cast<uint32_t*>(NVBLAST_ALLOC_NAMED(chunkCount * sizeof(uint32_t), "reorderAssetDescChunks:chunkReorderMap")); void* scratch = NVBLAST_ALLOC_NAMED(chunkCount * sizeof(NvBlastChunkDesc), "reorderAssetDescChunks:scratch"); const bool result = NvBlastReorderAssetDescChunks(chunkDescs, chunkCount, bondDescs, bondCount, map, keepBondNormalChunkOrder, scratch, logLL); NVBLAST_FREE(scratch); if (chunkReorderMap == nullptr) { NVBLAST_FREE(map); } return result; } bool TkFrameworkImpl::ensureAssetExactSupportCoverage(NvBlastChunkDesc* chunkDescs, uint32_t chunkCount) const { void* scratch = NVBLAST_ALLOC_NAMED(chunkCount, "ensureAssetExactSupportCoverage:scratch"); const bool result = NvBlastEnsureAssetExactSupportCoverage(chunkDescs, chunkCount, scratch, logLL); NVBLAST_FREE(scratch); return result; } TkAsset* TkFrameworkImpl::createAsset(const TkAssetDesc& desc) { TkAssetImpl* asset = TkAssetImpl::create(desc); if (asset == nullptr) { NVBLAST_LOG_ERROR("TkFrameworkImpl::createAsset: failed to create asset."); } return asset; } TkAsset* TkFrameworkImpl::createAsset(const NvBlastAsset* assetLL, Nv::Blast::TkAssetJointDesc* jointDescs, uint32_t jointDescCount, bool ownsAsset) { TkAssetImpl* asset = TkAssetImpl::create(assetLL, jointDescs, jointDescCount, ownsAsset); if (asset == nullptr) { NVBLAST_LOG_ERROR("TkFrameworkImpl::createAsset: failed to create asset."); } return asset; } TkGroup* TkFrameworkImpl::createGroup(const TkGroupDesc& desc) { TkGroupImpl* group = TkGroupImpl::create(desc); if (group == nullptr) { NVBLAST_LOG_ERROR("TkFrameworkImpl::createGroup: failed to create group."); } return group; } TkActor* TkFrameworkImpl::createActor(const TkActorDesc& desc) { TkActor* actor = TkActorImpl::create(desc); if (actor == nullptr) { NVBLAST_LOG_ERROR("TkFrameworkImpl::createActor: failed to create actor."); } return actor; } TkJoint* TkFrameworkImpl::createJoint(const TkJointDesc& desc) { TkJointImpl** handle0 = nullptr; TkJointImpl** handle1 = nullptr; TkFamilyImpl* family0 = static_cast<TkFamilyImpl*>(desc.families[0]); TkFamilyImpl* family1 = static_cast<TkFamilyImpl*>(desc.families[1]); NVBLAST_CHECK_ERROR(family0 != nullptr || family1 != nullptr, "TkFrameworkImpl::createJoint: at least one family in the TkJointDesc must be valid.", return nullptr); NVBLAST_CHECK_ERROR(family0 == nullptr || desc.chunkIndices[0] < family0->getAssetImpl()->getChunkCount(), "TkFrameworkImpl::createJoint: desc.chunkIndices[0] is invalid.", return nullptr); NVBLAST_CHECK_ERROR(family1 == nullptr || desc.chunkIndices[1] < family1->getAssetImpl()->getChunkCount(), "TkFrameworkImpl::createJoint: desc.chunkIndices[1] is invalid.", return nullptr); const bool actorsAreTheSame = family0 == family1 && family0->getActorByChunk(desc.chunkIndices[0]) == family1->getActorByChunk(desc.chunkIndices[1]); NVBLAST_CHECK_ERROR(!actorsAreTheSame, "TkFrameworkImpl::createJoint: the chunks listed in the TkJointDesc must be in different actors.", return nullptr); if (family0 != nullptr) { const bool isSupportChunk = !isInvalidIndex(NvBlastAssetGetChunkToGraphNodeMap(family0->getAssetImpl()->getAssetLLInternal(), logLL)[desc.chunkIndices[0]]); NVBLAST_CHECK_ERROR(isSupportChunk, "TkFrameworkImpl::createJoint: desc.chunkIndices[0] is not a support chunk in the asset for desc.families[0]. Joint not created.", return nullptr); handle0 = family0->createExternalJointHandle(getFamilyID(family1), desc.chunkIndices[0], desc.chunkIndices[1]); NVBLAST_CHECK_ERROR(handle0 != nullptr, "TkFrameworkImpl::createJoint: could not create joint handle in family[0]. Joint not created.", return nullptr); } if (family1 != nullptr) { const bool isSupportChunk = !isInvalidIndex(NvBlastAssetGetChunkToGraphNodeMap(family1->getAssetImpl()->getAssetLLInternal(), logLL)[desc.chunkIndices[1]]); NVBLAST_CHECK_ERROR(isSupportChunk, "TkFrameworkImpl::createJoint: desc.chunkIndices[1] is not a support chunk in the asset for desc.families[1]. Joint not created.", return nullptr); if (family1 != family0) { handle1 = family1->createExternalJointHandle(getFamilyID(family0), desc.chunkIndices[1], desc.chunkIndices[0]); NVBLAST_CHECK_ERROR(handle1 != nullptr, "TkFrameworkImpl::createJoint: could not create joint handle in family[1]. Joint not created.", return nullptr); } } TkJointImpl* joint = NVBLAST_NEW(TkJointImpl)(desc, nullptr); NVBLAST_CHECK_ERROR(joint != nullptr, "TkFrameworkImpl::createJoint: failed to create joint.", return nullptr); const TkJointData& jointData = joint->getDataInternal(); if (handle0 != nullptr) { *handle0 = joint; static_cast<TkActorImpl*>(jointData.actors[0])->addJoint(joint->m_links[0]); } if (handle1 != nullptr) { *handle1 = joint; if (jointData.actors[0] != jointData.actors[1]) { static_cast<TkActorImpl*>(jointData.actors[1])->addJoint(joint->m_links[1]); } } return joint; } void TkFrameworkImpl::onCreate(TkIdentifiable& object) { const TkTypeImpl& type = static_cast<const TkTypeImpl&>(object.getType()); const uint32_t index = type.getIndex(); if (index >= m_objects.size()) { if (!isInvalidIndex(index)) { NVBLAST_LOG_ERROR("TkFrameworkImpl::addObject: object type unrecognized."); } return; } auto& objectArray = m_objects[index]; NVBLAST_ASSERT(objectArray.find(&object) == objectArray.end()); objectArray.pushBack(&object); } void TkFrameworkImpl::onDestroy(TkIdentifiable& object) { // remove from id map if present const auto id = object.getID(); if (!TkGUIDIsZero(&id)) { m_IDToObject.erase(id); } // remove from object list const TkTypeImpl& type = static_cast<const TkTypeImpl&>(object.getType()); const uint32_t index = type.getIndex(); if (index >= m_objects.size()) { if (!isInvalidIndex(index)) { NVBLAST_LOG_ERROR("TkFrameworkImpl::removeObject: object type unrecognized."); } return; } auto& objectArray = m_objects[index]; objectArray.findAndReplaceWithLast(&object); } void TkFrameworkImpl::onCreate(TkJointImpl& joint) { NVBLAST_CHECK_ERROR(m_joints.insert(&joint), "TkFrameworkImpl::onCreate: Joint already tracked.", return); } void TkFrameworkImpl::onDestroy(TkJointImpl& joint) { NVBLAST_CHECK_ERROR(m_joints.erase(&joint), "TkFrameworkImpl::onDestroy: Joint not tracked.", return); } void TkFrameworkImpl::onIDChange(TkIdentifiable& object, const NvBlastID& IDPrev, const NvBlastID& IDCurr) { if (!TkGUIDIsZero(&IDPrev)) { if (!m_IDToObject.erase(IDPrev)) { NVBLAST_LOG_ERROR("TkFrameworkImpl::reportIDChanged: object with previous ID doesn't exist."); } } if (!TkGUIDIsZero(&IDCurr)) { auto& value = m_IDToObject[IDCurr]; if (value != nullptr) { NVBLAST_LOG_ERROR("TkFrameworkImpl::reportIDChanged: object with new ID already exists."); return; } value = &object; } } } // namespace Blast } // namespace Nv //////// Global API implementation //////// Nv::Blast::TkFramework* NvBlastTkFrameworkCreate() { if (Nv::Blast::TkFrameworkImpl::get() != nullptr) { NVBLAST_LOG_ERROR("TkFramework::create: framework already created. Use TkFramework::get() to access."); return nullptr; } Nv::Blast::TkFrameworkImpl* framework = NVBLAST_NEW(Nv::Blast::TkFrameworkImpl) (); Nv::Blast::TkFrameworkImpl::set(framework); return Nv::Blast::TkFrameworkImpl::get(); } Nv::Blast::TkFramework* NvBlastTkFrameworkGet() { return Nv::Blast::TkFrameworkImpl::get(); }
15,109
C++
31.634989
233
0.657886
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkTask.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTTKTASK_H #define NVBLASTTKTASK_H #include "NvBlastTkGroupTaskManager.h" #include "NvTask.h" #include "NvBlastTkGroup.h" #include <atomic> #include <mutex> #include <condition_variable> namespace Nv { namespace Blast { /** Counting synchronization object for waiting on TkWorkers to finish. */ class TkTaskSync { public: /** Initializes with an expected number of notifications. */ TkTaskSync(uint32_t count) : m_count(count) {} /** Blocks until the expected number of notifications happened. */ void wait() { std::unique_lock<std::mutex> lk(m_mutex); m_cv.wait(lk, [&] { return m_count == 0; }); } /** Decrement the wait() count by one. */ void notify() { //PERF_SCOPE_H("TaskSync::notify"); std::unique_lock<std::mutex> lk(m_mutex); if (m_count > 0) { m_count--; } if (m_count == 0) { lk.unlock(); m_cv.notify_one(); } } /** Peek if notifications are pending. */ bool isDone() { std::unique_lock<std::mutex> lk(m_mutex); return m_count == 0; } /** Sets the expected number of notifications for wait() to unblock. */ void setCount(uint32_t count) { m_count = count; } private: std::mutex m_mutex; std::condition_variable m_cv; uint32_t m_count; }; /** Common job counter for all tasks. */ class TkAtomicCounter { public: TkAtomicCounter() : m_current(0), m_maxCount(0) {} bool isValid(uint32_t val) { return val < m_maxCount; } uint32_t next() { return m_current.fetch_add(1); } void reset(uint32_t maxCount) { m_maxCount = maxCount; m_current = 0; } private: std::atomic<uint32_t> m_current; uint32_t m_maxCount; }; /** A task running one group job after the other until done. Synchronizes atomically with its siblings. */ class TkGroupWorkerTask : public nvidia::task::NvLightCpuTask { public: TkGroupWorkerTask() : NvLightCpuTask(), m_group(nullptr), m_counter(nullptr), m_sync(nullptr) { } void setup(TkGroup* group, TkAtomicCounter* counter, TkTaskSync* sync) { m_group = group; m_counter = counter; m_sync = sync; } virtual void run() override { Nv::Blast::TkGroupWorker* worker = m_group->acquireWorker(); uint32_t jobID = m_counter->next(); while (m_counter->isValid(jobID)) { worker->process(jobID); jobID = m_counter->next(); } m_group->returnWorker(worker); } virtual void release() override { NvLightCpuTask::release(); // release the sync last m_sync->notify(); } virtual const char* getName() const override { return "BlastGroupWorkerTask"; } private: TkGroup* m_group; TkAtomicCounter* m_counter; TkTaskSync* m_sync; }; /** Implements TkGroupTaskManager */ class TkGroupTaskManagerImpl : public TkGroupTaskManager { public: TkGroupTaskManagerImpl(nvidia::task::NvTaskManager& taskManager, TkGroup* group) : m_taskManager(taskManager), m_sync(0), m_group(group) {} // TkGroupTaskManager API virtual void setGroup(TkGroup*) override; virtual uint32_t process(uint32_t) override; virtual void release() override; virtual bool wait(bool block) override; private: static const uint32_t TASKS_MAX_COUNT = 16; nvidia::task::NvTaskManager& m_taskManager; TkAtomicCounter m_counter; TkGroupWorkerTask m_tasks[TASKS_MAX_COUNT]; TkTaskSync m_sync; TkGroup* m_group; }; } // namespace Blast } // namespace Nv #endif // NVBLASTTKTASK_H
5,444
C
25.052631
99
0.641073
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkJointImpl.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastTkFrameworkImpl.h" #include "NvBlastTkJointImpl.h" #include "NvBlastTkActorImpl.h" #include "NvBlastTkAssetImpl.h" #include "NvBlastTkFamilyImpl.h" namespace Nv { namespace Blast { //////// Member functions //////// TkJointImpl::TkJointImpl(const TkJointDesc& desc, TkFamilyImpl* owner) : m_owner(owner) { userData = nullptr; // Do not fire off a creation event. Creation events will only be fired when a family-internal joint is created. NVBLAST_ASSERT(desc.families[0] != nullptr || desc.families[1] != nullptr); NVBLAST_ASSERT(desc.families[0] == nullptr || desc.chunkIndices[0] < static_cast<TkFamilyImpl*>(desc.families[0])->getAssetImpl()->getChunkCount()); NVBLAST_ASSERT(desc.attachPositions[0].isFinite()); NVBLAST_ASSERT(desc.families[1] == nullptr || desc.chunkIndices[1] < static_cast<TkFamilyImpl*>(desc.families[1])->getAssetImpl()->getChunkCount()); NVBLAST_ASSERT(desc.attachPositions[1].isFinite()); for (int i = 0; i < 2; ++i) { m_data.actors[i] = desc.families[i] != nullptr ? static_cast<TkFamilyImpl*>(desc.families[i])->getActorByChunk(desc.chunkIndices[i]) : nullptr; m_data.chunkIndices[i] = desc.chunkIndices[i]; m_data.attachPositions[i] = desc.attachPositions[i]; m_links[i].m_joint = this; } if (owner == nullptr) { TkFrameworkImpl::get()->onCreate(*this); } } void TkJointImpl::release() { removeReferencesInActors(); if (m_owner != nullptr) { // Internal joint m_owner->releaseJoint(*this); } else { // External joint removeReferencesInFamilies(); TkFrameworkImpl::get()->onDestroy(*this); NVBLAST_DELETE(this, TkJointImpl); } } void TkJointImpl::setActors(TkActorImpl* actor0, TkActorImpl* actor1, TkEventQueue* alternateQueue) { NVBLAST_ASSERT(m_data.actors[0] != nullptr || m_data.actors[1] != nullptr); const bool unreferenced = (actor0 == nullptr && m_data.actors[0] != nullptr) || (actor1 == nullptr && m_data.actors[1] != nullptr); removeReferencesInActors(); if (!unreferenced) { if (actor0 != nullptr) { actor0->addJoint(m_links[0]); } if (actor1 != nullptr && actor1 != actor0) // If the actors are the same, we only need one joint reference { actor1->addJoint(m_links[1]); } } // We do _not_ return if m_data.m_actors[0] == actor0 && m_data.m_actors[1] == actor1 since // this leads to a bug. This function will only be called when an actor is split. It is // possible that the two TkActors in a joint are the same as before, but in this case one // of the actors will be the split actor. Since will be represented by a different // physical actor, this case still needs to be reported in an event. Returning when neither // TkActor has changed will prevent that, and lead to unwanted joint disconnection. const uint32_t familyToUse = m_data.actors[0] != actor0 ? 0 : 1; TkEventQueue* q = alternateQueue == nullptr ? &static_cast<TkActorImpl*>(m_data.actors[familyToUse])->getFamilyImpl().getQueue() : alternateQueue; const bool jointWasInternal = m_data.actors[0] == m_data.actors[1]; if (unreferenced) { removeReferencesInFamilies(); actor0 = actor1 = nullptr; // Make both new actors NULL } if (!jointWasInternal || actor0 != actor1) { // The original actors were different, or they are now, signal a joint update TkJointUpdateEvent* e = q->allocData<TkJointUpdateEvent>(); e->joint = this; e->subtype = unreferenced ? TkJointUpdateEvent::Unreferenced : (jointWasInternal ? TkJointUpdateEvent::External : TkJointUpdateEvent::Changed); m_data.actors[0] = actor0; m_data.actors[1] = actor1; q->addEvent(e); } else if (jointWasInternal) { // The joint was originally created within the same actor and now it remains within the same actor. m_data.actors[0] = m_data.actors[1] = actor0; } } const TkJointData TkJointImpl::getData() const { return getDataInternal(); } void TkJointImpl::removeReferencesInActors() { TkActorImpl* actor0 = static_cast<TkActorImpl*>(m_data.actors[0]); TkActorImpl* actor1 = static_cast<TkActorImpl*>(m_data.actors[1]); if (actor0 != nullptr) { actor0->removeJoint(m_links[0]); } if (actor1 != nullptr && actor1 != actor0) // If the actors are the same, we only had one joint reference { actor1->removeJoint(m_links[1]); } } void TkJointImpl::removeReferencesInFamilies() { if (m_owner != nullptr) { return; // Only concerned with external joints } NVBLAST_ASSERT(m_data.actors[0] != m_data.actors[1] || m_data.actors[0] == nullptr); // This is enforced by the initial assumption in TkFrameworkImpl::createJoint. for (int i = 0; i < 2; ++i) { if (m_data.actors[i] != nullptr) { TkFamilyImpl& family = static_cast<TkActorImpl*>(m_data.actors[i])->getFamilyImpl(); TkJointImpl* joint = nullptr; const bool found = family.deleteExternalJointHandle(joint, getFamilyID(m_data.actors[i ^ 1]), m_data.chunkIndices[i], m_data.chunkIndices[i ^ 1]); NVBLAST_ASSERT((!found && m_data.actors[i ^ 1] == nullptr) || joint == this); // Might not be found if the actors in a family are in the process of being deleted NV_UNUSED(found); } } } } // namespace Blast } // namespace Nv
7,185
C++
35.851282
175
0.667223
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkAssetImpl.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTTKASSETIMPL_H #define NVBLASTTKASSETIMPL_H #include "NvBlastTkCommon.h" #include "NvBlastTkJoint.h" #include "NvBlastTkAsset.h" #include "NvBlastTkTypeImpl.h" #include "NvBlastArray.h" // Forward declarations struct NvBlastAsset; namespace Nv { namespace Blast { /** Implementation of TkAsset */ NVBLASTTK_IMPL_DECLARE(Asset) { public: TkAssetImpl(); TkAssetImpl(const NvBlastID& id); ~TkAssetImpl(); NVBLASTTK_IMPL_DEFINE_IDENTIFIABLE('A', 'S', 'S', 'T'); // Public methods /** Factory create method. This method creates a low-level asset and stores a reference to it. \param[in] desc Asset descriptor set by the user. \return a pointer to a new TkAssetImpl object if successful, NULL otherwise. */ static TkAssetImpl* create(const TkAssetDesc& desc); /** Static method to create an asset from an existing low-level asset. \param[in] assetLL A valid low-level asset passed in by the user. \param[in] jointDescs Optional joint descriptors to add to the new asset. \param[in] jointDescCount The number of joint descriptors in the jointDescs array. If non-zero, jointDescs cannot be NULL. \param[in] ownsAsset Whether or not to let this TkAssetImpl object release the low-level NvBlastAsset memory upon its own release. \return a pointer to a new TkAssetImpl object if successful, NULL otherwise. */ static TkAssetImpl* create(const NvBlastAsset* assetLL, Nv::Blast::TkAssetJointDesc* jointDescs = nullptr, uint32_t jointDescCount = 0, bool ownsAsset = false); /** \return a pointer to the underlying low-level NvBlastAsset associated with this asset. */ const NvBlastAsset* getAssetLLInternal() const; /** \return the number of internal joint descriptors stored with this asset. */ uint32_t getJointDescCountInternal() const; /** \return the array of internal joint descriptors stored with this asset, with size given by getJointDescCountInternal(). */ const TkAssetJointDesc* getJointDescsInternal() const; // Begin TkAsset virtual const NvBlastAsset* getAssetLL() const override; virtual uint32_t getChunkCount() const override; virtual uint32_t getLeafChunkCount() const override; virtual uint32_t getBondCount() const override; virtual const NvBlastChunk* getChunks() const override; virtual const NvBlastBond* getBonds() const override; virtual const NvBlastSupportGraph getGraph() const override; virtual uint32_t getDataSize() const override; virtual uint32_t getJointDescCount() const override; virtual const TkAssetJointDesc* getJointDescs() const override; // End TkAsset private: /** Utility to add a joint descriptor between the indexed chunks. The two chunks must be support chunks, and there must exist a bond between them. The joint's attachment positions will be the bond centroid. \param[in] chunkIndex0 The first chunk index. \param[in] chunkIndex1 The second chunk index. \return true iff successful. */ bool addJointDesc(uint32_t chunkIndex0, uint32_t chunkIndex1); NvBlastAsset* m_assetLL; //!< The underlying low-level asset. Array<TkAssetJointDesc>::type m_jointDescs; //!< The array of internal joint descriptors. bool m_ownsAsset; //!< Whether or not this asset should release its low-level asset upon its own release. }; //////// TkAssetImpl inline methods //////// NV_INLINE const NvBlastAsset* TkAssetImpl::getAssetLLInternal() const { return m_assetLL; } NV_INLINE uint32_t TkAssetImpl::getJointDescCountInternal() const { return m_jointDescs.size(); } NV_INLINE const TkAssetJointDesc* TkAssetImpl::getJointDescsInternal() const { return m_jointDescs.begin(); } } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTTKASSETIMPL_H
5,764
C
34.368098
180
0.695524
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkEventQueue.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTTKEVENTQUEUE_H #define NVBLASTTKEVENTQUEUE_H #include <algorithm> #include <vector> #include <mutex> #include <atomic> #include "NvBlastTkFrameworkImpl.h" #include "NvBlastAssert.h" namespace Nv { namespace Blast { /** A dispatcher queue providing preallocation and thread-safe insertions therein. Typical usage: - preallocate space for events and payload: - reserveEvents, reserveData - enable asserts to detect undersized storage (allocations are not thread safe): - protect(true) - get pointers to payload data and events to fill in, thread safe for preallocated memory: - allocData, addEvent - back on main thread, ensure consistency: - protect(false) - continue adding events and payload on main thread if necessary like above (allocations are safe here) eventually dispatch, or reset if dispatched by proxy */ class TkEventQueue { public: TkEventQueue() : m_currentEvent(0), m_poolCapacity(0), m_pool(nullptr), m_allowAllocs(true) {} /** Peek events queue for dispatch. Do not use in protected state. */ operator const Array<TkEvent>::type&() { NVBLAST_ASSERT(m_allowAllocs); NVBLAST_ASSERT(m_currentEvent == m_events.size()); return m_events; } /** Debug help to catch (unwanted) allocations during task work. Note that this will not actually avoid allocations, but assert in debug builds. Set true before using in distributed environment. Set false to return to single-thread mode. */ void protect(bool enable) { // During parallel use, m_events.size() and m_currentEvent are allowed to diverge. // This is fine because resizeUninitialized does not alter the stored data. NVBLAST_ASSERT(m_currentEvent <= m_events.capacity()); m_events.resizeUninitialized(m_currentEvent); m_allowAllocs = !enable; } /** Restores initial state. Data memory is currently not being reused. To be improved. */ void reset() { m_events.clear(); m_currentEvent = 0; for (void* mem : m_memory) { NVBLAST_FREE(mem); } m_memory.clear(); m_currentData = 0; m_allowAllocs = true; m_poolCapacity = 0; m_pool = nullptr; } /** Queue an event with a payload. */ template<class T> void addEvent(T* payload) { uint32_t index = m_currentEvent.fetch_add(1); // Should not allocate in protected state. NVBLAST_ASSERT(m_allowAllocs || m_currentEvent <= m_events.capacity()); m_events.resizeUninitialized(m_currentEvent); // During parallel use, m_events.size() and m_currentEvent are allowed to diverge. // Consistency is restored in protect(). NVBLAST_ASSERT(!m_allowAllocs || m_currentEvent == m_events.size()); TkEvent& evt = m_events[index]; evt.type = TkEvent::Type(T::EVENT_TYPE); evt.payload = payload; } /** Request storage for payload. */ template<typename T> T* allocData() { uint32_t index = m_currentData.fetch_add(sizeof(T)); if (m_currentData <= m_poolCapacity) { return reinterpret_cast<T*>(&m_pool[index]); } else { // Could do larger block allocation here. reserveData(sizeof(T)); // Account for the requested size. m_currentData = sizeof(T); return reinterpret_cast<T*>(&m_pool[0]); } } /** Preallocate a memory block of size Bytes for payload data. Note that this will inevitably allocate a new memory block. Subsequent calls to allocData will use this memory piecewise. */ void reserveData(size_t size) { NVBLAST_ASSERT(m_allowAllocs); m_pool = reinterpret_cast<uint8_t*>(allocDataBySize(size)); m_poolCapacity = size; m_currentData = 0; } /** Preallocate space for events. */ void reserveEvents(uint32_t n) { NVBLAST_ASSERT(m_allowAllocs); m_events.reserve(m_events.size() + n); } /** Add a listener to dispatch to. */ void addListener(TkEventListener& l) { m_listeners.pushBack(&l); } /** Remove a listener from dispatch list. */ void removeListener(TkEventListener& l) { m_listeners.findAndReplaceWithLast(&l); } /** Dispatch the stored events to the registered listeners. After dispatch, all data is invalidated. */ void dispatch() { dispatch(*this); reset(); } /** Proxy function to dispatch events to this queue's listeners. */ void dispatch(const Array<TkEvent>::type& events) const { if (events.size()) { for (TkEventListener* l : m_listeners) { BLAST_PROFILE_SCOPE_M("TkEventQueue::dispatch"); l->receive(events.begin(), events.size()); } } } private: /** Allocates and stores a block of size Bytes of payload data. */ void* allocDataBySize(size_t size) { void* memory = nullptr; if (size > 0) { memory = NVBLAST_ALLOC_NAMED(size, "TkEventQueue Data"); m_memory.pushBack(memory); } return memory; } Array<TkEvent>::type m_events; //!< holds events Array<void*>::type m_memory; //!< holds allocated data memory blocks std::atomic<uint32_t> m_currentEvent; //!< reference index for event insertion std::atomic<uint32_t> m_currentData; //!< reference index for data insertion size_t m_poolCapacity; //!< size of the currently active memory block (m_pool) uint8_t* m_pool; //!< the current memory block allocData() uses bool m_allowAllocs; //!< assert guard InlineArray<TkEventListener*,4>::type m_listeners; //!< objects to dispatch to }; } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTTKEVENTQUEUE_H
7,933
C
30.991935
136
0.621329
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkJointImpl.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTTKJOINTIMPL_H #define NVBLASTTKJOINTIMPL_H #include "NvBlastTkJoint.h" #include "NvBlastTkCommon.h" #include "NvBlastIndexFns.h" #include "NvBlastAssert.h" #include "NvBlastDLink.h" #include <atomic> namespace Nv { namespace Blast { // Forward declarations class TkActorImpl; class TkJointImpl; class TkFamilyImpl; class TkEventQueue; /** Double-sided link (DLink) which holds a reference back to a joint which contains it. */ struct TkJointLink : public DLink { TkJointImpl* m_joint; //!< The joint containing this link. }; /** Implementation of TkJoint. */ class TkJointImpl : public TkJoint { public: /** Blank constructor only creates valid TkJointLinks (point back to this object) */ TkJointImpl(); /** This constructor sets all internal data. If the joint is defined in an asset, the family instanced from that asset will own this joint, and the 'owner' parameter is that family. Otherwise, in the case where a joint is created from TkFramwork::createJoint, the joint is not owned by a family and 'owner' will be NULL. */ TkJointImpl(const TkJointDesc& desc, TkFamilyImpl* owner); // Begin TkObject virtual void release() override; // End TkObject // Begin TkJoint virtual const TkJointData getData() const override; // End TkJoint // Public API /** Internal method to access a const reference to the joint data. \return a const reference to the joint data. */ const TkJointData& getDataInternal() const; /** Internal method to access a non-const reference to the joint data. \return a non-const reference to the joint data. */ TkJointData& getDataWritable(); /** Set the actors that this joint attaches to. When the actors are different from the joint's current actors, an event will be generated on one of the actors' families event queues to signal the change. Alternatively, if alternateQueue is not NULL then it will be used to hold the event. If a non-NULL attached actor becomes NULL, then this joint will detach its references to both actors (if they exist) and send an event of subtype Unreferenced. This signals the user that the joint may be deleted. \param[in] actor0 The new TkActor to replace the first attached actor. \param[in] actor1 The new TkActor to replace the second attached actor. \param[in] alternateQueue If not NULL, this queue will be used to hold events generated by this function. */ void setActors(TkActorImpl* actor0, TkActorImpl* actor1, TkEventQueue* alternateQueue = nullptr); /** Ensures that any attached actors no longer refer to this joint. */ void removeReferencesInActors(); /** Ensures that any attached actors' families no longer refer to this joint. External joints (created using TkFramework::createJoint) are referenced by the attached actors' families. */ void removeReferencesInFamilies(); private: TkJointData m_data; //!< The data given to the user: attached actors, chunk indices, and actor-local attachment positions. TkJointLink m_links[2]; //!< One link for each actor in m_data.m_actors. If m_data.m_actors[0] == m_data.m_actors[1], then only m_links[0] is used. TkFamilyImpl* m_owner; //!< The owning family if this is an internal joint created during TkFramework::createActor() from a TkAssetDesc with joint flags. friend class TkFrameworkImpl; friend class TkFamilyImpl; friend class TkActorImpl; }; //////// TkJointImpl inline methods //////// NV_INLINE TkJointImpl::TkJointImpl() { m_links[0].m_joint = m_links[1].m_joint = this; } NV_INLINE const TkJointData& TkJointImpl::getDataInternal() const { return m_data; } NV_INLINE TkJointData& TkJointImpl::getDataWritable() { return m_data; } } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTTKJOINTIMPL_H
5,637
C
33.378049
162
0.710484
NVIDIA-Omniverse/PhysX/blast/source/sdk/toolkit/NvBlastTkFamilyImpl.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTTKFAMILYIMPL_H #define NVBLASTTKFAMILYIMPL_H #include "NvBlastTkCommon.h" #include "NvBlastTkFamily.h" #include "NvBlastTkTypeImpl.h" #include "NvBlastTkActorImpl.h" #include "NvBlastTkEventQueue.h" #include "NvBlastHashSet.h" #include "NvBlastHashMap.h" #include "NvBlast.h" #include "NvBlastAssert.h" #include "NvBlastDLink.h" // Forward declarations struct NvBlastFamily; namespace Nv { namespace Blast { // Forward declarations class TkGroupImpl; class TkAssetImpl; NVBLASTTK_IMPL_DECLARE(Family) { public: TkFamilyImpl(); TkFamilyImpl(const NvBlastID& id); ~TkFamilyImpl(); NVBLASTTK_IMPL_DEFINE_IDENTIFIABLE('A', 'C', 'T', 'F'); // Begin TkFamily virtual const NvBlastFamily* getFamilyLL() const override; virtual uint32_t getActorCount() const override; virtual uint32_t getActors(TkActor** buffer, uint32_t bufferSize, uint32_t indexStart = 0) const override; virtual void addListener(TkEventListener& l) override { m_queue.addListener(l); } virtual void removeListener(TkEventListener& l) override { m_queue.removeListener(l); } virtual void applyFracture(const NvBlastFractureBuffers* commands) override { applyFractureInternal(commands); } virtual const TkAsset* getAsset() const override; virtual void reinitialize(const NvBlastFamily* newFamily, TkGroup* group) override; // End TkFamily // Public methods static TkFamilyImpl* create(const TkAssetImpl* asset); const TkAssetImpl* getAssetImpl() const; NvBlastFamily* getFamilyLLInternal() const; uint32_t getActorCountInternal() const; TkActorImpl* addActor(NvBlastActor* actorLL); void applyFractureInternal(const NvBlastFractureBuffers* commands); void removeActor(TkActorImpl* actorLL); TkEventQueue& getQueue() { return m_queue; } TkActorImpl* getActorByActorLL(const NvBlastActor* actorLL); void updateJoints(TkActorImpl* actor, TkEventQueue* alternateQueue = nullptr); Array<TkActorImpl>::type& getActorsInternal(); uint32_t getInternalJointCount() const; TkJointImpl* getInternalJoints() const; TkJointImpl** createExternalJointHandle(const NvBlastID& otherFamilyID, uint32_t chunkIndex0, uint32_t chunkIndex1); bool deleteExternalJointHandle(TkJointImpl*& joint, const NvBlastID& otherFamilyID, uint32_t chunkIndex0, uint32_t chunkIndex1); void releaseJoint(TkJointImpl& joint); TkActorImpl* getActorByChunk(uint32_t chunkIndex); typedef nvidia::shdfnd::Pair<uint32_t, uint32_t> ExternalJointKey; //!< The chunk indices within the TkFamily objects joined by the joint. These chunks will be support chunks. TkJointImpl* findExternalJoint(const TkFamilyImpl* otherFamily, ExternalJointKey key) const; private: TkActorImpl* getActorByIndex(uint32_t index); struct JointSet { NvBlastID m_familyID; HashMap<ExternalJointKey, TkJointImpl*>::type m_joints; }; typedef HashMap<NvBlastID, uint32_t>::type FamilyIDMap; NvBlastFamily* m_familyLL; Array<TkActorImpl>::type m_actors; uint32_t m_internalJointCount; Array<uint8_t>::type m_internalJointBuffer; Array<JointSet*>::type m_jointSets; FamilyIDMap m_familyIDMap; const TkAssetImpl* m_asset; TkEventQueue m_queue; }; //////// TkFamilyImpl inline methods //////// NV_INLINE const TkAssetImpl* TkFamilyImpl::getAssetImpl() const { return m_asset; } NV_INLINE NvBlastFamily* TkFamilyImpl::getFamilyLLInternal() const { return m_familyLL; } NV_INLINE uint32_t TkFamilyImpl::getActorCountInternal() const { NVBLAST_ASSERT(m_familyLL != nullptr); return NvBlastFamilyGetActorCount(m_familyLL, logLL); } NV_INLINE TkActorImpl* TkFamilyImpl::getActorByIndex(uint32_t index) { NVBLAST_ASSERT(index < m_actors.size()); return &m_actors[index]; } NV_INLINE TkActorImpl* TkFamilyImpl::getActorByActorLL(const NvBlastActor* actorLL) { uint32_t index = NvBlastActorGetIndex(actorLL, logLL); return getActorByIndex(index); } NV_INLINE Array<TkActorImpl>::type& TkFamilyImpl::getActorsInternal() { return m_actors; } NV_INLINE uint32_t TkFamilyImpl::getInternalJointCount() const { return m_internalJointCount; } NV_INLINE TkJointImpl* TkFamilyImpl::getInternalJoints() const { return const_cast<TkJointImpl*>(reinterpret_cast<const TkJointImpl*>(m_internalJointBuffer.begin())); } NV_INLINE void TkFamilyImpl::releaseJoint(TkJointImpl& joint) { NVBLAST_ASSERT(joint.m_owner == this); NVBLAST_ASSERT(&joint >= getInternalJoints() && &joint < getInternalJoints() + getInternalJointCount() * sizeof(TkJointImpl)); joint.~TkJointImpl(); joint.m_owner = nullptr; } //////// Inline global functions //////// NV_INLINE const NvBlastID& getFamilyID(const TkActor* actor) { return actor != nullptr ? static_cast<const TkActorImpl*>(actor)->getFamilyImpl().getIDInternal() : *reinterpret_cast<const NvBlastID*>("\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"); } NV_INLINE const NvBlastID& getFamilyID(const TkFamilyImpl* family) { return family != nullptr ? family->getIDInternal() : *reinterpret_cast<const NvBlastID*>("\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"); } } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTTKFAMILYIMPL_H
7,463
C
31.593886
182
0.679619
NVIDIA-Omniverse/PhysX/blast/source/sdk/globals/NvBlastInternalProfiler.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTINTERNALPROFILER_H #define NVBLASTINTERNALPROFILER_H #include "NvPreprocessor.h" #if NV_NVTX #include "nvToolsExt.h" NV_INLINE void platformZoneStart(const char* name) { nvtxRangePushA(name); } NV_INLINE void platformZoneEnd() { nvtxRangePop(); } #else NV_INLINE void platformZoneStart(const char*) { } NV_INLINE void platformZoneEnd() { } #endif namespace Nv { namespace Blast { /** Profiler detail to be reported. The higher setting is used, the more details are reported. */ struct InternalProfilerDetail { enum Level { LOW, MEDIUM, HIGH }; }; NV_C_API void NvBlastInternalProfilerSetPlatformEnabled(bool platformEnabled); NV_C_API void NvBlastInternalProfilerSetDetail(Nv::Blast::InternalProfilerDetail::Level); NV_C_API Nv::Blast::InternalProfilerDetail::Level NvBlastInternalProfilerGetDetail(); #if NV_PROFILE NV_C_API void NvBlastProfilerBegin(const char* name, Nv::Blast::InternalProfilerDetail::Level); NV_C_API void NvBlastProfilerEnd(const void* name, Nv::Blast::InternalProfilerDetail::Level); class ProfileScope { public: ProfileScope(const char* name, InternalProfilerDetail::Level level) :m_name(name), m_level(level) { NvBlastProfilerBegin(m_name, m_level); } ~ProfileScope() { NvBlastProfilerEnd(m_name, m_level); } private: const char* m_name; InternalProfilerDetail::Level m_level; }; #define BLAST_PROFILE_PREFIX "Blast: " #define BLAST_PROFILE_ZONE_BEGIN(name) Nv::Blast::NvBlastProfilerBegin(BLAST_PROFILE_PREFIX name, Nv::Blast::InternalProfilerDetail::HIGH) #define BLAST_PROFILE_ZONE_END(name) Nv::Blast::NvBlastProfilerEnd(BLAST_PROFILE_PREFIX name, Nv::Blast::InternalProfilerDetail::HIGH) #define BLAST_PROFILE_SCOPE(name, detail) Nv::Blast::ProfileScope NV_CONCAT(_scope,__LINE__) (BLAST_PROFILE_PREFIX name, detail) #define BLAST_PROFILE_SCOPE_L(name) BLAST_PROFILE_SCOPE(name, Nv::Blast::InternalProfilerDetail::LOW) #define BLAST_PROFILE_SCOPE_M(name) BLAST_PROFILE_SCOPE(name, Nv::Blast::InternalProfilerDetail::MEDIUM) #define BLAST_PROFILE_SCOPE_H(name) BLAST_PROFILE_SCOPE(name, Nv::Blast::InternalProfilerDetail::HIGH) #else #define BLAST_PROFILE_ZONE_BEGIN(name) #define BLAST_PROFILE_ZONE_END(name) #define BLAST_PROFILE_SCOPE_L(name) #define BLAST_PROFILE_SCOPE_M(name) #define BLAST_PROFILE_SCOPE_H(name) #endif } // namespace Blast } // namespace Nv #endif
4,037
C
35.709091
143
0.747089
NVIDIA-Omniverse/PhysX/blast/source/sdk/globals/NvBlastGlobals.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "NvBlastGlobals.h" #include "NvBlastAssert.h" #include "NvAllocatorCallback.h" #include "NvErrorCallback.h" #include "NsGlobals.h" #include <cstdlib> #include <sstream> #include <iostream> #if NV_WINDOWS_FAMILY #include <windows.h> #endif #if NV_WINDOWS_FAMILY || NV_LINUX_FAMILY #include <malloc.h> #endif namespace Nv { namespace Blast { #if NV_WINDOWS_FAMILY // on win32 we only have 8-byte alignment guaranteed, but the CRT provides special aligned allocation fns NV_FORCE_INLINE void* platformAlignedAlloc(size_t size) { return _aligned_malloc(size, 16); } NV_FORCE_INLINE void platformAlignedFree(void* ptr) { _aligned_free(ptr); } #elif NV_LINUX_FAMILY NV_FORCE_INLINE void* platformAlignedAlloc(size_t size) { return ::memalign(16, size); } NV_FORCE_INLINE void platformAlignedFree(void* ptr) { ::free(ptr); } #else NV_FORCE_INLINE void* platformAlignedAlloc(size_t size) { const int A = 16; unsigned char* mem = (unsigned char*)malloc(size + A); const unsigned char offset = (unsigned char)((uintptr_t)A - (uintptr_t)mem % A - 1); mem += offset; *mem++ = offset; return mem; } NV_FORCE_INLINE void platformAlignedFree(void* ptr) { if (ptr != nullptr) { unsigned char* mem = (unsigned char*)ptr; const unsigned char offset = *--mem; ::free(mem - offset); } } #endif class DefaultAllocatorCallback : public nvidia::NvAllocatorCallback { public: virtual void* allocate(size_t size, const char* typeName, const char* filename, int line) override { NV_UNUSED(typeName); NV_UNUSED(filename); NV_UNUSED(line); return platformAlignedAlloc(size); } virtual void deallocate(void* ptr) override { platformAlignedFree(ptr); } }; DefaultAllocatorCallback s_defaultAllocatorCallback; class DefaultErrorCallback : public nvidia::NvErrorCallback { virtual void reportError(nvidia::NvErrorCode::Enum code, const char* msg, const char* file, int line) override { #if 1 || NV_DEBUG || NV_CHECKED std::stringstream str; str << "NvBlast "; bool critical = false; switch (code) { case nvidia::NvErrorCode::eNO_ERROR: str << "[Info]"; critical = false; break; case nvidia::NvErrorCode::eDEBUG_INFO: str << "[Debug Info]"; critical = false; break; case nvidia::NvErrorCode::eDEBUG_WARNING: str << "[Debug Warning]"; critical = false; break; case nvidia::NvErrorCode::eINVALID_PARAMETER: str << "[Invalid Parameter]"; critical = true; break; case nvidia::NvErrorCode::eINVALID_OPERATION: str << "[Invalid Operation]"; critical = true; break; case nvidia::NvErrorCode::eOUT_OF_MEMORY: str << "[Out of] Memory"; critical = true; break; case nvidia::NvErrorCode::eINTERNAL_ERROR: str << "[Internal Error]"; critical = true; break; case nvidia::NvErrorCode::eABORT: str << "[Abort]"; critical = true; break; case nvidia::NvErrorCode::ePERF_WARNING: str << "[Perf Warning]"; critical = false; break; default: NVBLAST_ASSERT(false); } str << file << "(" << line << "): " << msg << "\n"; std::string message = str.str(); std::cout << message; #if NV_WINDOWS_FAMILY OutputDebugStringA(message.c_str()); #endif NVBLAST_ASSERT_WITH_MESSAGE(!critical, message.c_str()); #else NV_UNUSED(code); NV_UNUSED(msg); NV_UNUSED(file); NV_UNUSED(line); #endif } }; static DefaultErrorCallback s_defaultErrorCallback; static nvidia::NvAllocatorCallback* s_allocatorCallback = &s_defaultAllocatorCallback; static nvidia::NvErrorCallback* s_errorCallback = &s_defaultErrorCallback; nvidia::NvProfilerCallback *g_profilerCallback = nullptr; } // namespace Blast } // namespace Nv //////// Global API implementation //////// nvidia::NvAllocatorCallback* NvBlastGlobalGetAllocatorCallback() { return Nv::Blast::s_allocatorCallback; } void NvBlastGlobalSetAllocatorCallback(nvidia::NvAllocatorCallback* allocator) { Nv::Blast::s_allocatorCallback = allocator ? allocator : &Nv::Blast::s_defaultAllocatorCallback; } nvidia::NvErrorCallback* NvBlastGlobalGetErrorCallback() { return Nv::Blast::s_errorCallback; } void NvBlastGlobalSetErrorCallback(nvidia::NvErrorCallback* errorCallback) { Nv::Blast::s_errorCallback = errorCallback ? errorCallback : &Nv::Blast::s_defaultErrorCallback; } nvidia::NvProfilerCallback* NvBlastGlobalGetProfilerCallback() { return Nv::Blast::g_profilerCallback; } void NvBlastGlobalSetProfilerCallback(nvidia::NvProfilerCallback* profilerCallback) { Nv::Blast::g_profilerCallback = profilerCallback; }
6,403
C++
32.181347
114
0.689989
NVIDIA-Omniverse/PhysX/blast/source/sdk/globals/NvBlastInternalProfiler.cpp
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #include "stdint.h" #include "NvProfiler.h" #include "NvBlastGlobals.h" #include "NvBlastInternalProfiler.h" #include "NvBlastAssert.h" namespace Nv { namespace Blast { #define SUPPORTS_THREAD_LOCAL (!NV_VC || NV_VC > 12) struct InternalProfilerData { const char* name; void* data; }; #if SUPPORTS_THREAD_LOCAL static const int32_t PROFILER_MAX_NESTED_DEPTH = 64; static thread_local InternalProfilerData th_ProfileData[PROFILER_MAX_NESTED_DEPTH]; static thread_local int32_t th_depth = 0; #endif extern nvidia::NvProfilerCallback *g_profilerCallback; /** Wraps the nvidia::NvProfilerCallback set in NvBlastGlobalSetProfilerCallback. */ class InternalProfiler { public: /** Construct a InternalProfiler with platform specific profiler signals disabled. */ InternalProfiler() : m_platformEnabled(false) {} void zoneStart(const char* name) { #if SUPPORTS_THREAD_LOCAL if (g_profilerCallback) { void* data = g_profilerCallback->zoneStart(name, false, 0xb1a57); if (th_depth < PROFILER_MAX_NESTED_DEPTH && th_depth >= 0) { th_ProfileData[th_depth].name = name; th_ProfileData[th_depth].data = data; th_depth++; } else { NVBLAST_ASSERT(th_depth < PROFILER_MAX_NESTED_DEPTH && th_depth >= 0); } } #endif if (m_platformEnabled) { platformZoneStart(name); } } void zoneEnd() { #if SUPPORTS_THREAD_LOCAL if (g_profilerCallback) { th_depth--; if (th_depth >= 0) { InternalProfilerData& pd = th_ProfileData[th_depth]; g_profilerCallback->zoneEnd(pd.data, pd.name, false, 0xb1a57); } else { NVBLAST_ASSERT(th_depth >= 0); } } #endif if (m_platformEnabled) { platformZoneEnd(); } } ////// local interface ////// /** Enable or disable platform specific profiler signals. Disabled by default. \param[in] enabled true enables, false disables platform profiler calls. */ void setPlatformEnabled(bool enabled) { m_platformEnabled = enabled; } private: bool m_platformEnabled; }; static InternalProfiler g_InternalProfiler; static InternalProfilerDetail::Level g_ProfilerDetail = InternalProfilerDetail::LOW; void NvBlastInternalProfilerSetPlatformEnabled(bool platformEnabled) { return g_InternalProfiler.setPlatformEnabled(platformEnabled); } void NvBlastInternalProfilerSetDetail(InternalProfilerDetail::Level level) { g_ProfilerDetail = level; } InternalProfilerDetail::Level NvBlastProfilerGetDetail() { return g_ProfilerDetail; } void NvBlastProfilerBegin(const char* name, InternalProfilerDetail::Level level) { if (level <= NvBlastProfilerGetDetail()) { g_InternalProfiler.zoneStart(name); } } void NvBlastProfilerEnd(const void* /*name*/, InternalProfilerDetail::Level level) { if (level <= NvBlastProfilerGetDetail()) { g_InternalProfiler.zoneEnd(); } } } // namespace Blast } // namespace Nv
4,824
C++
27.052325
86
0.67558
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastArray.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTARRAY_H #define NVBLASTARRAY_H #include "NvBlastAllocator.h" #include "NsInlineArray.h" namespace Nv { namespace Blast { /** Wrapped NvShared Array that uses NvBlastGlobals AllocatorCallback. */ template <class T> struct Array { typedef nvidia::shdfnd::Array<T, Allocator> type; }; /** Wrapped NvShared InlineArray that uses NvBlastGlobals AllocatorCallback. InlineArraya is array that pre-allocates for N elements. */ template <class T, uint32_t N> struct InlineArray { typedef nvidia::shdfnd::InlineArray<T, N, Allocator> type; }; } // namespace Blast } // namespace Nv #endif // #ifndef NVBLASTARRAY_H
2,203
C
31.895522
74
0.761235
NVIDIA-Omniverse/PhysX/blast/source/sdk/common/NvBlastFixedPriorityQueue.h
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved. #ifndef NVBLASTFIXEDPRIORITYQUEUE_H #define NVBLASTFIXEDPRIORITYQUEUE_H #include "NvBlastAssert.h" #include "NvBlastMemory.h" namespace Nv { namespace Blast { /*! FixedPriorityQueue is a priority queue container which is intended to be used with placement new on chunk of memory. It'll use following memory for data layout. As follows: // some memory char ​*buf = new char[64 *​ 1024]; // placement new on this memory FixedPriorityQueue<SomeClass>* arr = new (buf) FixedPriorityQueue<SomeClass>(); // you can get max requiredMemorySize by an array of 'capacity' elements count to use memory left buf = buf + FixedPriorityQueue<SomeClass>::requiredMemorySize(capacity); buf: +------------------------------------------------------------+ | uint32_t | T[0] | T[1] | T[2] | ... | +------------------------------------------------------------+ */ template <typename A> struct Less { bool operator()(const A& a, const A& b) const { return a < b; } }; template<class Element, class Comparator = Less<Element> > class FixedPriorityQueue : protected Comparator // inherit so that stateless comparators take no space { public: FixedPriorityQueue(const Comparator& less = Comparator()) : Comparator(less), mHeapSize(0) { } ~FixedPriorityQueue() { } static size_t requiredMemorySize(uint32_t capacity) { return align16(sizeof(FixedPriorityQueue<Element, Comparator>)) + align16(capacity * sizeof(Element)); } //! Get the element with the highest priority const Element top() const { return data()[0]; } //! Get the element with the highest priority Element top() { return data()[0]; } //! Check to whether the priority queue is empty bool empty() const { return (mHeapSize == 0); } //! Empty the priority queue void clear() { mHeapSize = 0; } //! Insert a new element into the priority queue. Only valid when size() is less than Capacity void push(const Element& value) { uint32_t newIndex; uint32_t parentIndex = parent(mHeapSize); for (newIndex = mHeapSize; newIndex > 0 && compare(value, data()[parentIndex]); newIndex = parentIndex, parentIndex= parent(newIndex)) { data()[ newIndex ] = data()[parentIndex]; } data()[newIndex] = value; mHeapSize++; NVBLAST_ASSERT(valid()); } //! Delete the highest priority element. Only valid when non-empty. Element pop() { NVBLAST_ASSERT(mHeapSize > 0); uint32_t i, child; //try to avoid LHS uint32_t tempHs = mHeapSize-1; mHeapSize = tempHs; Element min = data()[0]; Element last = data()[tempHs]; for (i = 0; (child = left(i)) < tempHs; i = child) { /* Find highest priority child */ const uint32_t rightChild = child + 1; child += ((rightChild < tempHs) & compare((data()[rightChild]), (data()[child]))) ? 1 : 0; if(compare(last, data()[child])) break; data()[i] = data()[child]; } data()[ i ] = last; NVBLAST_ASSERT(valid()); return min; } //! Make sure the priority queue sort all elements correctly bool valid() const { const Element& min = data()[0]; for(uint32_t i=1; i<mHeapSize; ++i) { if(compare(data()[i], min)) return false; } return true; } //! Return number of elements in the priority queue uint32_t size() const { return mHeapSize; } private: uint32_t mHeapSize; NV_FORCE_INLINE Element* data() { return (Element*)((char*)this + sizeof(FixedPriorityQueue<Element, Comparator>)); } NV_FORCE_INLINE Element* data() const { return (Element*)((char*)this + sizeof(FixedPriorityQueue<Element, Comparator>)); } bool compare(const Element& a, const Element& b) const { return Comparator::operator()(a,b); } static uint32_t left(uint32_t nodeIndex) { return (nodeIndex << 1) + 1; } static uint32_t parent(uint32_t nodeIndex) { return (nodeIndex - 1) >> 1; } FixedPriorityQueue<Element, Comparator>& operator = (const FixedPriorityQueue<Element, Comparator>); }; } // namespace Blast } // namespace Nv #endif // ifndef NVBLASTFIXEDPRIORITYQUEUE_H
6,160
C
28.338095
143
0.621753