file_path (string, 21-202 chars) | content (string, 12-1.02M chars) | size (int64, 12-1.02M) | lang (string, 9 classes) | avg_line_length (float64, 3.33-100) | max_line_length (int64, 10-993) | alphanum_fraction (float64, 0.27-0.93)
---|---|---|---|---|---|---
NVIDIA-Omniverse/PhysX/physx/include/gpu/PxGpu.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
#ifndef PX_GPU_H
#define PX_GPU_H
#include "PxPhysXConfig.h"
#if PX_SUPPORT_GPU_PHYSX
#include "cudamanager/PxCudaContextManager.h"
#include "foundation/Px.h"
#include "foundation/PxPreprocessor.h"
#include "foundation/PxFoundation.h"
#include "common/PxPhysXCommonConfig.h"
/**
\brief PxGpuLoadHook
This is a helper class for loading the PhysXGpu dll.
If a PhysXGpu dll with a non-default file name needs to be loaded,
PxGpuLoadHook can be sub-classed to provide the custom filenames.
Once the names are set, the instance must be set for use by PhysX.dll using PxSetPhysXGpuLoadHook().
@see PxSetPhysXGpuLoadHook()
*/
class PxGpuLoadHook
{
public:
PxGpuLoadHook() {}
virtual ~PxGpuLoadHook() {}
virtual const char* getPhysXGpuDllName() const = 0;
protected:
private:
};
/**
\brief Sets GPU load hook instance for PhysX dll.
\param[in] hook GPU load hook.
@see PxGpuLoadHook
*/
PX_C_EXPORT PX_PHYSX_CORE_API void PX_CALL_CONV PxSetPhysXGpuLoadHook(const PxGpuLoadHook* hook);
/**
* \brief Ask the NVIDIA control panel which GPU has been selected for use by
* PhysX. Returns -1 if no PhysX capable GPU is found or GPU PhysX has
* been disabled.
*/
PX_C_EXPORT PX_PHYSX_CORE_API int PX_CALL_CONV PxGetSuggestedCudaDeviceOrdinal(physx::PxErrorCallback& errc);
/**
* \brief Allocate a CUDA Context manager, complete with heaps.
* You only need one CUDA context manager per GPU device you intend to use for
* CUDA tasks.
\param[in] foundation PhysXFoundation instance.
\param[in] desc Cuda context manager desc.
\param[in] profilerCallback PhysX profiler callback instance.
\param[in] launchSynchronous Set launchSynchronous to true for CUDA to report the actual point of failure.
@see PxGetProfilerCallback()
*/
PX_C_EXPORT PX_PHYSX_CORE_API physx::PxCudaContextManager* PX_CALL_CONV PxCreateCudaContextManager(physx::PxFoundation& foundation, const physx::PxCudaContextManagerDesc& desc, physx::PxProfilerCallback* profilerCallback = NULL, bool launchSynchronous = false);
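// --- Illustrative usage sketch (not part of the PhysX API declared above) ---
// A minimal helper, assuming a typical setup, that combines the two entry points
// above: query the suggested CUDA device ordinal first, then create the CUDA
// context manager. The helper name is hypothetical and error handling is reduced
// to returning NULL; real applications may also want to pass a profiler callback.
PX_INLINE physx::PxCudaContextManager* createCudaContextManagerSketch(physx::PxFoundation& foundation, physx::PxErrorCallback& errorCallback)
{
	const int deviceOrdinal = PxGetSuggestedCudaDeviceOrdinal(errorCallback);
	if(deviceOrdinal < 0)
		return NULL;	// no PhysX-capable GPU found, or GPU PhysX disabled in the control panel

	physx::PxCudaContextManagerDesc desc;	// a default-constructed descriptor is enough for a basic setup
	physx::PxCudaContextManager* contextManager = PxCreateCudaContextManager(foundation, desc);
	if(contextManager && !contextManager->contextIsValid())
	{
		contextManager->release();	// the manager exists but no usable CUDA context could be acquired
		return NULL;
	}
	return contextManager;
}
// --- End of usage sketch ---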
/**
* \brief Sets the profiler callback for PhysX GPU.
\param[in] profilerCallback PhysX profiler callback instance.
@see PxGetProfilerCallback()
*/
PX_C_EXPORT PX_PHYSX_CORE_API void PX_CALL_CONV PxSetPhysXGpuProfilerCallback(physx::PxProfilerCallback* profilerCallback);
/**
\brief Internally used callback to register function names of cuda kernels
*/
PX_C_EXPORT PX_PHYSX_CORE_API void PX_CALL_CONV PxCudaRegisterFunction(int moduleIndex, const char* functionName);
/**
\brief Internally used callback to register cuda modules at load time
*/
PX_C_EXPORT PX_PHYSX_CORE_API void** PX_CALL_CONV PxCudaRegisterFatBinary(void*);
/**
\brief Access to the registered cuda modules
*/
PX_C_EXPORT PX_PHYSX_CORE_API void** PX_CALL_CONV PxGetCudaModuleTable();
/**
\brief Number of registered cuda modules
*/
PX_C_EXPORT PX_PHYSX_CORE_API physx::PxU32 PX_CALL_CONV PxGetCudaModuleTableSize();
/**
\brief Access to the loaded cuda functions (kernels)
*/
PX_C_EXPORT PX_PHYSX_CORE_API physx::PxKernelIndex* PX_CALL_CONV PxGetCudaFunctionTable();
/**
\brief Number of loaded cuda functions (kernels)
*/
PX_C_EXPORT PX_PHYSX_CORE_API physx::PxU32 PX_CALL_CONV PxGetCudaFunctionTableSize();
namespace physx
{
class PxPhysicsGpu;
}
PX_C_EXPORT PX_PHYSX_CORE_API physx::PxPhysicsGpu* PX_CALL_CONV PxGetPhysicsGpu();
#endif // PX_SUPPORT_GPU_PHYSX
#endif
| 4,927 | C | 33.222222 | 261 | 0.768419 |
NVIDIA-Omniverse/PhysX/physx/include/geometry/PxConvexMeshGeometry.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_CONVEX_MESH_GEOMETRY_H
#define PX_CONVEX_MESH_GEOMETRY_H
/** \addtogroup geomutils
@{
*/
#include "geometry/PxGeometry.h"
#include "geometry/PxMeshScale.h"
#include "common/PxCoreUtilityTypes.h"
#include "geometry/PxConvexMesh.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
class PxConvexMesh;
/**
\brief Flags controlling the simulated behavior of the convex mesh geometry.
Used in ::PxConvexMeshGeometryFlags.
*/
struct PxConvexMeshGeometryFlag
{
enum Enum
{
eTIGHT_BOUNDS = (1<<0) //!< Use tighter (but more expensive to compute) bounds around the convex geometry.
};
};
/**
\brief Collection of set bits defined in PxConvexMeshGeometryFlag.
@see PxConvexMeshGeometryFlag
*/
typedef PxFlags<PxConvexMeshGeometryFlag::Enum,PxU8> PxConvexMeshGeometryFlags;
PX_FLAGS_OPERATORS(PxConvexMeshGeometryFlag::Enum,PxU8)
/**
\brief Convex mesh geometry class.
This class unifies a convex mesh object with a scaling transform, and
lets the combined object be used anywhere a PxGeometry is needed.
The scaling is a transform along arbitrary axes contained in the scale object.
The vertices of the mesh in geometry (or shape) space are obtained by applying the
PxMeshScale::toMat33() transform to the vertex-space vertices stored in the
PxConvexMesh object.
*/
class PxConvexMeshGeometry : public PxGeometry
{
public:
/**
\brief Constructor. By default creates an empty object with a NULL mesh and identity scale.
\param[in] mesh Mesh pointer. May be NULL, though this will not make the object valid for shape construction.
\param[in] scaling Scale factor.
\param[in] flags Mesh flags.
*/
PX_INLINE PxConvexMeshGeometry( PxConvexMesh* mesh = NULL,
const PxMeshScale& scaling = PxMeshScale(),
PxConvexMeshGeometryFlags flags = PxConvexMeshGeometryFlag::eTIGHT_BOUNDS) :
PxGeometry (PxGeometryType::eCONVEXMESH),
scale (scaling),
convexMesh (mesh),
meshFlags (flags)
{
}
/**
\brief Copy constructor.
\param[in] that Other object
*/
PX_INLINE PxConvexMeshGeometry(const PxConvexMeshGeometry& that) :
PxGeometry (that),
scale (that.scale),
convexMesh (that.convexMesh),
meshFlags (that.meshFlags)
{
}
/**
\brief Assignment operator
*/
PX_INLINE void operator=(const PxConvexMeshGeometry& that)
{
mType = that.mType;
scale = that.scale;
convexMesh = that.convexMesh;
meshFlags = that.meshFlags;
}
/**
\brief Returns true if the geometry is valid.
\return True if the current settings are valid for shape creation.
\note A valid convex mesh has a positive scale value in each direction (scale.scale.x > 0, scale.scale.y > 0, scale.scale.z > 0).
It is illegal to call PxRigidActor::createShape and PxPhysics::createShape with a convex that has zero extent in any direction.
@see PxRigidActor::createShape, PxPhysics::createShape
*/
PX_INLINE bool isValid() const;
public:
PxMeshScale scale; //!< The scaling transformation (from vertex space to shape space).
PxConvexMesh* convexMesh; //!< A reference to the convex mesh object.
PxConvexMeshGeometryFlags meshFlags; //!< Mesh flags.
PxPadding<3> paddingFromFlags; //!< padding for mesh flags
};
PX_INLINE bool PxConvexMeshGeometry::isValid() const
{
if(mType != PxGeometryType::eCONVEXMESH)
return false;
if(!scale.scale.isFinite() || !scale.rotation.isUnit())
return false;
if(!scale.isValidForConvexMesh())
return false;
if(!convexMesh)
return false;
return true;
}
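// --- Illustrative usage sketch (not part of the original header) ---
// Shows how a PxConvexMesh (assumed to have been created elsewhere, e.g. via
// PxPhysics::createConvexMesh) is combined with a scale into a geometry that can
// be passed to shape creation. The helper name is hypothetical.
PX_INLINE PxConvexMeshGeometry makeScaledConvexGeometry(PxConvexMesh* mesh, const PxVec3& scaleFactors)
{
	// Scale along the mesh-local axes (identity scale rotation); keep the default tight-bounds flag.
	const PxMeshScale scale(scaleFactors, PxQuat(PxIdentity));
	PxConvexMeshGeometry geometry(mesh, scale, PxConvexMeshGeometryFlag::eTIGHT_BOUNDS);
	PX_ASSERT(geometry.isValid());	// requires a non-NULL mesh and strictly positive scale values
	return geometry;
}
// --- End of usage sketch ---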
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 5,179 | C | 30.779141 | 128 | 0.748793 |
NVIDIA-Omniverse/PhysX/physx/include/geometry/PxSphereGeometry.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_SPHERE_GEOMETRY_H
#define PX_SPHERE_GEOMETRY_H
/** \addtogroup geomutils
@{
*/
#include "geometry/PxGeometry.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief A class representing the geometry of a sphere.
Spheres are defined by their radius.
\note The scaling of the sphere is expected to be baked into this value; there is no additional scaling parameter.
*/
class PxSphereGeometry : public PxGeometry
{
public:
/**
\brief Constructor.
*/
PX_INLINE PxSphereGeometry(PxReal ir=0.0f) : PxGeometry(PxGeometryType::eSPHERE), radius(ir) {}
/**
\brief Copy constructor.
\param[in] that Other object
*/
PX_INLINE PxSphereGeometry(const PxSphereGeometry& that) : PxGeometry(that), radius(that.radius) {}
/**
\brief Assignment operator
*/
PX_INLINE void operator=(const PxSphereGeometry& that)
{
mType = that.mType;
radius = that.radius;
}
/**
\brief Returns true if the geometry is valid.
\return True if the current settings are valid
\note A valid sphere has radius > 0.
It is illegal to call PxRigidActor::createShape and PxPhysics::createShape with a sphere that has zero radius.
@see PxRigidActor::createShape, PxPhysics::createShape
*/
PX_INLINE bool isValid() const;
public:
/**
\brief The radius of the sphere.
*/
PxReal radius;
};
PX_INLINE bool PxSphereGeometry::isValid() const
{
if(mType != PxGeometryType::eSPHERE)
return false;
if(!PxIsFinite(radius))
return false;
if(radius <= 0.0f)
return false;
return true;
}
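// --- Illustrative usage sketch (not part of the original header) ---
// Since there is no separate scale parameter, any scaling has to be baked into
// the radius before the geometry is created. The helper name and the
// uniformScale parameter are hypothetical.
PX_INLINE PxSphereGeometry makeScaledSphere(PxReal unscaledRadius, PxReal uniformScale)
{
	// Callers should still check isValid() before shape creation: the product must be finite and > 0.
	return PxSphereGeometry(unscaledRadius * uniformScale);
}
// --- End of usage sketch ---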
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 3,260 | C | 28.917431 | 114 | 0.741718 |
NVIDIA-Omniverse/PhysX/physx/include/geometry/PxMeshScale.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_MESH_SCALE_H
#define PX_MESH_SCALE_H
/** \addtogroup geomutils
@{
*/
#include "common/PxPhysXCommonConfig.h"
#include "foundation/PxMat33.h"
#include "foundation/PxAssert.h"
/** \brief Minimum allowed absolute magnitude for each of mesh scale's components (x,y,z).
\note Only positive scale values are allowed for convex meshes. */
#define PX_MESH_SCALE_MIN 1e-6f
/** \brief Maximum allowed absolute magnitude for each of mesh scale's components (x,y,z).
\note Only positive scale values are allowed for convex meshes. */
#define PX_MESH_SCALE_MAX 1e6f
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief A class expressing a nonuniform scaling transformation.
The scaling is along arbitrary axes that are specified by PxMeshScale::rotation. Specifically, PxMeshScale::rotation
describes the rotation from the scaling-axes frame to the mesh-local frame, i.e. PxMeshScale::rotation.rotate(v) transforms
the coordinates of vertex v from the mesh-local frame to the scaling-axes frame.
\note Negative scale values are supported for PxTriangleMeshGeometry
with absolute values for each component within the [PX_MESH_SCALE_MIN, PX_MESH_SCALE_MAX] range.
Negative scale causes a reflection around the specified axis, in addition PhysX will flip the normals
for mesh triangles when scale.x*scale.y*scale.z < 0.
\note Only positive scale values are supported for PxConvexMeshGeometry
with values for each component within the [PX_MESH_SCALE_MIN, PX_MESH_SCALE_MAX] range.
@see PxConvexMeshGeometry PxTriangleMeshGeometry
*/
class PxMeshScale
{
public:
/**
\brief Constructor initializes to identity scale.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxMeshScale(): scale(1.0f), rotation(PxIdentity)
{
}
/**
\brief Constructor from scalar.
*/
explicit PX_CUDA_CALLABLE PX_FORCE_INLINE PxMeshScale(PxReal r): scale(r), rotation(PxIdentity)
{
}
/**
\brief Constructor to initialize to arbitrary scale and identity scale rotation.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxMeshScale(const PxVec3& s)
{
scale = s;
rotation = PxQuat(PxIdentity);
}
/**
\brief Constructor to initialize to arbitrary scaling.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxMeshScale(const PxVec3& s, const PxQuat& r)
{
PX_ASSERT(r.isUnit());
scale = s;
rotation = r;
}
/**
\brief Returns true if the scaling is an identity transformation.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE bool isIdentity() const
{
return (scale.x == 1.0f && scale.y == 1.0f && scale.z == 1.0f);
}
/**
\brief Returns the inverse of this scaling transformation.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxMeshScale getInverse() const
{
return PxMeshScale(PxVec3(1.0f/scale.x, 1.0f/scale.y, 1.0f/scale.z), rotation);
}
/**
\brief Converts this transformation to a 3x3 matrix representation.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxMat33 toMat33() const
{
PxMat33 rot(rotation);
PxMat33 trans = rot.getTranspose();
trans.column0 *= scale[0];
trans.column1 *= scale[1];
trans.column2 *= scale[2];
return trans * rot;
}
/**
\brief Returns true if the combination of negative scale components will cause the triangle normals to flip. The SDK will flip the normals internally.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE bool hasNegativeDeterminant() const
{
return (scale.x * scale.y * scale.z < 0.0f);
}
PxVec3 transform(const PxVec3& v) const
{
return rotation.rotateInv(scale.multiply(rotation.rotate(v)));
}
bool isValidForTriangleMesh() const
{
PxVec3 absXYZ = scale.abs();
return (absXYZ.maxElement() <= PX_MESH_SCALE_MAX) && (absXYZ.minElement() >= PX_MESH_SCALE_MIN);
}
bool isValidForConvexMesh() const
{
return (scale.maxElement() <= PX_MESH_SCALE_MAX) && (scale.minElement() >= PX_MESH_SCALE_MIN);
}
PxVec3 scale; //!< A nonuniform scaling
PxQuat rotation; //!< The orientation of the scaling axes
};
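// --- Illustrative sketch (not part of the original header) ---
// The two ways of applying a PxMeshScale to a vertex are equivalent: either rotate
// into the scaling-axes frame, scale, and rotate back (transform()), or build the
// combined 3x3 matrix once with toMat33() and reuse it for many vertices. The
// helper name is hypothetical.
PX_FORCE_INLINE PxVec3 scaleVertexViaMatrix(const PxMeshScale& meshScale, const PxVec3& vertex)
{
	const PxMat33 vertexToShape = meshScale.toMat33();	// equals rotation^T * diag(scale) * rotation
	return vertexToShape.transform(vertex);				// same result as meshScale.transform(vertex)
}
// --- End of sketch ---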
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 5,615 | C | 32.628742 | 146 | 0.738557 |
NVIDIA-Omniverse/PhysX/physx/include/geometry/PxTriangleMeshGeometry.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_TRIANGLE_MESH_GEOMETRY_H
#define PX_TRIANGLE_MESH_GEOMETRY_H
/** \addtogroup geomutils
@{
*/
#include "geometry/PxGeometry.h"
#include "geometry/PxMeshScale.h"
#include "common/PxCoreUtilityTypes.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
class PxTriangleMesh;
/**
\brief Flags controlling the simulated behavior of the triangle mesh geometry.
Used in ::PxMeshGeometryFlags.
*/
struct PxMeshGeometryFlag
{
enum Enum
{
eTIGHT_BOUNDS = (1<<0), //!< Use tighter (but more expensive to compute) bounds around the triangle mesh geometry.
eDOUBLE_SIDED = (1<<1) //!< Meshes with this flag set are treated as double-sided.
//!< This flag is currently only used for raycasts and sweeps (it is ignored for overlap queries).
//!< For detailed specifications of this flag for meshes and heightfields please refer to the Geometry Query section of the user guide.
};
};
/**
\brief Collection of set bits defined in PxMeshGeometryFlag.
@see PxMeshGeometryFlag
*/
typedef PxFlags<PxMeshGeometryFlag::Enum,PxU8> PxMeshGeometryFlags;
PX_FLAGS_OPERATORS(PxMeshGeometryFlag::Enum,PxU8)
/**
\brief Triangle mesh geometry class.
This class unifies a mesh object with a scaling transform, and
lets the combined object be used anywhere a PxGeometry is needed.
The scaling is a transform along arbitrary axes contained in the scale object.
The vertices of the mesh in geometry (or shape) space are obtained by applying the
PxMeshScale::toMat33() transform to the vertex-space vertices stored in the
PxTriangleMesh object.
*/
class PxTriangleMeshGeometry : public PxGeometry
{
public:
/**
\brief Constructor. By default creates an empty object with a NULL mesh and identity scale.
\param[in] mesh Mesh pointer. May be NULL, though this will not make the object valid for shape construction.
\param[in] scaling Scale factor.
\param[in] flags Mesh flags.
*/
PX_INLINE PxTriangleMeshGeometry( PxTriangleMesh* mesh = NULL,
const PxMeshScale& scaling = PxMeshScale(),
PxMeshGeometryFlags flags = PxMeshGeometryFlags()) :
PxGeometry (PxGeometryType::eTRIANGLEMESH),
scale (scaling),
meshFlags (flags),
triangleMesh(mesh)
{}
/**
\brief Copy constructor.
\param[in] that Other object
*/
PX_INLINE PxTriangleMeshGeometry(const PxTriangleMeshGeometry& that) :
PxGeometry (that),
scale (that.scale),
meshFlags (that.meshFlags),
triangleMesh(that.triangleMesh)
{}
/**
\brief Assignment operator
*/
PX_INLINE void operator=(const PxTriangleMeshGeometry& that)
{
mType = that.mType;
scale = that.scale;
meshFlags = that.meshFlags;
triangleMesh = that.triangleMesh;
}
/**
\brief Returns true if the geometry is valid.
\return True if the current settings are valid for shape creation.
\note A valid triangle mesh has a positive scale value in each direction (scale.scale.x > 0, scale.scale.y > 0, scale.scale.z > 0).
It is illegal to call PxRigidActor::createShape and PxPhysics::createShape with a triangle mesh that has zero extents in any direction.
@see PxRigidActor::createShape, PxPhysics::createShape
*/
PX_INLINE bool isValid() const;
public:
PxMeshScale scale; //!< The scaling transformation.
PxMeshGeometryFlags meshFlags; //!< Mesh flags.
PxPadding<3> paddingFromFlags; //!< padding for mesh flags
PxTriangleMesh* triangleMesh; //!< A reference to the mesh object.
};
PX_INLINE bool PxTriangleMeshGeometry::isValid() const
{
if(mType != PxGeometryType::eTRIANGLEMESH)
return false;
if(!scale.scale.isFinite() || !scale.rotation.isUnit())
return false;
if(!scale.isValidForTriangleMesh())
return false;
if(!triangleMesh)
return false;
return true;
}
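// --- Illustrative usage sketch (not part of the original header) ---
// Unlike convex meshes, triangle meshes accept negative scale components, which
// mirror the mesh. This builds a mirrored, double-sided geometry from an existing
// PxTriangleMesh; the helper name is hypothetical.
PX_INLINE PxTriangleMeshGeometry makeMirroredMeshGeometry(PxTriangleMesh* mesh)
{
	const PxMeshScale mirrorX(PxVec3(-1.0f, 1.0f, 1.0f));	// reflect along the local X axis
	PxTriangleMeshGeometry geometry(mesh, mirrorX, PxMeshGeometryFlag::eDOUBLE_SIDED);
	PX_ASSERT(geometry.scale.hasNegativeDeterminant());	// the SDK flips triangle normals internally in this case
	return geometry;
}
// --- End of usage sketch ---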
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 5,436 | C | 32.770186 | 143 | 0.748344 |
NVIDIA-Omniverse/PhysX/physx/include/geometry/PxPlaneGeometry.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_PLANE_GEOMETRY_H
#define PX_PLANE_GEOMETRY_H
/** \addtogroup geomutils
@{
*/
#include "geometry/PxGeometry.h"
#include "foundation/PxFoundationConfig.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief Class describing a plane geometry.
The plane geometry specifies the half-space volume x<=0. As with other geometry types,
when used in a PxShape the collision volume is obtained by transforming the halfspace
by the shape local pose and the actor global pose.
To generate a PxPlane from a PxTransform, transform PxPlane(1,0,0,0).
To generate a PxTransform from a PxPlane, use PxTransformFromPlaneEquation.
@see PxShape.setGeometry() PxShape.getPlaneGeometry() PxTransformFromPlaneEquation
*/
class PxPlaneGeometry : public PxGeometry
{
public:
/**
\brief Constructor.
*/
PX_INLINE PxPlaneGeometry() : PxGeometry(PxGeometryType::ePLANE) {}
/**
\brief Copy constructor.
\param[in] that Other object
*/
PX_INLINE PxPlaneGeometry(const PxPlaneGeometry& that) : PxGeometry(that) {}
/**
\brief Assignment operator
*/
PX_INLINE void operator=(const PxPlaneGeometry& that)
{
mType = that.mType;
}
/**
\brief Returns true if the geometry is valid.
\return True if the current settings are valid
*/
PX_INLINE bool isValid() const;
};
PX_INLINE bool PxPlaneGeometry::isValid() const
{
if(mType != PxGeometryType::ePLANE)
return false;
return true;
}
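// --- Illustrative sketch (not part of the original header) ---
// Demonstrates the note above: the world-space plane equation of a plane shape is
// obtained by transforming the canonical plane PxPlane(1,0,0,0) by the combined
// shape/actor pose. Assumes foundation/PxPlane.h and foundation/PxTransform.h are
// available in the including translation unit; the helper name is hypothetical.
PX_INLINE PxPlane planeEquationFromPose(const PxTransform& globalPose)
{
	return PxPlane(1.0f, 0.0f, 0.0f, 0.0f).transform(globalPose);	// the half-space x<=0 carried into world space
}
// --- End of sketch ---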
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 3,159 | C | 30.6 | 87 | 0.752453 |
NVIDIA-Omniverse/PhysX/physx/include/geometry/PxConvexMesh.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_CONVEX_MESH_H
#define PX_CONVEX_MESH_H
/** \addtogroup geomutils
@{
*/
#include "foundation/Px.h"
#include "common/PxBase.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief Polygon data
Plane format: (mPlane[0],mPlane[1],mPlane[2]).dot(x) + mPlane[3] = 0,
with the normal outward-facing from the hull.
*/
struct PxHullPolygon
{
PxReal mPlane[4]; //!< Plane equation for this polygon
PxU16 mNbVerts; //!< Number of vertices/edges in the polygon
PxU16 mIndexBase; //!< Offset in index buffer
};
/**
\brief A convex mesh.
Internally represented as a list of convex polygons. The number
of polygons is limited to 256.
To avoid duplicating data when you have several instances of a particular
mesh positioned differently, you do not use this class to represent a
convex object directly. Instead, you create an instance of this mesh via
the PxConvexMeshGeometry and PxShape classes.
<h3>Creation</h3>
To create an instance of this class call PxPhysics::createConvexMesh(),
and PxConvexMesh::release() to delete it. This is only possible
once you have released all of its #PxShape instances.
<h3>Visualizations:</h3>
\li #PxVisualizationParameter::eCOLLISION_AABBS
\li #PxVisualizationParameter::eCOLLISION_SHAPES
\li #PxVisualizationParameter::eCOLLISION_AXES
\li #PxVisualizationParameter::eCOLLISION_FNORMALS
\li #PxVisualizationParameter::eCOLLISION_EDGES
@see PxConvexMeshDesc PxPhysics.createConvexMesh()
*/
class PxConvexMesh : public PxRefCounted
{
public:
/**
\brief Returns the number of vertices.
\return Number of vertices.
@see getVertices()
*/
virtual PxU32 getNbVertices() const = 0;
/**
\brief Returns the vertices.
\return Array of vertices.
@see getNbVertices()
*/
virtual const PxVec3* getVertices() const = 0;
/**
\brief Returns the index buffer.
\return Index buffer.
@see getNbPolygons() getPolygonData()
*/
virtual const PxU8* getIndexBuffer() const = 0;
/**
\brief Returns the number of polygons.
\return Number of polygons.
@see getIndexBuffer() getPolygonData()
*/
virtual PxU32 getNbPolygons() const = 0;
/**
\brief Returns the polygon data.
\param[in] index Polygon index in the range [0, getNbPolygons()).
\param[out] data Polygon data.
\return True if success.
@see getIndexBuffer() getNbPolygons()
*/
virtual bool getPolygonData(PxU32 index, PxHullPolygon& data) const = 0;
/**
\brief Decrements the reference count of a convex mesh and releases it if the new reference count is zero.
@see PxPhysics.createConvexMesh() PxConvexMeshGeometry PxShape
*/
virtual void release() = 0;
/**
\brief Returns the mass properties of the mesh assuming unit density.
The following relationship holds between mass and volume:
mass = volume * density
The mass of a unit density mesh is equal to its volume, so this function returns the volume of the mesh.
Similarly, to obtain the localInertia of an identically shaped object with a uniform density of d, simply multiply the
localInertia of the unit density mesh by d.
\param[out] mass The mass of the mesh assuming unit density.
\param[out] localInertia The inertia tensor in mesh local space assuming unit density.
\param[out] localCenterOfMass Position of center of mass (or centroid) in mesh local space.
*/
virtual void getMassInformation(PxReal& mass, PxMat33& localInertia, PxVec3& localCenterOfMass) const = 0;
/**
\brief Returns the local-space (vertex space) AABB from the convex mesh.
\return local-space bounds
*/
virtual PxBounds3 getLocalBounds() const = 0;
/**
\brief Returns the local-space Signed Distance Field for this mesh if it has one.
\return local-space SDF.
*/
virtual const PxReal* getSDF() const = 0;
virtual const char* getConcreteTypeName() const { return "PxConvexMesh"; }
/**
\brief This method decides whether a convex mesh is GPU compatible. If the total number of vertices is greater than 64, or any polygon has more than 32 vertices, or the
convex hull data was not cooked with GPU data enabled during cooking or was loaded from a serialized collection, the convex hull is incompatible with GPU collision detection. Otherwise
it is compatible.
\return True if the convex hull is GPU compatible
*/
virtual bool isGpuCompatible() const = 0;
protected:
PX_INLINE PxConvexMesh(PxType concreteType, PxBaseFlags baseFlags) : PxRefCounted(concreteType, baseFlags) {}
PX_INLINE PxConvexMesh(PxBaseFlags baseFlags) : PxRefCounted(baseFlags) {}
virtual ~PxConvexMesh() {}
virtual bool isKindOf(const char* name) const { PX_IS_KIND_OF(name, "PxConvexMesh", PxRefCounted); }
};
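// --- Illustrative usage sketch (not part of the original header) ---
// Shows the density relationship described above: getMassInformation() returns
// unit-density values, so mass and inertia both scale linearly with the actual
// density while the center of mass is unchanged. Assumes the full PxMat33/PxVec3
// definitions (e.g. foundation/PxMat33.h) are visible in the including translation
// unit; the helper name is hypothetical.
PX_INLINE void computeConvexMassProperties(const PxConvexMesh& mesh, PxReal density,
	PxReal& mass, PxMat33& localInertia, PxVec3& localCenterOfMass)
{
	mesh.getMassInformation(mass, localInertia, localCenterOfMass);	// unit-density mass equals the volume
	mass *= density;
	localInertia = localInertia * density;
}
// --- End of usage sketch ---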
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 6,361 | C | 33.02139 | 185 | 0.753655 |
NVIDIA-Omniverse/PhysX/physx/include/geometry/PxHeightFieldSample.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_HEIGHT_FIELD_SAMPLE_H
#define PX_HEIGHT_FIELD_SAMPLE_H
/** \addtogroup geomutils
@{ */
#include "common/PxPhysXCommonConfig.h"
#include "foundation/PxBitAndData.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief Special material index values for height field samples.
@see PxHeightFieldSample.materialIndex0 PxHeightFieldSample.materialIndex1
*/
struct PxHeightFieldMaterial
{
enum Enum
{
eHOLE = 127 //!< A material indicating that the triangle should be treated as a hole in the mesh.
};
};
/**
\brief Heightfield sample format.
This format corresponds to the #PxHeightFieldFormat member PxHeightFieldFormat::eS16_TM.
An array of heightfield samples are used when creating a PxHeightField to specify
the elevation of the heightfield points. In addition the material and tessellation of the adjacent
triangles are specified.
@see PxHeightField PxHeightFieldDesc PxHeightFieldDesc.samples
*/
struct PxHeightFieldSample
{
/**
\brief The height of the heightfield sample
This value is scaled by PxHeightFieldGeometry::heightScale.
@see PxHeightFieldGeometry
*/
PxI16 height;
/**
\brief The triangle material index of the quad's lower triangle + tessellation flag
An index pointing into the material table of the shape which instantiates the heightfield.
This index determines the material of the lower of the quad's two triangles (i.e. the quad whose
upper-left corner is this sample, see the Guide for illustrations).
Special values of the 7 data bits are defined by PxHeightFieldMaterial
The tessellation flag specifies which way the quad whose upper-left corner is this sample is split.
If the flag is set, the diagonal of the quad will run from this sample to the opposite vertex; if not,
it will run between the other two vertices (see the Guide for illustrations).
@see PxHeightFieldGeometry materialIndex1 PxShape.setmaterials() PxShape.getMaterials()
*/
PxBitAndByte materialIndex0;
PX_CUDA_CALLABLE PX_FORCE_INLINE PxU8 tessFlag() const { return PxU8(materialIndex0.isBitSet() ? 1 : 0); } // PT: explicit conversion to make sure we don't break the code
PX_CUDA_CALLABLE PX_FORCE_INLINE void setTessFlag() { materialIndex0.setBit(); }
PX_CUDA_CALLABLE PX_FORCE_INLINE void clearTessFlag() { materialIndex0.clearBit(); }
/**
\brief The triangle material index of the quad's upper triangle + reserved flag
An index pointing into the material table of the shape which instantiates the heightfield.
This index determines the material of the upper of the quad's two triangles (i.e. the quad whose
upper-left corner is this sample, see the Guide for illustrations).
@see PxHeightFieldGeometry materialIndex0 PxShape.setmaterials() PxShape.getMaterials()
*/
PxBitAndByte materialIndex1;
};
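// --- Illustrative sketch (not part of the original header) ---
// Fills one heightfield sample: the raw 16-bit height, the materials of the quad's
// lower and upper triangles, and the tessellation flag selecting the quad's split
// diagonal. The helper name and its parameters are hypothetical.
PX_INLINE PxHeightFieldSample makeSample(PxI16 height, PxU8 lowerMaterial, PxU8 upperMaterial, bool flipDiagonal)
{
	PxHeightFieldSample sample;
	sample.height = height;									// scaled at runtime by PxHeightFieldGeometry::heightScale
	sample.materialIndex0 = PxBitAndByte(lowerMaterial);	// use PxHeightFieldMaterial::eHOLE to punch a hole
	sample.materialIndex1 = PxBitAndByte(upperMaterial);
	if(flipDiagonal)
		sample.setTessFlag();								// diagonal runs from this sample to the opposite vertex
	else
		sample.clearTessFlag();
	return sample;
}
// --- End of sketch ---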
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 4,535 | C | 38.103448 | 172 | 0.770232 |
NVIDIA-Omniverse/PhysX/physx/include/geometry/PxBVH.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_BVH_H
#define PX_BVH_H
/** \addtogroup geomutils
@{
*/
#include "common/PxBase.h"
#include "foundation/PxTransform.h"
#include "foundation/PxBounds3.h"
#include "geometry/PxGeometryQueryFlags.h"
#include "geometry/PxReportCallback.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
class PxGeometry;
/**
\brief Class representing a bounding volume hierarchy.
PxBVH can be provided to PxScene::addActor. In this case the scene query
pruning structure inside PhysX SDK will store/update one bound per actor.
The scene queries against such an actor will query actor bounds and then
make a local space query against the provided BVH, which is in actor's
local space.
PxBVH can also be used as a standalone data-structure for arbitrary
purposes, unrelated to PxScene / PxActor.
@see PxScene::addActor
*/
class PxBVH : public PxBase
{
public:
struct RaycastCallback
{
RaycastCallback() {}
virtual ~RaycastCallback() {}
// Reports one raycast or sweep hit.
// boundsIndex [in] Index of touched bounds
// distance [in/out] Impact distance. Shrinks the ray if written out.
// return false to abort the query
virtual bool reportHit(PxU32 boundsIndex, PxReal& distance) = 0;
};
struct OverlapCallback
{
OverlapCallback() {}
virtual ~OverlapCallback() {}
// Reports one overlap hit.
// boundsIndex [in] Index of touched bounds
// return false to abort the query
virtual bool reportHit(PxU32 boundsIndex) = 0;
};
struct TraversalCallback
{
TraversalCallback() {}
virtual ~TraversalCallback() {}
// Reports one visited node.
// bounds [in] node bounds
// return true to continue traversing this branch
virtual bool visitNode(const PxBounds3& bounds) = 0;
// Reports one validated leaf node. Called on leaf nodes after visitNode returns true on them.
// nbPrims [in] number of primitives in the node
// prims [in] primitives in the node (nbPrims entries)
// return false to abort the query
virtual bool reportLeaf(PxU32 nbPrims, const PxU32* prims) = 0;
};
/**
\brief Raycast test against a BVH.
\param[in] origin The origin of the ray.
\param[in] unitDir Normalized direction of the ray.
\param[in] maxDist Maximum ray length, has to be in the [0, inf) range
\param[in] cb Raycast callback, called once per hit
\param[in] queryFlags Optional flags controlling the query.
\return false if query has been aborted
*/
virtual bool raycast(const PxVec3& origin, const PxVec3& unitDir, float maxDist, RaycastCallback& cb, PxGeometryQueryFlags queryFlags = PxGeometryQueryFlag::eDEFAULT) const = 0;
/**
\brief Sweep test against a BVH.
\param[in] geom The query volume
\param[in] pose The pose of the query volume
\param[in] unitDir Normalized direction of the sweep.
\param[in] maxDist Maximum sweep length, has to be in the [0, inf) range
\param[in] cb Raycast callback, called once per hit
\param[in] queryFlags Optional flags controlling the query.
\return false if query has been aborted
*/
virtual bool sweep(const PxGeometry& geom, const PxTransform& pose, const PxVec3& unitDir, float maxDist, RaycastCallback& cb, PxGeometryQueryFlags queryFlags = PxGeometryQueryFlag::eDEFAULT) const = 0;
/**
\brief Overlap test against a BVH.
\param[in] geom The query volume
\param[in] pose The pose of the query volume
\param[in] cb Overlap callback, called once per hit
\param[in] queryFlags Optional flags controlling the query.
\return false if query has been aborted
*/
virtual bool overlap(const PxGeometry& geom, const PxTransform& pose, OverlapCallback& cb, PxGeometryQueryFlags queryFlags = PxGeometryQueryFlag::eDEFAULT) const = 0;
/**
\brief Frustum culling test against a BVH.
This is similar in spirit to an overlap query using a convex object around the frustum.
However this specialized query has better performance, and can support more than the 6 planes
of a frustum, which can be useful in portal-based engines.
On the other hand this test only returns a conservative number of bounds, i.e. some of the returned
bounds may actually be outside the frustum volume, close to it but not touching it. This is usually
an ok performance trade-off when the function is used for view-frustum culling.
\param[in] nbPlanes Number of planes. Only 32 planes max are supported.
\param[in] planes Array of planes, should be in the same space as the BVH.
\param[in] cb Overlap callback, called once per visible object
\param[in] queryFlags Optional flags controlling the query.
\return false if query has been aborted
*/
virtual bool cull(PxU32 nbPlanes, const PxPlane* planes, OverlapCallback& cb, PxGeometryQueryFlags queryFlags = PxGeometryQueryFlag::eDEFAULT) const = 0;
/**
\brief Returns the number of bounds in the BVH.
You can use #getBounds() to retrieve the bounds.
\note These are the user-defined bounds passed to the BVH builder, not the internal bounds around each BVH node.
\return Number of bounds in the BVH.
@see getBounds() getBoundsForModification()
*/
virtual PxU32 getNbBounds() const = 0;
/**
\brief Retrieve the read-only bounds in the BVH.
\note These are the user-defined bounds passed to the BVH builder, not the internal bounds around each BVH node.
@see PxBounds3 getNbBounds() getBoundsForModification()
*/
virtual const PxBounds3* getBounds() const = 0;
/**
\brief Retrieve the bounds in the BVH.
These bounds can be modified. Call refit() after modifications are done.
\note These are the user-defined bounds passed to the BVH builder, not the internal bounds around each BVH node.
@see PxBounds3 getNbBounds() getBounds() refit() updateBounds() partialRefit()
*/
PX_FORCE_INLINE PxBounds3* getBoundsForModification()
{
return const_cast<PxBounds3*>(getBounds());
}
/**
\brief Refit the BVH.
This function "refits" the tree, i.e. takes the new (leaf) bounding boxes into account and
recomputes all the BVH bounds accordingly. This is an O(n) operation with n = number of bounds in the BVH.
This works best with minor bounds modifications, i.e. when the bounds remain close to their initial values.
With large modifications the tree quality degrades more and more, and subsequent query performance suffers.
It might be a better strategy to create a brand new BVH if bounds change drastically.
This function refits the whole tree after an arbitrary number of bounds have potentially been modified by
users (via getBoundsForModification()). If you only have a small number of bounds to update, it might be
more efficient to use setBounds() and partialRefit() instead.
@see getNbBounds() getBoundsForModification() updateBounds() partialRefit()
*/
virtual void refit() = 0;
/**
\brief Update single bounds.
This is an alternative to getBoundsForModification() / refit(). If you only have a small set of bounds to
update, it can be inefficient to call the refit() function, because it refits the whole BVH.
Instead, one can update individual bounds with this updateBounds() function. It sets the new bounds and
marks the corresponding BVH nodes for partial refit. Once all the individual bounds have been updated,
call partialRefit() to only refit the subset of marked nodes.
\param[in] boundsIndex Index of updated bounds. Valid range is between 0 and getNbBounds().
\param[in] newBounds Updated bounds.
\return true if success
@see getNbBounds() getBoundsForModification() refit() partialRefit()
*/
virtual bool updateBounds(PxU32 boundsIndex, const PxBounds3& newBounds) = 0;
/**
\brief Refits subset of marked nodes.
This is an alternative to the refit() function, to be called after updateBounds() calls.
See updateBounds() for details.
@see getNbBounds() getBoundsForModification() refit() updateBounds()
*/
virtual void partialRefit() = 0;
/**
\brief Generic BVH traversal function.
This can be used to implement custom BVH traversal functions if provided ones are not enough.
In particular this can be used to visualize the tree's bounds.
\param[in] cb Traversal callback, called for each visited node
\return false if query has been aborted
*/
virtual bool traverse(TraversalCallback& cb) const = 0;
virtual const char* getConcreteTypeName() const { return "PxBVH"; }
protected:
PX_INLINE PxBVH(PxType concreteType, PxBaseFlags baseFlags) : PxBase(concreteType, baseFlags) {}
PX_INLINE PxBVH(PxBaseFlags baseFlags) : PxBase(baseFlags) {}
virtual ~PxBVH() {}
virtual bool isKindOf(const char* name) const { PX_IS_KIND_OF(name, "PxBVH", PxBase); }
};
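// --- Illustrative usage sketch (not part of the original header) ---
// A minimal RaycastCallback that keeps the closest touched bounds. Writing to the
// 'distance' parameter shrinks the ray so farther bounds are culled early; returning
// true keeps the traversal going. The class and member names are hypothetical.
struct PxClosestBoundsCallback : PxBVH::RaycastCallback
{
	PxU32	closestIndex;
	PxReal	closestDistance;

	PxClosestBoundsCallback() : closestIndex(0xffffffff), closestDistance(PX_MAX_F32)	{}

	virtual bool reportHit(PxU32 boundsIndex, PxReal& distance)
	{
		closestIndex = boundsIndex;
		closestDistance = distance;
		distance = closestDistance;	// in/out parameter: writing it back shrinks the ray, culling farther bounds
		return true;				// returning false here would abort the whole query
	}
};
// Typical use: PxClosestBoundsCallback cb; bvh.raycast(origin, unitDir, maxDist, cb);
// --- End of usage sketch ---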
struct PxGeomIndexPair;
/**
\brief BVH-vs-BVH overlap test
This function reports pairs of box indices, one from each input BVH, whose bounds overlap.
\param[in] callback The callback object used to report results
\param[in] bvh0 First bvh
\param[in] bvh1 Second bvh
\return true if an overlap has been detected
@see PxBVH PxReportCallback
*/
PX_C_EXPORT PX_PHYSX_COMMON_API bool PX_CALL_CONV PxFindOverlap(PxReportCallback<PxGeomIndexPair>& callback, const PxBVH& bvh0, const PxBVH& bvh1);
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 10,915 | C | 37.167832 | 206 | 0.745854 |
NVIDIA-Omniverse/PhysX/physx/include/geometry/PxGeometryHelpers.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_GEOMETRY_HELPERS_H
#define PX_GEOMETRY_HELPERS_H
/** \addtogroup geomutils
@{
*/
#include "foundation/PxPlane.h"
#include "foundation/PxTransform.h"
#include "foundation/PxUnionCast.h"
#include "common/PxPhysXCommonConfig.h"
#include "geometry/PxGeometry.h"
#include "geometry/PxBoxGeometry.h"
#include "geometry/PxSphereGeometry.h"
#include "geometry/PxCapsuleGeometry.h"
#include "geometry/PxPlaneGeometry.h"
#include "geometry/PxConvexMeshGeometry.h"
#include "geometry/PxHeightFieldGeometry.h"
#include "geometry/PxParticleSystemGeometry.h"
#include "geometry/PxHairSystemGeometry.h"
#include "geometry/PxTetrahedronMeshGeometry.h"
#include "geometry/PxCustomGeometry.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief Geometry holder class
This class contains enough space to hold a value of any PxGeometry subtype.
Its principal use is as a convenience class to allow geometries to be returned polymorphically from functions.
*/
PX_ALIGN_PREFIX(4)
class PxGeometryHolder
{
class PxInvalidGeometry : public PxGeometry
{
public:
PX_INLINE PxInvalidGeometry() : PxGeometry(PxGeometryType::eINVALID) {}
};
public:
PX_FORCE_INLINE PxGeometryType::Enum getType() const
{
return any().getType();
}
PX_FORCE_INLINE PxGeometry& any()
{
return *PxUnionCast<PxGeometry*>(&bytes.geometry);
}
PX_FORCE_INLINE const PxGeometry& any() const
{
return *PxUnionCast<const PxGeometry*>(&bytes.geometry);
}
//! @cond
PX_FORCE_INLINE PxSphereGeometry& sphere() { return get<PxSphereGeometry, PxGeometryType::eSPHERE>(); }
PX_FORCE_INLINE const PxSphereGeometry& sphere() const { return get<const PxSphereGeometry, PxGeometryType::eSPHERE>(); }
PX_FORCE_INLINE PxPlaneGeometry& plane() { return get<PxPlaneGeometry, PxGeometryType::ePLANE>(); }
PX_FORCE_INLINE const PxPlaneGeometry& plane() const { return get<const PxPlaneGeometry, PxGeometryType::ePLANE>(); }
PX_FORCE_INLINE PxCapsuleGeometry& capsule() { return get<PxCapsuleGeometry, PxGeometryType::eCAPSULE>(); }
PX_FORCE_INLINE const PxCapsuleGeometry& capsule() const { return get<const PxCapsuleGeometry, PxGeometryType::eCAPSULE>(); }
PX_FORCE_INLINE PxBoxGeometry& box() { return get<PxBoxGeometry, PxGeometryType::eBOX>(); }
PX_FORCE_INLINE const PxBoxGeometry& box() const { return get<const PxBoxGeometry, PxGeometryType::eBOX>(); }
PX_FORCE_INLINE PxConvexMeshGeometry& convexMesh() { return get<PxConvexMeshGeometry, PxGeometryType::eCONVEXMESH>(); }
PX_FORCE_INLINE const PxConvexMeshGeometry& convexMesh() const { return get<const PxConvexMeshGeometry, PxGeometryType::eCONVEXMESH>(); }
PX_FORCE_INLINE PxTetrahedronMeshGeometry& tetMesh() { return get<PxTetrahedronMeshGeometry, PxGeometryType::eTETRAHEDRONMESH>(); }
PX_FORCE_INLINE const PxTetrahedronMeshGeometry& tetMesh() const { return get<const PxTetrahedronMeshGeometry, PxGeometryType::eTETRAHEDRONMESH>(); }
PX_FORCE_INLINE PxTriangleMeshGeometry& triangleMesh() { return get<PxTriangleMeshGeometry, PxGeometryType::eTRIANGLEMESH>(); }
PX_FORCE_INLINE const PxTriangleMeshGeometry& triangleMesh() const { return get<const PxTriangleMeshGeometry, PxGeometryType::eTRIANGLEMESH>(); }
PX_FORCE_INLINE PxHeightFieldGeometry& heightField() { return get<PxHeightFieldGeometry, PxGeometryType::eHEIGHTFIELD>(); }
PX_FORCE_INLINE const PxHeightFieldGeometry& heightField() const { return get<const PxHeightFieldGeometry, PxGeometryType::eHEIGHTFIELD>(); }
PX_FORCE_INLINE PxParticleSystemGeometry& particleSystem() { return get<PxParticleSystemGeometry, PxGeometryType::ePARTICLESYSTEM>(); }
PX_FORCE_INLINE const PxParticleSystemGeometry& particleSystem() const { return get<const PxParticleSystemGeometry, PxGeometryType::ePARTICLESYSTEM>(); }
PX_FORCE_INLINE PxHairSystemGeometry& hairSystem() { return get<PxHairSystemGeometry, PxGeometryType::eHAIRSYSTEM>(); }
PX_FORCE_INLINE const PxHairSystemGeometry& hairSystem() const { return get<const PxHairSystemGeometry, PxGeometryType::eHAIRSYSTEM>(); }
PX_FORCE_INLINE PxCustomGeometry& custom() { return get<PxCustomGeometry, PxGeometryType::eCUSTOM>(); }
PX_FORCE_INLINE const PxCustomGeometry& custom() const { return get<const PxCustomGeometry, PxGeometryType::eCUSTOM>(); }
//! @endcond
PX_FORCE_INLINE void storeAny(const PxGeometry& geometry)
{
PX_ASSERT_WITH_MESSAGE( (geometry.getType() >= PxGeometryType::eSPHERE) &&
(geometry.getType() < PxGeometryType::eGEOMETRY_COUNT),
"Unexpected GeometryType in PxGeometryHolder::storeAny");
switch(geometry.getType())
{
case PxGeometryType::eSPHERE: put<PxSphereGeometry>(geometry); break;
case PxGeometryType::ePLANE: put<PxPlaneGeometry>(geometry); break;
case PxGeometryType::eCAPSULE: put<PxCapsuleGeometry>(geometry); break;
case PxGeometryType::eBOX: put<PxBoxGeometry>(geometry); break;
case PxGeometryType::eCONVEXMESH: put<PxConvexMeshGeometry>(geometry); break;
case PxGeometryType::eTRIANGLEMESH: put<PxTriangleMeshGeometry>(geometry); break;
case PxGeometryType::eTETRAHEDRONMESH: put<PxTetrahedronMeshGeometry>(geometry); break;
case PxGeometryType::eHEIGHTFIELD: put<PxHeightFieldGeometry>(geometry); break;
case PxGeometryType::ePARTICLESYSTEM: put<PxParticleSystemGeometry>(geometry); break;
case PxGeometryType::eHAIRSYSTEM: put<PxHairSystemGeometry>(geometry); break;
case PxGeometryType::eCUSTOM: put<PxCustomGeometry>(geometry); break;
case PxGeometryType::eGEOMETRY_COUNT:
case PxGeometryType::eINVALID: break;
}
}
PX_FORCE_INLINE PxGeometryHolder() { put<PxInvalidGeometry>(PxInvalidGeometry()); }
PX_FORCE_INLINE PxGeometryHolder(const PxGeometry& geometry){ storeAny(geometry); }
private:
template<typename T> void put(const PxGeometry& geometry)
{
static_cast<T&>(any()) = static_cast<const T&>(geometry);
}
template<typename T, PxGeometryType::Enum type> T& get()
{
PX_ASSERT(getType() == type);
return static_cast<T&>(any());
}
template<typename T, PxGeometryType::Enum type> T& get() const
{
PX_ASSERT(getType() == type);
return static_cast<T&>(any());
}
union {
PxU8 geometry[sizeof(PxGeometry)];
PxU8 box[sizeof(PxBoxGeometry)];
PxU8 sphere[sizeof(PxSphereGeometry)];
PxU8 capsule[sizeof(PxCapsuleGeometry)];
PxU8 plane[sizeof(PxPlaneGeometry)];
PxU8 convex[sizeof(PxConvexMeshGeometry)];
PxU8 tetMesh[sizeof(PxTetrahedronMeshGeometry)];
PxU8 mesh[sizeof(PxTriangleMeshGeometry)];
PxU8 heightfield[sizeof(PxHeightFieldGeometry)];
PxU8 particleSystem[sizeof(PxParticleSystemGeometry)];
PxU8 hairSystem[sizeof(PxHairSystemGeometry)];
PxU8 custom[sizeof(PxCustomGeometry)];
} bytes;
}
PX_ALIGN_SUFFIX(4);
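// Usage sketch (editor's illustration, not SDK code): storing a concrete geometry in a
// PxGeometryHolder and reading it back through the typed accessors. Variable names are
// hypothetical.
//
//	PxSphereGeometry sphereGeom(1.0f);
//	PxGeometryHolder holder(sphereGeom);				// copies the sphere into the holder
//	if(holder.getType() == PxGeometryType::eSPHERE)
//	{
//		const PxReal radius = holder.sphere().radius;	// typed access, asserts on type mismatch
//		PX_UNUSED(radius);
//	}
//	holder.storeAny(PxBoxGeometry(1.0f, 2.0f, 3.0f));	// the same holder can be reused for another type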
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
NVIDIA-Omniverse/PhysX/physx/include/geometry/PxHeightFieldGeometry.h
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_HEIGHT_FIELD_GEOMETRY_H
#define PX_HEIGHT_FIELD_GEOMETRY_H
/** \addtogroup geomutils
@{
*/
#include "geometry/PxTriangleMeshGeometry.h"
#include "common/PxCoreUtilityTypes.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
#define PX_MIN_HEIGHTFIELD_XZ_SCALE 1e-8f
#define PX_MIN_HEIGHTFIELD_Y_SCALE (0.0001f / PxReal(0xFFFF))
class PxHeightField;
/**
\brief Height field geometry class.
This class allows creating a scaled height field geometry instance.
There are minimum allowed values for the scale factors: PX_MIN_HEIGHTFIELD_XZ_SCALE for the row and column (XZ) scales, and PX_MIN_HEIGHTFIELD_Y_SCALE for the height (Y) scale. Geometry validation, and hence heightfield shape creation, fails if a scale factor is below its minimum.
*/
class PxHeightFieldGeometry : public PxGeometry
{
public:
/**
\brief Constructor.
*/
PX_INLINE PxHeightFieldGeometry(PxHeightField* hf = NULL,
PxMeshGeometryFlags flags = PxMeshGeometryFlag::Enum(0),
PxReal heightScale_ = 1.0f,
PxReal rowScale_ = 1.0f,
PxReal columnScale_ = 1.0f) :
PxGeometry (PxGeometryType::eHEIGHTFIELD),
heightField (hf),
heightScale (heightScale_),
rowScale (rowScale_),
columnScale (columnScale_),
heightFieldFlags (flags)
{
}
/**
\brief Copy constructor.
\param[in] that Other object
*/
PX_INLINE PxHeightFieldGeometry(const PxHeightFieldGeometry& that) :
PxGeometry (that),
heightField (that.heightField),
heightScale (that.heightScale),
rowScale (that.rowScale),
columnScale (that.columnScale),
heightFieldFlags (that.heightFieldFlags)
{
}
/**
\brief Assignment operator
*/
PX_INLINE void operator=(const PxHeightFieldGeometry& that)
{
mType = that.mType;
heightField = that.heightField;
heightScale = that.heightScale;
rowScale = that.rowScale;
columnScale = that.columnScale;
heightFieldFlags = that.heightFieldFlags;
}
/**
\brief Returns true if the geometry is valid.
\return True if the current settings are valid
\note A valid height field has a positive scale value in each direction (heightScale > 0, rowScale > 0, columnScale > 0).
It is illegal to call PxRigidActor::createShape and PxPhysics::createShape with a height field that has zero extents in any direction.
@see PxRigidActor::createShape, PxPhysics::createShape
*/
PX_INLINE bool isValid() const;
public:
/**
\brief The height field data.
*/
PxHeightField* heightField;
/**
\brief The scaling factor for the height field in vertical direction (y direction in local space).
*/
PxReal heightScale;
/**
\brief The scaling factor for the height field in the row direction (x direction in local space).
*/
PxReal rowScale;
/**
\brief The scaling factor for the height field in the column direction (z direction in local space).
*/
PxReal columnScale;
/**
\brief Flags to specify some collision properties for the height field.
*/
PxMeshGeometryFlags heightFieldFlags;
PxPadding<3> paddingFromFlags; //!< padding for mesh flags.
};
PX_INLINE bool PxHeightFieldGeometry::isValid() const
{
if(mType != PxGeometryType::eHEIGHTFIELD)
return false;
if(!PxIsFinite(heightScale) || !PxIsFinite(rowScale) || !PxIsFinite(columnScale))
return false;
if(rowScale < PX_MIN_HEIGHTFIELD_XZ_SCALE || columnScale < PX_MIN_HEIGHTFIELD_XZ_SCALE || heightScale < PX_MIN_HEIGHTFIELD_Y_SCALE)
return false;
if(!heightField)
return false;
return true;
}
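// Usage sketch (editor's illustration, not SDK code): wrapping an existing PxHeightField in a
// geometry object and validating it before shape creation. 'heightField' and the scale values
// below are hypothetical.
//
//	PxHeightFieldGeometry hfGeom(heightField, PxMeshGeometryFlags(), 0.01f, 1.0f, 1.0f);
//	if(hfGeom.isValid())
//	{
//		// safe to pass to PxPhysics::createShape()
//	}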
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
NVIDIA-Omniverse/PhysX/physx/include/geometry/PxTriangle.h
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_TRIANGLE_H
#define PX_TRIANGLE_H
/** \addtogroup geomutils
@{
*/
#include "common/PxPhysXCommonConfig.h"
#include "foundation/PxVec3.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief Triangle class.
*/
class PxTriangle
{
public:
/**
\brief Constructor
*/
PX_FORCE_INLINE PxTriangle() {}
/**
\brief Constructor
\param[in] p0 Point 0
\param[in] p1 Point 1
\param[in] p2 Point 2
*/
PX_FORCE_INLINE PxTriangle(const PxVec3& p0, const PxVec3& p1, const PxVec3& p2)
{
verts[0] = p0;
verts[1] = p1;
verts[2] = p2;
}
/**
\brief Copy constructor
\param[in] triangle Tri to copy
*/
PX_FORCE_INLINE PxTriangle(const PxTriangle& triangle)
{
verts[0] = triangle.verts[0];
verts[1] = triangle.verts[1];
verts[2] = triangle.verts[2];
}
/**
\brief Destructor
*/
PX_FORCE_INLINE ~PxTriangle() {}
/**
\brief Assignment operator
*/
PX_FORCE_INLINE void operator=(const PxTriangle& triangle)
{
verts[0] = triangle.verts[0];
verts[1] = triangle.verts[1];
verts[2] = triangle.verts[2];
}
/**
\brief Compute the normal of the Triangle.
\param[out] _normal Triangle normal.
*/
PX_FORCE_INLINE void normal(PxVec3& _normal) const
{
_normal = (verts[1]-verts[0]).cross(verts[2]-verts[0]);
_normal.normalize();
}
/**
\brief Compute the unnormalized normal of the triangle.
\param[out] _normal Triangle normal (not normalized).
*/
PX_FORCE_INLINE void denormalizedNormal(PxVec3& _normal) const
{
_normal = (verts[1]-verts[0]).cross(verts[2]-verts[0]);
}
/**
\brief Compute the area of the triangle.
\return Area of the triangle.
*/
PX_FORCE_INLINE PxReal area() const
{
const PxVec3& p0 = verts[0];
const PxVec3& p1 = verts[1];
const PxVec3& p2 = verts[2];
return ((p0 - p1).cross(p0 - p2)).magnitude() * 0.5f;
}
/**
\brief Computes a point on the triangle from u and v barycentric coordinates.
\return Point on the triangle at the given barycentric coordinates.
*/
PX_FORCE_INLINE PxVec3 pointFromUV(PxReal u, PxReal v) const
{
return (1.0f-u-v)*verts[0] + u*verts[1] + v*verts[2];
}
/**
\brief Array of Vertices.
*/
PxVec3 verts[3];
};
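// Usage sketch (editor's illustration, not SDK code): basic queries on a triangle.
//
//	const PxTriangle tri(PxVec3(0.0f), PxVec3(1.0f, 0.0f, 0.0f), PxVec3(0.0f, 1.0f, 0.0f));
//	PxVec3 n;
//	tri.normal(n);												// unit normal, here (0,0,1)
//	const PxReal a = tri.area();								// here 0.5
//	const PxVec3 c = tri.pointFromUV(1.0f/3.0f, 1.0f/3.0f);		// barycentric center (centroid)
//	PX_UNUSED(a);	PX_UNUSED(c);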
//! A padded version of PxTriangle, to safely load its data using SIMD
class PxTrianglePadded : public PxTriangle
{
public:
PX_FORCE_INLINE PxTrianglePadded() {}
PX_FORCE_INLINE ~PxTrianglePadded() {}
PxU32 padding;
};
#if !PX_DOXYGEN
}
#endif
/** @} */
#endif
NVIDIA-Omniverse/PhysX/physx/include/geometry/PxGeometryInternal.h
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_GEOMETRY_INTERNAL_H
#define PX_GEOMETRY_INTERNAL_H
/** \addtogroup geomutils
@{ */
#include "common/PxPhysXCommonConfig.h"
#include "foundation/PxVec3.h"
#include "geometry/PxTriangleMesh.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
class PxTriangleMesh;
struct PxTriangleMeshInternalData
{
PxU32 mNbVertices;
PxU32 mNbTriangles;
PxVec3* mVertices;
void* mTriangles;
PxU32* mFaceRemap;
PxVec3 mAABB_Center;
PxVec3 mAABB_Extents;
PxReal mGeomEpsilon;
PxU8 mFlags;
//
PxU32 mNbNodes;
PxU32 mNodeSize;
void* mNodes;
PxU32 mInitData;
PxVec3 mCenterOrMinCoeff;
PxVec3 mExtentsOrMaxCoeff;
bool mQuantized;
PX_FORCE_INLINE PxU32 getSizeofVerticesInBytes() const
{
return mNbVertices * sizeof(PxVec3);
}
PX_FORCE_INLINE PxU32 getSizeofTrianglesInBytes() const
{
const PxU32 triangleSize = mFlags & PxTriangleMeshFlag::e16_BIT_INDICES ? sizeof(PxU16) : sizeof(PxU32);
return mNbTriangles * 3 * triangleSize;
}
PX_FORCE_INLINE PxU32 getSizeofFaceRemapInBytes() const
{
return mNbTriangles * sizeof(PxU32);
}
PX_FORCE_INLINE PxU32 getSizeofNodesInBytes() const
{
return mNbNodes * mNodeSize;
}
};
PX_C_EXPORT PX_PHYSX_COMMON_API bool PX_CALL_CONV PxGetTriangleMeshInternalData(PxTriangleMeshInternalData& data, const PxTriangleMesh& mesh, bool takeOwnership);
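// Usage sketch (editor's illustration, not SDK code): reading back the low-level buffers of a
// cooked triangle mesh. 'mesh' is a hypothetical PxTriangleMesh*; passing false for takeOwnership
// is assumed to leave the mesh's buffers owned by the mesh.
//
//	PxTriangleMeshInternalData data;
//	if(PxGetTriangleMeshInternalData(data, *mesh, false))
//	{
//		const PxU32 vertexBytes = data.getSizeofVerticesInBytes();	// mNbVertices * sizeof(PxVec3)
//		const PxU32 indexBytes = data.getSizeofTrianglesInBytes();	// 16- or 32-bit indices, see mFlags
//		PX_UNUSED(vertexBytes);	PX_UNUSED(indexBytes);
//	}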
class PxBVH;
struct PxBVHInternalData
{
PxU32 mNbIndices;
PxU32 mNbNodes;
PxU32 mNodeSize;
void* mNodes;
PxU32* mIndices; // Can be null
void* mBounds;
PX_FORCE_INLINE PxU32 getSizeofNodesInBytes() const
{
return mNbNodes * mNodeSize;
}
PX_FORCE_INLINE PxU32 getSizeofIndicesInBytes() const
{
return mNbIndices * sizeof(PxU32);
}
PX_FORCE_INLINE PxU32 getSizeofBoundsInBytes() const
{
		return (mNbIndices+1)*6*sizeof(float);	// 6 floats (min/max bounds) per entry, plus one trailing entry
}
};
PX_C_EXPORT PX_PHYSX_COMMON_API bool PX_CALL_CONV PxGetBVHInternalData(PxBVHInternalData& data, const PxBVH& bvh, bool takeOwnership);
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
NVIDIA-Omniverse/PhysX/physx/include/geometry/PxHairSystemDesc.h
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
#ifndef PX_HAIRSYSTEM_DESC_H
#define PX_HAIRSYSTEM_DESC_H
/** \addtogroup geomutils
@{
*/
#include "foundation/PxFlags.h"
#include "common/PxCoreUtilityTypes.h"
#if PX_ENABLE_FEATURES_UNDER_CONSTRUCTION
#if !PX_DOXYGEN
namespace physx
{
#endif
struct PxHairSystemDescFlag
{
enum Enum
{
/**
Determines whether to allocate memory on the device (GPU) or on the host (CPU)
*/
eDEVICE_MEMORY = (1<<0)
};
};
/**
\brief collection of set bits defined in PxHairSystemDescFlag
\see PxHairSystemDescFlag
*/
typedef PxFlags<PxHairSystemDescFlag::Enum, PxU16> PxHairSystemDescFlags;
PX_FLAGS_OPERATORS(PxHairSystemDescFlag::Enum, PxU16)
/**
\brief Descriptor class for #PxHairSystem
\note The data is *copied* when a PxHairSystem object is created from this
descriptor. The user may discard the data after the call.
\see PxHairSystem PxHairSystemGeometry PxShape PxPhysics.createHairSystem()
PxCooking.createHairSystem()
*/
class PxHairSystemDesc
{
public:
/**
\brief The number of strands in this hair system
<b>Default:</b> 0
*/
PxU32 numStrands;
/**
\brief The length of a hair segment
<b>Default:</b> 0.1
*/
PxReal segmentLength;
/**
\brief The radius of a hair segment
<b>Default:</b> 0.01
*/
PxReal segmentRadius;
/**
\brief Specifies the number of vertices each strand is composed of.
Length must be equal to numStrands, elements assumed to be of
type PxU32. Number of segments = numVerticesPerStrand - 1.
<b>Default:</b> NULL
*/
PxBoundedData numVerticesPerStrand;
/**
\brief Vertex positions and inverse mass [x,y,z,1/m] in PxBoundedData format.
If count equal to numStrands, assumed to be strand root positions,
otherwise positions of all vertices sorted by strands and increasing
from root towards tip of strand.
Type assumed to be of PxReal.
<b>Default:</b> NULL
*/
PxBoundedData vertices;
/**
\brief Vertex velocities in PxBoundedData format.
If NULL, zero velocity is assumed.
Type assumed to be of PxReal.
<b>Default:</b> NULL
*/
PxBoundedData velocities;
/**
\brief Flag bits, combined from values of the enum ::PxHairSystemDescFlag
<b>Default:</b> 0
*/
PxHairSystemDescFlags flags;
/**
\brief Constructor with default initialization
*/
PX_INLINE PxHairSystemDesc();
/**
\brief (re)sets the structure to the default.
*/
PX_INLINE void setToDefault();
/**
\brief Check whether the descriptor is valid
\return True if the current settings are valid
*/
PX_INLINE bool isValid() const;
};
PX_INLINE PxHairSystemDesc::PxHairSystemDesc()
{
numStrands = 0;
segmentLength = 0.1f;
segmentRadius = 0.01f;
}
PX_INLINE void PxHairSystemDesc::setToDefault()
{
*this = PxHairSystemDesc();
}
PX_INLINE bool PxHairSystemDesc::isValid() const
{
if (segmentLength < 0.0f || segmentRadius < 0.0f)
return false;
if (2.0f * segmentRadius >= segmentLength)
return false;
if (numStrands == 0)
return false;
if (numVerticesPerStrand.count != numStrands)
return false;
PxU32 totalNumVertices = 0;
for (PxU32 i = 0; i < numVerticesPerStrand.count; i++)
{
const PxU32 numVertices = numVerticesPerStrand.at<PxU32>(i);
totalNumVertices += numVertices;
if (numVertices < 2)
{
return false;
}
}
if (vertices.count != totalNumVertices && vertices.count != numStrands)
return false;
if (velocities.count != totalNumVertices && velocities.count != 0)
return false;
return true;
}
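// Usage sketch (editor's illustration, not SDK code): filling a descriptor for two strands of
// four vertices each. The per-vertex [x,y,z,1/m] record laid out as a PxVec4 is an assumption
// based on the comments above; all buffers are hypothetical. The data is copied on creation,
// so the buffers only need to stay alive until then.
//
//	static const PxU32 counts[2] = { 4, 4 };
//	PxVec4 verts[8];									// root-to-tip, strand by strand
//	// ... fill verts[i] with PxVec4(position, inverseMass) ...
//
//	PxHairSystemDesc desc;
//	desc.numStrands = 2;
//	desc.numVerticesPerStrand.data = counts;
//	desc.numVerticesPerStrand.stride = sizeof(PxU32);
//	desc.numVerticesPerStrand.count = 2;
//	desc.vertices.data = verts;
//	desc.vertices.stride = sizeof(PxVec4);
//	desc.vertices.count = 8;
//	PX_ASSERT(desc.isValid());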
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif
/** @} */
#endif
NVIDIA-Omniverse/PhysX/physx/include/geometry/PxGeometryQuery.h
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_GEOMETRY_QUERY_H
#define PX_GEOMETRY_QUERY_H
/**
\brief Maximum sweep distance for scene sweeps. The distance parameter for sweep functions will be clamped to this value.
The reason for this is that GJK support functions cannot be evaluated reliably near infinity. A viable alternative is a sweep followed by an infinite raycast.
@see PxScene
*/
#define PX_MAX_SWEEP_DISTANCE 1e8f
/** \addtogroup geomutils
@{
*/
#include "common/PxPhysXCommonConfig.h"
#include "geometry/PxGeometryHit.h"
#include "geometry/PxGeometryQueryFlags.h"
#include "geometry/PxGeometryQueryContext.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
class PxGeometry;
class PxContactBuffer;
/**
\brief Collection of geometry object queries (sweeps, raycasts, overlaps, ...).
*/
class PxGeometryQuery
{
public:
/**
\brief Raycast test against a geometry object.
All geometry types are supported except PxParticleSystemGeometry, PxTetrahedronMeshGeometry and PxHairSystemGeometry.
\param[in] origin The origin of the ray to test the geometry object against
\param[in] unitDir Normalized direction of the ray to test the geometry object against
\param[in] geom The geometry object to test the ray against
\param[in] pose Pose of the geometry object
\param[in] maxDist Maximum ray length, has to be in the [0, inf) range
\param[in] hitFlags Specification of the kind of information to retrieve on hit. Combination of #PxHitFlag flags
\param[in] maxHits max number of returned hits = size of 'rayHits' buffer
\param[out] rayHits Raycast hits information
\param[in] stride Stride value (in number of bytes) for rayHits array. Typically sizeof(PxGeomRaycastHit) for packed arrays.
\param[in] queryFlags Optional flags controlling the query.
\param[in] threadContext Optional user-defined per-thread context.
\return Number of hits between the ray and the geometry object
@see PxGeomRaycastHit PxGeometry PxTransform
*/
PX_PHYSX_COMMON_API static PxU32 raycast( const PxVec3& origin, const PxVec3& unitDir,
const PxGeometry& geom, const PxTransform& pose,
PxReal maxDist, PxHitFlags hitFlags,
PxU32 maxHits, PxGeomRaycastHit* PX_RESTRICT rayHits, PxU32 stride = sizeof(PxGeomRaycastHit), PxGeometryQueryFlags queryFlags = PxGeometryQueryFlag::eDEFAULT,
PxRaycastThreadContext* threadContext = NULL);
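	// Usage sketch (editor's illustration, not SDK code): closest-hit raycast against a single box.
	//
	//	PxGeomRaycastHit hit;
	//	const PxU32 nbHits = PxGeometryQuery::raycast(
	//		PxVec3(0.0f, 10.0f, 0.0f), PxVec3(0.0f, -1.0f, 0.0f),	// origin, normalized direction
	//		PxBoxGeometry(1.0f, 1.0f, 1.0f), PxTransform(PxIdentity),
	//		100.0f, PxHitFlag::eDEFAULT, 1, &hit);
	//	if(nbHits)
	//	{
	//		// hit.position, hit.normal and hit.distance are valid here
	//	}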
/**
\brief Overlap test for two geometry objects.
All combinations are supported except:
\li PxPlaneGeometry vs. {PxPlaneGeometry, PxTriangleMeshGeometry, PxHeightFieldGeometry}
\li PxTriangleMeshGeometry vs. PxHeightFieldGeometry
\li PxHeightFieldGeometry vs. PxHeightFieldGeometry
\li Anything involving PxParticleSystemGeometry, PxTetrahedronMeshGeometry or PxHairSystemGeometry.
\param[in] geom0 The first geometry object
\param[in] pose0 Pose of the first geometry object
\param[in] geom1 The second geometry object
\param[in] pose1 Pose of the second geometry object
\param[in] queryFlags Optional flags controlling the query.
\param[in] threadContext Optional user-defined per-thread context.
\return True if the two geometry objects overlap
@see PxGeometry PxTransform
*/
PX_PHYSX_COMMON_API static bool overlap(const PxGeometry& geom0, const PxTransform& pose0,
const PxGeometry& geom1, const PxTransform& pose1,
PxGeometryQueryFlags queryFlags = PxGeometryQueryFlag::eDEFAULT, PxOverlapThreadContext* threadContext=NULL);
/**
\brief Sweep a specified geometry object in space and test for collision with a given object.
The following combinations are supported.
\li PxSphereGeometry vs. {PxSphereGeometry, PxPlaneGeometry, PxCapsuleGeometry, PxBoxGeometry, PxConvexMeshGeometry, PxTriangleMeshGeometry, PxHeightFieldGeometry}
\li PxCapsuleGeometry vs. {PxSphereGeometry, PxPlaneGeometry, PxCapsuleGeometry, PxBoxGeometry, PxConvexMeshGeometry, PxTriangleMeshGeometry, PxHeightFieldGeometry}
\li PxBoxGeometry vs. {PxSphereGeometry, PxPlaneGeometry, PxCapsuleGeometry, PxBoxGeometry, PxConvexMeshGeometry, PxTriangleMeshGeometry, PxHeightFieldGeometry}
\li PxConvexMeshGeometry vs. {PxSphereGeometry, PxPlaneGeometry, PxCapsuleGeometry, PxBoxGeometry, PxConvexMeshGeometry, PxTriangleMeshGeometry, PxHeightFieldGeometry}
\param[in] unitDir Normalized direction along which object geom0 should be swept
\param[in] maxDist Maximum sweep distance, has to be in the [0, inf) range
\param[in] geom0 The geometry object to sweep. Supported geometries are #PxSphereGeometry, #PxCapsuleGeometry, #PxBoxGeometry and #PxConvexMeshGeometry
\param[in] pose0 Pose of the geometry object to sweep
\param[in] geom1 The geometry object to test the sweep against
\param[in] pose1 Pose of the geometry object to sweep against
\param[out] sweepHit The sweep hit information. Only valid if this method returns true.
\param[in] hitFlags Specify which properties per hit should be computed and written to result hit array. Combination of #PxHitFlag flags
\param[in] inflation Surface of the swept shape is additively extruded in the normal direction, rounding corners and edges.
\param[in] queryFlags Optional flags controlling the query.
\param[in] threadContext Optional user-defined per-thread context.
\return True if the swept geometry object geom0 hits the object geom1
@see PxGeomSweepHit PxGeometry PxTransform
*/
PX_PHYSX_COMMON_API static bool sweep( const PxVec3& unitDir, const PxReal maxDist,
const PxGeometry& geom0, const PxTransform& pose0,
const PxGeometry& geom1, const PxTransform& pose1,
PxGeomSweepHit& sweepHit, PxHitFlags hitFlags = PxHitFlag::eDEFAULT,
const PxReal inflation = 0.0f, PxGeometryQueryFlags queryFlags = PxGeometryQueryFlag::eDEFAULT,
PxSweepThreadContext* threadContext = NULL);
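	// Usage sketch (editor's illustration, not SDK code): sweeping a sphere against a box.
	// Poses and direction are hypothetical.
	//
	//	PxGeomSweepHit sweepHit;
	//	const bool status = PxGeometryQuery::sweep(
	//		PxVec3(1.0f, 0.0f, 0.0f), 10.0f,									// unit direction, max distance
	//		PxSphereGeometry(0.5f), PxTransform(PxVec3(-5.0f, 0.0f, 0.0f)),		// swept geometry and pose
	//		PxBoxGeometry(1.0f, 1.0f, 1.0f), PxTransform(PxIdentity),			// target geometry and pose
	//		sweepHit);
	//	if(status && !sweepHit.hadInitialOverlap())
	//	{
	//		// sweepHit.position, sweepHit.normal and sweepHit.distance describe the impact
	//	}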
/**
\brief Compute minimum translational distance (MTD) between two geometry objects.
All combinations of geom objects are supported except:
- plane/plane
- plane/mesh
- plane/heightfield
- mesh/mesh
- mesh/heightfield
- heightfield/heightfield
- anything involving PxParticleSystemGeometry, PxTetrahedronMeshGeometry or PxHairSystemGeometry
The function returns a unit vector ('direction') and a penetration depth ('depth').
The depenetration vector D = direction * depth should be applied to the first object, to
get out of the second object.
The returned depth is always positive or zero.
If the objects do not overlap, the function cannot compute the MTD and returns false.
\param[out] direction Computed MTD unit direction
\param[out] depth Penetration depth. Always positive or zero.
\param[in] geom0 The first geometry object
\param[in] pose0 Pose of the first geometry object
\param[in] geom1 The second geometry object
\param[in] pose1 Pose of the second geometry object
\param[in] queryFlags Optional flags controlling the query.
\return True if the MTD has successfully been computed, i.e. if objects do overlap.
@see PxGeometry PxTransform
*/
PX_PHYSX_COMMON_API static bool computePenetration( PxVec3& direction, PxF32& depth,
const PxGeometry& geom0, const PxTransform& pose0,
const PxGeometry& geom1, const PxTransform& pose1,
PxGeometryQueryFlags queryFlags = PxGeometryQueryFlag::eDEFAULT);
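	// Usage sketch (editor's illustration, not SDK code): depenetrating two overlapping spheres.
	//
	//	PxVec3 dir;
	//	PxF32 depth;
	//	const bool isPenetrating = PxGeometryQuery::computePenetration(dir, depth,
	//		PxSphereGeometry(1.0f), PxTransform(PxVec3(0.0f)),
	//		PxSphereGeometry(1.0f), PxTransform(PxVec3(1.0f, 0.0f, 0.0f)));
	//	if(isPenetrating)
	//	{
	//		// translating the first sphere by dir * depth separates the two shapes
	//	}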
/**
\brief Computes distance between a point and a geometry object.
Currently supported geometry objects: box, sphere, capsule, convex, mesh.
\note For meshes, only the BVH34 midphase data-structure is supported.
\param[in] point The point P
\param[in] geom The geometry object
\param[in] pose Pose of the geometry object
\param[out] closestPoint Optionally returned closest point to P on the geom object. Only valid when returned distance is strictly positive.
\param[out] closestIndex Optionally returned closest (triangle) index. Only valid for triangle meshes.
\param[in] queryFlags Optional flags controlling the query.
\return Square distance between the point and the geom object, or 0.0 if the point is inside the object, or -1.0 if an error occurred (unsupported geometry type or invalid pose)
@see PxGeometry PxTransform
*/
PX_PHYSX_COMMON_API static PxReal pointDistance(const PxVec3& point, const PxGeometry& geom, const PxTransform& pose,
PxVec3* closestPoint=NULL, PxU32* closestIndex=NULL,
PxGeometryQueryFlags queryFlags = PxGeometryQueryFlag::eDEFAULT);
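	// Usage sketch (editor's illustration, not SDK code): squared distance from a point to a box.
	//
	//	PxVec3 closest;
	//	const PxReal sqDist = PxGeometryQuery::pointDistance(PxVec3(5.0f, 0.0f, 0.0f),
	//		PxBoxGeometry(1.0f, 1.0f, 1.0f), PxTransform(PxIdentity), &closest);
	//	// here sqDist == 16.0f and closest == (1,0,0); a return value of 0.0f means the point is inside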
/**
\brief computes the bounds for a geometry object
\param[out] bounds Returned computed bounds
\param[in] geom The geometry object
\param[in] pose Pose of the geometry object
\param[in] offset Offset for computed bounds. This value is added to the geom's extents.
\param[in] inflation Scale factor for computed bounds. The geom's extents are multiplied by this value.
\param[in] queryFlags Optional flags controlling the query.
@see PxGeometry PxTransform
*/
PX_PHYSX_COMMON_API static void computeGeomBounds(PxBounds3& bounds, const PxGeometry& geom, const PxTransform& pose, float offset=0.0f, float inflation=1.0f, PxGeometryQueryFlags queryFlags = PxGeometryQueryFlag::eDEFAULT);
/**
\brief Generate collision contacts between a convex geometry and a single triangle
\param[in] geom The geometry object. Can be a capsule, a box or a convex mesh
\param[in] pose Pose of the geometry object
\param[in] triangleVertices Triangle vertices in local space
\param[in] triangleIndex Triangle index
\param[in] contactDistance The distance at which contacts begin to be generated between the pairs
\param[in] meshContactMargin The mesh contact margin.
\param[in] toleranceLength The toleranceLength. Used for scaling distance-based thresholds internally to produce appropriate results given simulations in different units
\param[out] contactBuffer A buffer to write contacts to.
\return True if there was collision
*/
PX_PHYSX_COMMON_API static bool generateTriangleContacts(const PxGeometry& geom, const PxTransform& pose, const PxVec3 triangleVertices[3], PxU32 triangleIndex, PxReal contactDistance, PxReal meshContactMargin, PxReal toleranceLength, PxContactBuffer& contactBuffer);
/**
\brief Checks if provided geometry is valid.
\param[in] geom The geometry object.
\return True if geometry is valid.
@see PxGeometry
*/
PX_PHYSX_COMMON_API static bool isValid(const PxGeometry& geom);
};
#if !PX_DOXYGEN
}
#endif
/** @} */
#endif
NVIDIA-Omniverse/PhysX/physx/include/geometry/PxGeometryQueryContext.h
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_GEOMETRY_QUERY_CONTEXT_H
#define PX_GEOMETRY_QUERY_CONTEXT_H
#include "common/PxPhysXCommonConfig.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief A per-thread context passed to low-level query functions.
This is a user-defined optional parameter that gets passed down to low-level query functions (raycast / overlap / sweep).
This is not used directly in PhysX, although the context in this case is the PxHitCallback used in the query. This allows
user-defined query functions, such as the ones from PxCustomGeometry, to get some additional data about the query. In this
case this is a 'per-query' context rather than 'per-thread', but the initial goal of this parameter is to give custom
query callbacks access to per-thread data structures (e.g. caches) that could be needed to implement the callbacks.
In any case this is mostly for user-controlled query systems.
*/
struct PxQueryThreadContext
{
};
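// Usage sketch (editor's illustration, not SDK code): a user-defined per-thread context passed
// through PxGeometryQuery::raycast(). PhysX forwards the pointer unchanged, so the struct and
// the data it carries are entirely up to the application.
//
//	struct MyRaycastContext : PxQueryThreadContext
//	{
//		PxU32 nbTests;
//	};
//
//	// MyRaycastContext ctx; pass &ctx as the last raycast() argument and read it back, e.g.
//	// from a PxCustomGeometry callback that receives the same context pointer.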
/**
\brief A per-thread context passed to low-level raycast functions.
*/
typedef PxQueryThreadContext PxRaycastThreadContext;
/**
\brief A per-thread context passed to low-level overlap functions.
*/
typedef PxQueryThreadContext PxOverlapThreadContext;
/**
\brief A per-thread context passed to low-level sweep functions.
*/
typedef PxQueryThreadContext PxSweepThreadContext;
#if !PX_DOXYGEN
}
#endif
#endif
NVIDIA-Omniverse/PhysX/physx/include/geometry/PxGjkQuery.h
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_GJK_QUERY_H
#define PX_GJK_QUERY_H
#include "common/PxPhysXCommonConfig.h"
#include "foundation/PxVec3.h"
#include "foundation/PxQuat.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief Collection of GJK query functions (sweeps, raycasts, overlaps, ...).
*/
class PxGjkQuery
{
public:
/**
\brief Abstract interface for a user defined shape GJK mapping support.
A user defined shape consists of a core shape and a margin. If the distance
between two shapes' cores is equal to the sum of their margins, these shapes are
considered touching.
*/
struct Support
{
/* Virtual destructor */
virtual ~Support() {}
/**
\brief Return the user defined shape margin. Margin should be greater than or equal to 0
\return Margin.
*/
virtual PxReal getMargin() const = 0;
/**
\brief Return the farthest point on the user defined shape's core in given direction.
\param[in] dir Direction
\return Farthest point in given direction.
*/
virtual PxVec3 supportLocal(const PxVec3& dir) const = 0;
};
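	// Implementation sketch (editor's illustration, not SDK code): a minimal Support mapping for a
	// sphere, modelled as a point core plus a margin equal to the radius. This is one common way to
	// express a sphere for GJK, not a prescribed one.
	//
	//	struct SphereSupport : PxGjkQuery::Support
	//	{
	//		PxReal radius;
	//		SphereSupport(PxReal r) : radius(r) {}
	//		virtual PxReal getMargin() const { return radius; }
	//		virtual PxVec3 supportLocal(const PxVec3&) const { return PxVec3(0.0f); }
	//	};
	//
	//	// e.g. PxGjkQuery::overlap(SphereSupport(1.0f), SphereSupport(1.0f), poseA, poseB)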
/**
\brief Computes proximity information for two shapes using GJK-EPA algorithm
\param[in] a Shape A support mapping
\param[in] b Shape B support mapping
\param[in] poseA Shape A transformation
\param[in] poseB Shape B transformation
\param[in] contactDistance The distance at which proximity info begins to be computed between the shapes
\param[in] toleranceLength The toleranceLength. Used for scaling distance-based thresholds internally to produce appropriate results given simulations in different units
\param[out] pointA The closest/deepest point on shape A surface
\param[out] pointB The closest/deepest point on shape B surface
\param[out] separatingAxis Translating shape B along 'separatingAxis' by 'separation' makes the shapes touching
\param[out] separation Translating shape B along 'separatingAxis' by 'separation' makes the shapes touching
\return False if the distance is greater than contactDistance.
*/
PX_PHYSX_COMMON_API static bool proximityInfo(const Support& a, const Support& b, const PxTransform& poseA, const PxTransform& poseB,
PxReal contactDistance, PxReal toleranceLength, PxVec3& pointA, PxVec3& pointB, PxVec3& separatingAxis, PxReal& separation);
/**
\brief Raycast test against the given shape.
\param[in] shape Shape support mapping
\param[in] pose Shape transformation
\param[in] rayStart The start point of the ray to test the shape against
\param[in] unitDir Normalized direction of the ray to test the shape against
\param[in] maxDist Maximum ray length, has to be in the [0, inf) range
\param[out] t Hit distance
\param[out] n Hit normal
\param[out] p Hit point
\return True if there is a hit.
*/
PX_PHYSX_COMMON_API static bool raycast(const Support& shape, const PxTransform& pose, const PxVec3& rayStart,
const PxVec3& unitDir, PxReal maxDist, PxReal& t, PxVec3& n, PxVec3& p);
/**
\brief Overlap test for two shapes.
\param[in] a Shape A support mapping
\param[in] b Shape B support mapping
\param[in] poseA Shape A transformation
\param[in] poseB Shape B transformation
\return True if the shapes overlap.
*/
PX_PHYSX_COMMON_API static bool overlap(const Support& a, const Support& b, const PxTransform& poseA, const PxTransform& poseB);
/**
\brief Sweep the shape B in space and test for collision with the shape A.
\param[in] a Shape A support mapping
\param[in] b Shape B support mapping
\param[in] poseA Shape A transformation
\param[in] poseB Shape B transformation
\param[in] unitDir Normalized direction along which shape B is swept
\param[in] maxDist Maximum sweep distance, has to be in the [0, inf) range
\param[out] t Hit distance
\param[out] n Hit normal
\param[out] p Hit point
\return True if there is a hit.
*/
PX_PHYSX_COMMON_API static bool sweep(const Support& a, const Support& b, const PxTransform& poseA, const PxTransform& poseB,
const PxVec3& unitDir, PxReal maxDist, PxReal& t, PxVec3& n, PxVec3& p);
};
#if !PX_DOXYGEN
}
#endif
#endif
NVIDIA-Omniverse/PhysX/physx/include/geometry/PxTetrahedronMeshGeometry.h
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_TETRAHEDRON_GEOMETRY_H
#define PX_TETRAHEDRON_GEOMETRY_H
/** \addtogroup geomutils
@{
*/
#include "geometry/PxGeometry.h"
#include "geometry/PxMeshScale.h"
#include "common/PxCoreUtilityTypes.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
class PxTetrahedronMesh;
/**
\brief Tetrahedron mesh geometry class.
This class wraps a tetrahedron mesh such that it can be used in contexts where a PxGeometry type is needed.
*/
class PxTetrahedronMeshGeometry : public PxGeometry
{
public:
/**
\brief Constructor. By default creates an empty object with a NULL mesh and identity scale.
*/
PX_INLINE PxTetrahedronMeshGeometry(PxTetrahedronMesh* mesh = NULL) :
PxGeometry(PxGeometryType::eTETRAHEDRONMESH),
tetrahedronMesh(mesh)
{}
/**
\brief Copy constructor.
\param[in] that Other object
*/
PX_INLINE PxTetrahedronMeshGeometry(const PxTetrahedronMeshGeometry& that) :
PxGeometry(that),
tetrahedronMesh(that.tetrahedronMesh)
{}
/**
\brief Assignment operator
*/
PX_INLINE void operator=(const PxTetrahedronMeshGeometry& that)
{
mType = that.mType;
tetrahedronMesh = that.tetrahedronMesh;
}
/**
\brief Returns true if the geometry is valid.
\return True if the current settings are valid for shape creation.
\note A valid tetrahedron mesh geometry references a non-NULL PxTetrahedronMesh object.
It is illegal to call PxRigidActor::createShape and PxPhysics::createShape with an invalid tetrahedron mesh geometry.
@see PxRigidActor::createShape, PxPhysics::createShape
*/
PX_INLINE bool isValid() const;
public:
PxTetrahedronMesh* tetrahedronMesh; //!< A reference to the mesh object.
};
PX_INLINE bool PxTetrahedronMeshGeometry::isValid() const
{
if(mType != PxGeometryType::eTETRAHEDRONMESH)
return false;
if(!tetrahedronMesh)
return false;
return true;
}
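// Usage sketch (editor's illustration, not SDK code): wrapping a cooked tetrahedron mesh so it can
// be used wherever a PxGeometry is expected. 'tetMesh' is a hypothetical PxTetrahedronMesh*.
//
//	PxTetrahedronMeshGeometry geom(tetMesh);
//	PX_ASSERT(geom.isValid());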
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
NVIDIA-Omniverse/PhysX/physx/include/geometry/PxMeshQuery.h
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_MESH_QUERY_H
#define PX_MESH_QUERY_H
/** \addtogroup geomutils
@{
*/
#include "common/PxPhysXCommonConfig.h"
#include "geometry/PxGeometryHit.h"
#include "geometry/PxGeometryQueryFlags.h"
#include "geometry/PxReportCallback.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
class PxGeometry;
class PxConvexMeshGeometry;
class PxTriangleMeshGeometry;
class PxHeightFieldGeometry;
class PxTriangle;
struct PxMeshMeshQueryFlag
{
enum Enum
{
eDEFAULT = 0, //!< Report all overlaps
eDISCARD_COPLANAR = (1<<0), //!< Ignore coplanar triangle-triangle overlaps
eRESERVED = (1<<1), //!< Reserved flag
eRESERVED1 = (1<<1), //!< Reserved flag
eRESERVED2 = (1<<2), //!< Reserved flag
eRESERVED3 = (1<<3) //!< Reserved flag
};
};
PX_FLAGS_TYPEDEF(PxMeshMeshQueryFlag, PxU32)
class PxMeshQuery
{
public:
/**
\brief Retrieves triangle data from a triangle ID.
This function can be used together with #findOverlapTriangleMesh() to retrieve triangle properties.
\param[in] triGeom Geometry of the triangle mesh to extract the triangle from.
\param[in] transform Transform for the triangle mesh
\param[in] triangleIndex The index of the triangle to retrieve.
\param[out] triangle Triangle points in world space.
\param[out] vertexIndices Returned vertex indices for given triangle
\param[out] adjacencyIndices Returned 3 triangle adjacency internal face indices (0xFFFFFFFF if no adjacency). The mesh must be cooked with cooking param buildTriangleAdjacencies enabled.
\note This function will flip the triangle normal whenever triGeom.scale.hasNegativeDeterminant() is true.
@see PxTriangle PxTriangleFlags PxTriangleID findOverlapTriangleMesh()
*/
PX_PHYSX_COMMON_API static void getTriangle(const PxTriangleMeshGeometry& triGeom, const PxTransform& transform, PxTriangleID triangleIndex, PxTriangle& triangle, PxU32* vertexIndices=NULL, PxU32* adjacencyIndices=NULL);
/**
\brief Retrieves triangle data from a triangle ID.
This function can be used together with #findOverlapHeightField() to retrieve triangle properties.
\param[in] hfGeom Geometry of the height field to extract the triangle from.
\param[in] transform Transform for the height field.
\param[in] triangleIndex The index of the triangle to retrieve.
\param[out] triangle Triangle points in world space.
\param[out] vertexIndices Returned vertex indices for given triangle
\param[out] adjacencyIndices Returned 3 triangle adjacency triangle indices (0xFFFFFFFF if no adjacency).
\note This function will flip the triangle normal whenever triGeom.scale.hasNegativeDeterminant() is true.
\note TriangleIndex is an index used in internal format, which does have an index out of the bounds in last row.
To traverse all tri indices in the HF, the following code can be applied:
for (PxU32 row = 0; row < (nbRows - 1); row++)
{
for (PxU32 col = 0; col < (nbCols - 1); col++)
{
for (PxU32 k = 0; k < 2; k++)
{
const PxU32 triIndex = 2 * (row*nbCols + col) + k;
....
}
}
}
@see PxTriangle PxTriangleFlags PxTriangleID findOverlapHeightField()
*/
PX_PHYSX_COMMON_API static void getTriangle(const PxHeightFieldGeometry& hfGeom, const PxTransform& transform, PxTriangleID triangleIndex, PxTriangle& triangle, PxU32* vertexIndices=NULL, PxU32* adjacencyIndices=NULL);
/**
\brief Find the mesh triangles which touch the specified geometry object.
For mesh-vs-mesh overlap tests, please use the specialized function below.
Returned triangle indices can be used with #getTriangle() to retrieve the triangle properties.
\param[in] geom The geometry object to test for mesh triangle overlaps. Supported geometries are #PxSphereGeometry, #PxCapsuleGeometry and #PxBoxGeometry
\param[in] geomPose Pose of the geometry object
\param[in] meshGeom The triangle mesh geometry to check overlap against
\param[in] meshPose Pose of the triangle mesh
\param[out] results Indices of overlapping triangles
\param[in] maxResults Size of 'results' buffer
\param[in] startIndex Index of first result to be retrieved. Previous indices are skipped.
\param[out] overflow True if a buffer overflow occurred
\param[in] queryFlags Optional flags controlling the query.
\return Number of overlaps found, i.e. number of elements written to the results buffer
@see PxTriangleMeshGeometry getTriangle() PxGeometryQueryFlags
*/
PX_PHYSX_COMMON_API static PxU32 findOverlapTriangleMesh( const PxGeometry& geom, const PxTransform& geomPose,
const PxTriangleMeshGeometry& meshGeom, const PxTransform& meshPose,
PxU32* results, PxU32 maxResults, PxU32 startIndex, bool& overflow,
PxGeometryQueryFlags queryFlags = PxGeometryQueryFlag::eDEFAULT);
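	// Usage sketch (editor's illustration, not SDK code): gathering all mesh triangles touched by a
	// sphere, resuming from 'startIndex' whenever the fixed-size buffer overflows. 'spherePose',
	// 'meshGeom' and 'meshPose' are hypothetical.
	//
	//	PxU32 hits[64];
	//	bool overflow = true;
	//	PxU32 startIndex = 0;
	//	while(overflow)
	//	{
	//		const PxU32 nb = PxMeshQuery::findOverlapTriangleMesh(
	//			PxSphereGeometry(1.0f), spherePose, meshGeom, meshPose,
	//			hits, 64, startIndex, overflow);
	//		for(PxU32 i = 0; i < nb; i++)
	//		{
	//			PxTriangle tri;
	//			PxMeshQuery::getTriangle(meshGeom, meshPose, hits[i], tri);	// world-space triangle
	//		}
	//		startIndex += nb;
	//	}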
/**
\brief Mesh-vs-mesh overlap test
A specialized findOverlapTriangleMesh function for mesh-vs-mesh. The other findOverlapTriangleMesh() function above cannot be used
directly since it only returns a single set of triangle indices that belongs to one of the meshes only. This function returns pairs
of triangle indices that belong to both the first & second input meshes.
Returned triangle indices can be used with #getTriangle() to retrieve the triangle properties.
\note This is only implemented for the PxMeshMidPhase::eBVH34 data structure.
\param[in] callback The callback object used to report results
\param[in] meshGeom0 First triangle mesh geometry
\param[in] meshPose0 Pose of first triangle mesh geometry
\param[in] meshGeom1 Second triangle mesh geometry
\param[in] meshPose1 Pose of second triangle mesh geometry
\param[in] queryFlags Optional flags controlling the query.
\param[in] meshMeshFlags Optional flags controlling the query.
\param[in] tolerance Optional tolerance distance
\return true if an overlap has been detected, false if the meshes are disjoint
@see PxTriangleMeshGeometry getTriangle() PxReportCallback PxGeometryQueryFlags PxMeshMeshQueryFlags
*/
PX_PHYSX_COMMON_API static bool findOverlapTriangleMesh(PxReportCallback<PxGeomIndexPair>& callback,
const PxTriangleMeshGeometry& meshGeom0, const PxTransform& meshPose0,
const PxTriangleMeshGeometry& meshGeom1, const PxTransform& meshPose1,
PxGeometryQueryFlags queryFlags = PxGeometryQueryFlag::eDEFAULT,
PxMeshMeshQueryFlags meshMeshFlags = PxMeshMeshQueryFlag::eDEFAULT,
float tolerance = 0.0f);
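	// Usage sketch (editor's illustration, not SDK code): collecting overlapping triangle-index pairs
	// between two BVH34 meshes through a PxRegularReportCallback (see PxReportCallback.h). The
	// callback class and the mesh variables are hypothetical.
	//
	//	struct PairCollector : PxRegularReportCallback<PxGeomIndexPair>
	//	{
	//		virtual bool processResults(PxU32 nbItems, const PxGeomIndexPair* items)
	//		{
	//			// items[i].id0 indexes a triangle of meshGeom0, items[i].id1 a triangle of meshGeom1
	//			return true;	// keep the query running
	//		}
	//	};
	//
	//	PairCollector collector;
	//	const bool anyOverlap = PxMeshQuery::findOverlapTriangleMesh(collector,
	//		meshGeom0, meshPose0, meshGeom1, meshPose1);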
/**
\brief Find the height field triangles which touch the specified geometry object.
Returned triangle indices can be used with #getTriangle() to retrieve the triangle properties.
\param[in] geom The geometry object to test for height field overlaps. Supported geometries are #PxSphereGeometry, #PxCapsuleGeometry and #PxBoxGeometry. The sphere and capsule queries are currently conservative estimates.
\param[in] geomPose Pose of the geometry object
\param[in] hfGeom The height field geometry to check overlap against
\param[in] hfPose Pose of the height field
\param[out] results Indices of overlapping triangles
\param[in] maxResults Size of 'results' buffer
\param[in] startIndex Index of first result to be retrieved. Previous indices are skipped.
\param[out] overflow True if a buffer overflow occurred
\param[in] queryFlags Optional flags controlling the query.
\return Number of overlaps found, i.e. number of elements written to the results buffer
@see PxHeightFieldGeometry getTriangle() PxGeometryQueryFlags
*/
PX_PHYSX_COMMON_API static PxU32 findOverlapHeightField(const PxGeometry& geom, const PxTransform& geomPose,
const PxHeightFieldGeometry& hfGeom, const PxTransform& hfPose,
PxU32* results, PxU32 maxResults, PxU32 startIndex, bool& overflow,
PxGeometryQueryFlags queryFlags = PxGeometryQueryFlag::eDEFAULT);
/**
\brief Sweep a specified geometry object in space and test for collision with a set of given triangles.
This function simply sweeps input geometry against each input triangle, in the order they are given.
This is an O(N) operation with N = number of input triangles. It does not use any particular acceleration structure.
\param[in] unitDir Normalized direction of the sweep.
\param[in] distance Sweep distance. Needs to be larger than 0. Clamped to PX_MAX_SWEEP_DISTANCE.
\param[in] geom The geometry object to sweep. Supported geometries are #PxSphereGeometry, #PxCapsuleGeometry and #PxBoxGeometry
\param[in] pose Pose of the geometry object to sweep.
\param[in] triangleCount Number of specified triangles
\param[in] triangles Array of triangles to sweep against
\param[out] sweepHit The sweep hit information. See the notes below for limitations about returned results.
\param[in] hitFlags Specification of the kind of information to retrieve on hit. Combination of #PxHitFlag flags. See the notes below for limitations about supported flags.
\param[in] cachedIndex Cached triangle index for subsequent calls. Cached triangle is tested first. Optional parameter.
\param[in] inflation This parameter creates a skin around the swept geometry which increases its extents for sweeping. The sweep will register a hit as soon as the skin touches a shape, and will return the corresponding distance and normal.
\param[in] doubleSided Counterpart of PxMeshGeometryFlag::eDOUBLE_SIDED for input triangles.
\param[in] queryFlags Optional flags controlling the query.
\return True if the swept geometry object hits the specified triangles
\note Only the following geometry types are currently supported: PxSphereGeometry, PxCapsuleGeometry, PxBoxGeometry
\note If a shape from the scene is already overlapping with the query shape in its starting position, the hit is returned unless eASSUME_NO_INITIAL_OVERLAP was specified.
\note This function returns a single closest hit across all the input triangles. Multiple hits are not supported.
\note Supported hitFlags are PxHitFlag::eDEFAULT, PxHitFlag::eASSUME_NO_INITIAL_OVERLAP, PxHitFlag::ePRECISE_SWEEP, PxHitFlag::eMESH_BOTH_SIDES, PxHitFlag::eMESH_ANY.
\note ePOSITION is only defined when there is no initial overlap (sweepHit.hadInitialOverlap() == false)
\note The returned normal for initially overlapping sweeps is set to -unitDir.
\note Otherwise the returned normal is the front normal of the triangle even if PxHitFlag::eMESH_BOTH_SIDES is set.
\note The returned PxGeomSweepHit::faceIndex parameter will hold the index of the hit triangle in input array, i.e. the range is [0; triangleCount). For initially overlapping sweeps, this is the index of overlapping triangle.
\note The inflation parameter is not compatible with PxHitFlag::ePRECISE_SWEEP.
@see PxTriangle PxSweepHit PxGeometry PxTransform PxGeometryQueryFlags
*/
PX_PHYSX_COMMON_API static bool sweep(const PxVec3& unitDir,
const PxReal distance,
const PxGeometry& geom,
const PxTransform& pose,
PxU32 triangleCount,
const PxTriangle* triangles,
PxGeomSweepHit& sweepHit,
PxHitFlags hitFlags = PxHitFlag::eDEFAULT,
const PxU32* cachedIndex = NULL,
const PxReal inflation = 0.0f,
bool doubleSided = false,
PxGeometryQueryFlags queryFlags = PxGeometryQueryFlag::eDEFAULT);
};
#if !PX_DOXYGEN
}
#endif
/** @} */
#endif
NVIDIA-Omniverse/PhysX/physx/include/geometry/PxReportCallback.h
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_REPORT_CALLBACK_H
#define PX_REPORT_CALLBACK_H
/** \addtogroup geomutils
@{
*/
#include "common/PxPhysXCommonConfig.h"
#include "foundation/PxArray.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief Base class for callback reporting an unknown number of items to users.
This can be subclassed and customized by users, or one of several pre-designed callbacks can be used instead (see below).
This design lets users decide how to retrieve the results of a query:
- either one by one via a regular callback
- or one batch at a time via a callback
- or written out directly to their own C-style buffer
- or pushed back to their own PxArray
- etc
@see PxRegularReportCallback PxLocalStorageReportCallback PxExternalStorageReportCallback PxDynamicArrayReportCallback
*/
template<class T>
class PxReportCallback
{
public:
PxReportCallback(T* buffer=NULL, PxU32 capacity=0) : mBuffer(buffer), mCapacity(capacity), mSize(0) {}
virtual ~PxReportCallback() {}
T* mBuffer; // Destination buffer for writing results. If NULL, the system will use its internal buffer and set that pointer as it sees fit.
// Otherwise users can set it to where they want the results to be written.
PxU32 mCapacity; // Capacity of mBuffer. If mBuffer is NULL, this controls how many items are reported to users at the same time (with a limit of 256).
PxU32 mSize; //!< Current number of items in the buffer. This is entirely managed by the system.
/**
\brief Reports query results to users.
This will be called by the system as many times as necessary to report all results.
\param[in] nbItems Number of reported items
\param[in] items array of reported items
\return true to continue the query, false to abort the query
*/
virtual bool flushResults(PxU32 nbItems, const T* items) = 0;
};
/**
\brief Regular report callback
This reports results like a regular callback would:
- without explicit buffer management from users
- by default, one item at a time
This customized callback sends results to users via the processResults() function.
The capacity parameter dictates how many items can be reported at a time,
i.e. how many times the flushResults/processResults function will be called by the system.
@see PxReportCallback
*/
template<class T>
class PxRegularReportCallback : public PxReportCallback<T>
{
public:
PxRegularReportCallback(const PxU32 capacity=1)
{
PX_ASSERT(capacity<=256);
this->mCapacity = capacity;
}
virtual bool flushResults(PxU32 nbItems, const T* items)
{
PX_ASSERT(nbItems<=this->mCapacity);
PX_ASSERT(items==this->mBuffer);
return processResults(nbItems, items);
}
/**
\brief Reports query results to users.
\param[in] nbItems Number of reported items
\param[in] items array of reported items
\return true to continue the query, false to abort the query
*/
virtual bool processResults(PxU32 nbItems, const T* items) = 0;
};
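// ---------------------------------------------------------------------------
// Illustrative usage sketch (editorial addition, not part of the original SDK
// header): a user-defined regular callback that receives results in batches of
// up to 64 items. The item type PxU32 and the batch size are arbitrary choices
// for the example.
//
//   class MyRegularCallback : public PxRegularReportCallback<PxU32>
//   {
//   public:
//       MyRegularCallback() : PxRegularReportCallback<PxU32>(64), mNbTouched(0) {}
//
//       virtual bool processResults(PxU32 nbItems, const PxU32* items)
//       {
//           PX_UNUSED(items);
//           mNbTouched += nbItems;
//           return true;    // keep the query running
//       }
//
//       PxU32 mNbTouched;
//   };
//
// An instance of this class can then be passed to any query that accepts a
// PxReportCallback<PxU32>&.
// ---------------------------------------------------------------------------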
/**
\brief Local storage report callback
This is the same as a regular callback, except the destination buffer is a local buffer within the class.
This customized callback sends results to users via the processResults() function.
The capacity of the embedded buffer (determined by a template parameter) dictates how many items can be reported at a time,
i.e. how many times the flushResults/processResults function will be called by the system.
@see PxReportCallback
*/
template<class T, const PxU32 capacityT>
class PxLocalStorageReportCallback : public PxReportCallback<T>
{
T mLocalStorage[capacityT];
public:
PxLocalStorageReportCallback()
{
this->mBuffer = mLocalStorage;
this->mCapacity = capacityT;
}
virtual bool flushResults(PxU32 nbItems, const T* items)
{
PX_ASSERT(items==mLocalStorage);
PX_ASSERT(nbItems<=this->mCapacity);
return processResults(nbItems, items);
}
/**
\brief Reports query results to users.
\param[in] nbItems Number of reported items
\param[in] items array of reported items
\return true to continue the query, false to abort the query
*/
virtual bool processResults(PxU32 nbItems, const T* items) = 0;
};
/**
\brief External storage report callback
This is the same as a regular callback, except the destination buffer is a user-provided external buffer.
Typically the provided buffer can be larger here than for PxLocalStorageReportCallback, and it could
even be a scratchpad-kind of memory shared by multiple sub-systems.
This would be the same as having a C-style buffer to write out results in the query interface.
This customized callback sends results to users via the processResults() function.
The capacity parameter dictates how many items can be reported at a time,
i.e. how many times the flushResults/processResults function will be called by the system.
@see PxReportCallback
*/
template<class T>
class PxExternalStorageReportCallback : public PxReportCallback<T>
{
public:
PxExternalStorageReportCallback(T* buffer, PxU32 capacity)
{
this->mBuffer = buffer;
this->mCapacity = capacity;
}
virtual bool flushResults(PxU32 nbItems, const T* items)
{
PX_ASSERT(items==this->mBuffer);
PX_ASSERT(nbItems<=this->mCapacity);
return processResults(nbItems, items);
}
/**
\brief Reports query results to users.
\param[in] nbItems Number of reported items
\param[in] items array of reported items
\return true to continue the query, false to abort the query
*/
virtual bool processResults(PxU32 nbItems, const T* items) = 0;
};
/**
\brief Dynamic array report callback
This callback emulates the behavior of pushing results to a (user-provided) dynamic array.
This customized callback does not actually call users back during the query, results are
available afterwards in the provided dynamic array. This would be the same as having a PxArray
directly in the query interface.
@see PxReportCallback
*/
template<class T>
class PxDynamicArrayReportCallback : public PxReportCallback<T>
{
public:
PxDynamicArrayReportCallback(PxArray<T>& results) : mResults(results)
{
mResults.reserve(32);
this->mBuffer = mResults.begin();
this->mCapacity = mResults.capacity();
}
virtual bool flushResults(PxU32 nbItems, const T* /*items*/)
{
const PxU32 size = mResults.size();
const PxU32 capa = mResults.capacity();
const PxU32 newSize = size+nbItems;
PX_ASSERT(newSize<=capa);
mResults.forceSize_Unsafe(newSize);
if(newSize==capa)
{
const PxU32 newCapa = capa*2;
mResults.reserve(newCapa);
this->mBuffer = mResults.begin() + newSize;
this->mCapacity = mResults.capacity() - newSize;
}
return true;
}
PxArray<T>& mResults;
};
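// ---------------------------------------------------------------------------
// Illustrative usage sketch (editorial addition, not part of the original SDK
// header): gathering all results into a user-owned PxArray without per-batch
// processing. The query call itself is only indicated, since it depends on the
// system using the callback.
//
//   PxArray<PxU32> results;
//   PxDynamicArrayReportCallback<PxU32> callback(results);
//
//   // ... pass 'callback' to a query accepting a PxReportCallback<PxU32>& ...
//
//   // After the query returns, 'results' contains every reported item.
// ---------------------------------------------------------------------------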
#if !PX_DOXYGEN
}
#endif
/** @} */
#endif
| 8,721 | C | 32.546154 | 155 | 0.723082 |
NVIDIA-Omniverse/PhysX/physx/include/geometry/PxTetrahedronMesh.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_TETRAHEDRON_MESH_H
#define PX_TETRAHEDRON_MESH_H
/** \addtogroup geomutils
@{ */
#include "foundation/PxVec3.h"
#include "foundation/PxBounds3.h"
#include "foundation/PxUserAllocated.h"
#include "common/PxPhysXCommonConfig.h"
#include "common/PxBase.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
struct PxTetrahedronMeshFlag
{
enum Enum
{
e16_BIT_INDICES = (1 << 1) //!< The tetrahedron mesh has 16-bit vertex indices
};
};
/**
\brief collection of set bits defined in PxTetrahedronMeshFlag.
@see PxTetrahedronMeshFlag
*/
typedef PxFlags<PxTetrahedronMeshFlag::Enum, PxU8> PxTetrahedronMeshFlags;
PX_FLAGS_OPERATORS(PxTetrahedronMeshFlag::Enum, PxU8)
/**
\brief A data container providing mass, rest pose and other information required for softbody simulation
Stores properties of the softbody such as the inverse mass per node, the rest pose matrix per tetrahedral element, etc.
Mainly used internally to store runtime data.
*/
class PxSoftBodyAuxData : public PxRefCounted
{
public:
/**
\brief Decrements the reference count of a tetrahedron mesh and releases it if the new reference count is zero.
@see PxPhysics.createTetrahedronMesh()
*/
virtual void release() = 0;
/**
\brief Get the inverse mass of each vertex of the tetrahedron mesh.
\return PxReal* A pointer to an array of inverse masses, one per vertex of the tetrahedron mesh. Size: number of vertices * sizeof(PxReal).
*/
virtual PxReal* getGridModelInvMass() = 0;
protected:
PX_INLINE PxSoftBodyAuxData(PxType concreteType, PxBaseFlags baseFlags) : PxRefCounted(concreteType, baseFlags) {}
PX_INLINE PxSoftBodyAuxData(PxBaseFlags baseFlags) : PxRefCounted(baseFlags) {}
virtual ~PxSoftBodyAuxData() {}
virtual bool isKindOf(const char* name) const { PX_IS_KIND_OF(name, "PxSoftBodyAuxData", PxRefCounted); }
};
/**
\brief A tetrahedron mesh, also called a 'tetrahedron soup'.
It is represented as an indexed tetrahedron list. There are no restrictions on the
tetrahedron data.
To avoid duplicating data when you have several instances of a particular
mesh positioned differently, you do not use this class to represent a
mesh object directly. Instead, you create an instance of this mesh via
the PxTetrahedronMeshGeometry and PxShape classes.
<h3>Creation</h3>
To create an instance of this class call PxPhysics::createTetrahedronMesh(),
and release() to delete it. This is only possible
once you have released all of its PxShape instances.
<h3>Visualizations:</h3>
\li #PxVisualizationParameter::eCOLLISION_AABBS
\li #PxVisualizationParameter::eCOLLISION_SHAPES
\li #PxVisualizationParameter::eCOLLISION_AXES
\li #PxVisualizationParameter::eCOLLISION_FNORMALS
\li #PxVisualizationParameter::eCOLLISION_EDGES
@see PxTetrahedronMeshDesc PxTetrahedronMeshGeometry PxShape PxPhysics.createTetrahedronMesh()
*/
class PxTetrahedronMesh : public PxRefCounted
{
public:
/**
\brief Returns the number of vertices.
\return number of vertices
@see getVertices()
*/
virtual PxU32 getNbVertices() const = 0;
/**
\brief Returns the vertices
\return array of vertices
@see getNbVertices()
*/
virtual const PxVec3* getVertices() const = 0;
/**
\brief Returns the number of tetrahedrons.
\return number of tetrahedrons
@see getTetrahedrons()
*/
virtual PxU32 getNbTetrahedrons() const = 0;
/**
\brief Returns the tetrahedron indices.
The indices can be 16 or 32bit depending on the number of tetrahedrons in the mesh.
Call getTetrahedronMeshFlags() to know if the indices are 16 or 32 bits.
The number of indices is the number of tetrahedrons * 4.
\return array of tetrahedrons
@see getNbTetrahedrons() getTetrahedronMeshFlags() getTetrahedraRemap()
*/
virtual const void* getTetrahedrons() const = 0;
/**
\brief Reads the PxTetrahedronMesh flags.
See the list of flags #PxTetrahedronMeshFlags
\return The values of the PxTetrahedronMesh flags.
*/
virtual PxTetrahedronMeshFlags getTetrahedronMeshFlags() const = 0;
/**
\brief Returns the tetrahedra remapping table.
The tetrahedra are internally sorted according to various criteria. Hence the internal tetrahedron order
does not always match the original (user-defined) order. The remapping table helps finding the old
indices knowing the new ones:
remapTable[ internalTetrahedronIndex ] = originalTetrahedronIndex
\return the remapping table (or NULL if 'PxCookingParams::suppressTriangleMeshRemapTable' has been used)
@see getNbTetrahedrons() getTetrahedrons() PxCookingParams::suppressTriangleMeshRemapTable
*/
virtual const PxU32* getTetrahedraRemap() const = 0;
/**
\brief Returns the local-space (vertex space) AABB from the tetrahedron mesh.
\return local-space bounds
*/
virtual PxBounds3 getLocalBounds() const = 0;
/**
\brief Decrements the reference count of a tetrahedron mesh and releases it if the new reference count is zero.
@see PxPhysics.createTetrahedronMesh()
*/
virtual void release() = 0;
protected:
PX_INLINE PxTetrahedronMesh(PxType concreteType, PxBaseFlags baseFlags) : PxRefCounted(concreteType, baseFlags) {}
PX_INLINE PxTetrahedronMesh(PxBaseFlags baseFlags) : PxRefCounted(baseFlags) {}
virtual ~PxTetrahedronMesh() {}
virtual bool isKindOf(const char* name) const { PX_IS_KIND_OF(name, "PxTetrahedronMesh", PxRefCounted); }
};
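// ---------------------------------------------------------------------------
// Illustrative usage sketch (editorial addition, not part of the original SDK
// header): reading the tetrahedron indices while honoring the 16-bit/32-bit
// index format. 'mesh' is a hypothetical PxTetrahedronMesh pointer obtained
// elsewhere (e.g. from PxPhysics::createTetrahedronMesh()).
//
//   const PxU32 nbTets  = mesh->getNbTetrahedrons();
//   const void* indices = mesh->getTetrahedrons();
//
//   if(mesh->getTetrahedronMeshFlags() & PxTetrahedronMeshFlag::e16_BIT_INDICES)
//   {
//       const PxU16* tets16 = static_cast<const PxU16*>(indices);
//       // tets16[i*4+0] .. tets16[i*4+3] are the vertex indices of tetrahedron i, i < nbTets
//   }
//   else
//   {
//       const PxU32* tets32 = static_cast<const PxU32*>(indices);
//       // tets32[i*4+0] .. tets32[i*4+3] are the vertex indices of tetrahedron i, i < nbTets
//   }
// ---------------------------------------------------------------------------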
/**
\brief A softbody mesh, containing structures to store collision shape, simulation shape and deformation state
The class bundles shapes and deformation state of a softbody that is simulated using FEM. The meshes used for
collision detection and for the FEM calculations are both tetrahedral meshes. While collision detection requires
a mesh that matches the surface of the simulated body as exactly as possible, the simulation mesh has more freedom
such that it can be optimized for tetrahedra without small angles and nodes that aren't shared by too many elements.
<h3>Creation</h3>
To create an instance of this class call PxPhysics::createSoftBodyMesh(),
and release() to delete it. This is only possible
once you have released all of its PxShape instances.
*/
class PxSoftBodyMesh : public PxRefCounted
{
public:
/**
\brief Const accessor to the softbody's collision mesh.
@see PxTetrahedronMesh
*/
virtual const PxTetrahedronMesh* getCollisionMesh() const = 0;
/**
\brief Accessor to the softbody's collision mesh.
@see PxTetrahedronMesh
*/
virtual PxTetrahedronMesh* getCollisionMesh() = 0;
/**
\brief Const accessor to the softbody's simulation mesh.
@see PxTetrahedronMesh
*/
virtual const PxTetrahedronMesh* getSimulationMesh() const = 0;
/**
\brief Accessor to the softbody's simulation mesh.
@see PxTetrahedronMesh
*/
virtual PxTetrahedronMesh* getSimulationMesh() = 0;
/**
\brief Const accessor to the softbody's simulation state.
@see PxSoftBodyAuxData
*/
virtual const PxSoftBodyAuxData* getSoftBodyAuxData() const = 0;
/**
\brief Accessor to the softbody's auxiliary data such as mass and rest pose information.
@see PxSoftBodyAuxData
*/
virtual PxSoftBodyAuxData* getSoftBodyAuxData() = 0;
/**
\brief Decrements the reference count of a tetrahedron mesh and releases it if the new reference count is zero.
@see PxPhysics.createTetrahedronMesh()
*/
virtual void release() = 0;
protected:
PX_INLINE PxSoftBodyMesh(PxType concreteType, PxBaseFlags baseFlags) : PxRefCounted(concreteType, baseFlags) {}
PX_INLINE PxSoftBodyMesh(PxBaseFlags baseFlags) : PxRefCounted(baseFlags) {}
virtual ~PxSoftBodyMesh() {}
virtual bool isKindOf(const char* name) const { PX_IS_KIND_OF(name, "PxSoftBodyMesh", PxRefCounted); }
};
/**
\brief Contains information about how to update the collision mesh's vertices given a deformed simulation tetmesh.
@see PxTetrahedronMeshData
*/
class PxCollisionMeshMappingData : public PxUserAllocated
{
public:
virtual void release() = 0;
virtual ~PxCollisionMeshMappingData() {}
};
/**
\brief Stores data to accelerate collision detection of a tetrahedral mesh
@see PxTetrahedronMeshData
*/
class PxSoftBodyCollisionData : public PxUserAllocated
{
};
/**
\brief Contains raw geometry information describing the tetmesh's vertices and its elements (tetrahedra)
@see PxTetrahedronMeshData
*/
class PxTetrahedronMeshData : public PxUserAllocated
{
};
/**
\brief Stores data to compute and store the state of a deformed tetrahedral mesh
@see PxTetrahedronMeshData
*/
class PxSoftBodySimulationData : public PxUserAllocated
{
};
/**
\brief Combines PxTetrahedronMeshData and PxSoftBodyCollisionData
@see PxTetrahedronMeshData PxSoftBodyCollisionData
*/
class PxCollisionTetrahedronMeshData : public PxUserAllocated
{
public:
virtual const PxTetrahedronMeshData* getMesh() const = 0;
virtual PxTetrahedronMeshData* getMesh() = 0;
virtual const PxSoftBodyCollisionData* getData() const = 0;
virtual PxSoftBodyCollisionData* getData() = 0;
virtual void release() = 0;
virtual ~PxCollisionTetrahedronMeshData() {}
};
/**
\brief Combines PxTetrahedronMeshData and PxSoftBodySimulationData
@see PxTetrahedronMeshData PxSoftBodySimulationData
*/
class PxSimulationTetrahedronMeshData : public PxUserAllocated
{
public:
virtual PxTetrahedronMeshData* getMesh() = 0;
virtual PxSoftBodySimulationData* getData() = 0;
virtual void release() = 0;
virtual ~PxSimulationTetrahedronMeshData() {}
};
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 11,417 | C | 29.859459 | 139 | 0.748883 |
NVIDIA-Omniverse/PhysX/physx/include/geometry/PxHeightFieldDesc.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_HEIGHTFIELD_DESC_H
#define PX_HEIGHTFIELD_DESC_H
/** \addtogroup geomutils
@{
*/
#include "common/PxPhysXCommonConfig.h"
#include "geometry/PxHeightFieldFlag.h"
#include "common/PxCoreUtilityTypes.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief Descriptor class for #PxHeightField.
\note The heightfield data is *copied* when a PxHeightField object is created from this descriptor. After the call the
user may discard the height data.
@see PxHeightField PxHeightFieldGeometry PxShape PxPhysics.createHeightField() PxCooking.createHeightField()
*/
class PxHeightFieldDesc
{
public:
/**
\brief Number of sample rows in the height field samples array.
\note Local space X-axis corresponds to rows.
<b>Range:</b> >1<br>
<b>Default:</b> 0
*/
PxU32 nbRows;
/**
\brief Number of sample columns in the height field samples array.
\note Local space Z-axis corresponds to columns.
<b>Range:</b> >1<br>
<b>Default:</b> 0
*/
PxU32 nbColumns;
/**
\brief Format of the sample data.
Currently the only supported format is PxHeightFieldFormat::eS16_TM.
<b>Default:</b> PxHeightFieldFormat::eS16_TM
@see PxHeightFieldFormat PxHeightFieldDesc.samples
*/
PxHeightFieldFormat::Enum format;
/**
\brief The samples array.
It is copied to the SDK's storage at creation time.
There are nbRows * nbColumns samples in the array,
which define nbRows * nbColumns vertices and cells,
of which (nbRows - 1) * (nbColumns - 1) cells are actually used.
The array index of sample(row, column) = row * nbColumns + column.
The byte offset of sample(row, column) = sampleStride * (row * nbColumns + column).
The sample data follows at the offset and spans the number of bytes defined by the format.
Then there are zero or more unused bytes depending on sampleStride before the next sample.
<b>Default:</b> NULL
@see PxHeightFieldFormat
*/
PxStridedData samples;
/**
This threshold is used by the collision detection to determine if a height field edge is convex
and can generate contact points.
Usually the convexity of an edge is determined from the angle (or cosine of the angle) between
the normals of the faces sharing that edge.
The height field allows a more efficient approach by comparing height values of neighboring vertices.
This parameter offsets the comparison. Smaller changes than 0.5 will not alter the set of convex edges.
The rule of thumb is that larger values will result in fewer edge contacts.
This parameter is ignored in contact generation with sphere and capsule primitives.
<b>Range:</b> [0, PX_MAX_F32)<br>
<b>Default:</b> 0
*/
PxReal convexEdgeThreshold;
/**
\brief Flags bits, combined from values of the enum ::PxHeightFieldFlag.
<b>Default:</b> 0
@see PxHeightFieldFlag PxHeightFieldFlags
*/
PxHeightFieldFlags flags;
/**
\brief Constructor sets to default.
*/
PX_INLINE PxHeightFieldDesc();
/**
\brief (re)sets the structure to the default.
*/
PX_INLINE void setToDefault();
/**
\brief Returns true if the descriptor is valid.
\return True if the current settings are valid.
*/
PX_INLINE bool isValid() const;
};
PX_INLINE PxHeightFieldDesc::PxHeightFieldDesc() //constructor sets to default
{
nbColumns = 0;
nbRows = 0;
format = PxHeightFieldFormat::eS16_TM;
convexEdgeThreshold = 0.0f;
flags = PxHeightFieldFlags();
}
PX_INLINE void PxHeightFieldDesc::setToDefault()
{
*this = PxHeightFieldDesc();
}
PX_INLINE bool PxHeightFieldDesc::isValid() const
{
if (nbColumns < 2)
return false;
if (nbRows < 2)
return false;
if(format != PxHeightFieldFormat::eS16_TM)
return false;
if (samples.stride < 4)
return false;
if (convexEdgeThreshold < 0)
return false;
if ((flags & PxHeightFieldFlag::eNO_BOUNDARY_EDGES) != flags)
return false;
return true;
}
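// ---------------------------------------------------------------------------
// Illustrative usage sketch (editorial addition, not part of the original SDK
// header): filling out a descriptor for a minimal 2x2 height field. It assumes
// PxHeightFieldSample from PxHeightFieldSample.h and <cstring> for memset();
// the height values are hypothetical.
//
//   PxHeightFieldSample samples[2*2];
//   memset(samples, 0, sizeof(samples));          // zero heights and material indices
//   samples[1].height = 10;
//   samples[2].height = 20;
//   samples[3].height = 30;
//
//   PxHeightFieldDesc desc;                       // format defaults to eS16_TM
//   desc.nbRows         = 2;
//   desc.nbColumns      = 2;
//   desc.samples.data   = samples;
//   desc.samples.stride = sizeof(PxHeightFieldSample);
//   PX_ASSERT(desc.isValid());
//   // pass 'desc' to PxPhysics::createHeightField() or PxCooking::createHeightField()
// ---------------------------------------------------------------------------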
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 5,585 | C | 29.032258 | 118 | 0.739481 |
NVIDIA-Omniverse/PhysX/physx/include/geometry/PxCapsuleGeometry.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_CAPSULE_GEOMETRY_H
#define PX_CAPSULE_GEOMETRY_H
/** \addtogroup geomutils
@{
*/
#include "geometry/PxGeometry.h"
#include "foundation/PxFoundationConfig.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief Class representing the geometry of a capsule.
Capsules are shaped as the union of a cylinder of length 2 * halfHeight with the given radius,
centered at the origin and extending along the x axis, and two hemispherical ends of the same radius.
\note The scaling of the capsule is expected to be baked into these values, there is no additional scaling parameter.
The function PxTransformFromSegment is a helper for generating an appropriate transform for the capsule from the capsule's interior line segment.
@see PxTransformFromSegment
*/
class PxCapsuleGeometry : public PxGeometry
{
public:
/**
\brief Constructor, initializes to a capsule with passed radius and half height.
*/
PX_INLINE PxCapsuleGeometry(PxReal radius_=0.0f, PxReal halfHeight_=0.0f) : PxGeometry(PxGeometryType::eCAPSULE), radius(radius_), halfHeight(halfHeight_) {}
/**
\brief Copy constructor.
\param[in] that Other object
*/
PX_INLINE PxCapsuleGeometry(const PxCapsuleGeometry& that) : PxGeometry(that), radius(that.radius), halfHeight(that.halfHeight) {}
/**
\brief Assignment operator
*/
PX_INLINE void operator=(const PxCapsuleGeometry& that)
{
mType = that.mType;
radius = that.radius;
halfHeight = that.halfHeight;
}
/**
\brief Returns true if the geometry is valid.
\return True if the current settings are valid.
\note A valid capsule has radius > 0, halfHeight >= 0.
It is illegal to call PxRigidActor::createShape and PxPhysics::createShape with a capsule that has zero radius or height.
@see PxRigidActor::createShape, PxPhysics::createShape
*/
PX_INLINE bool isValid() const;
public:
/**
\brief The radius of the capsule.
*/
PxReal radius;
/**
\brief half of the capsule's height, measured between the centers of the hemispherical ends.
*/
PxReal halfHeight;
};
PX_INLINE bool PxCapsuleGeometry::isValid() const
{
if(mType != PxGeometryType::eCAPSULE)
return false;
if(!PxIsFinite(radius) || !PxIsFinite(halfHeight))
return false;
if(radius <= 0.0f || halfHeight < 0.0f)
return false;
return true;
}
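// ---------------------------------------------------------------------------
// Illustrative usage sketch (editorial addition, not part of the original SDK
// header): a capsule of radius 0.5 whose tip-to-tip length is 3.0
// (2*halfHeight + 2*radius). 'physics' and 'material' are hypothetical objects
// created elsewhere.
//
//   PxCapsuleGeometry capsule(0.5f, 1.0f);
//   PX_ASSERT(capsule.isValid());
//   // PxShape* shape = physics.createShape(capsule, material);
//
// The capsule extends along its local x axis; to make it stand upright, rotate
// the shape's local pose or build the pose from the capsule's interior segment
// with PxTransformFromSegment() as mentioned above.
// ---------------------------------------------------------------------------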
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 4,028 | C | 32.575 | 158 | 0.750248 |
NVIDIA-Omniverse/PhysX/physx/include/geometry/PxBoxGeometry.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_BOX_GEOMETRY_H
#define PX_BOX_GEOMETRY_H
/** \addtogroup geomutils
@{
*/
#include "geometry/PxGeometry.h"
#include "foundation/PxVec3.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief Class representing the geometry of a box.
The geometry of a box can be fully specified by its half extents, i.e. half of its width, height, and depth.
\note The scaling of the box is expected to be baked into these values, there is no additional scaling parameter.
*/
class PxBoxGeometry : public PxGeometry
{
public:
/**
\brief Constructor to initialize half extents from scalar parameters.
\param hx Initial half extents' x component.
\param hy Initial half extents' y component.
\param hz Initial half extents' z component.
*/
PX_INLINE PxBoxGeometry(PxReal hx=0.0f, PxReal hy=0.0f, PxReal hz=0.0f) : PxGeometry(PxGeometryType::eBOX), halfExtents(hx, hy, hz) {}
/**
\brief Constructor to initialize half extents from vector parameter.
\param halfExtents_ Initial half extents.
*/
PX_INLINE PxBoxGeometry(PxVec3 halfExtents_) : PxGeometry(PxGeometryType::eBOX), halfExtents(halfExtents_) {}
/**
\brief Copy constructor.
\param[in] that Other object
*/
PX_INLINE PxBoxGeometry(const PxBoxGeometry& that) : PxGeometry(that), halfExtents(that.halfExtents) {}
/**
\brief Assignment operator
*/
PX_INLINE void operator=(const PxBoxGeometry& that)
{
mType = that.mType;
halfExtents = that.halfExtents;
}
/**
\brief Returns true if the geometry is valid.
\return True if the current settings are valid
\note A valid box has a positive extent in each direction (halfExtents.x > 0, halfExtents.y > 0, halfExtents.z > 0).
It is illegal to call PxRigidActor::createShape and PxPhysics::createShape with a box that has zero extent in any direction.
@see PxRigidActor::createShape, PxPhysics::createShape
*/
PX_INLINE bool isValid() const;
public:
/**
\brief Half of the width, height, and depth of the box.
*/
PxVec3 halfExtents;
};
PX_INLINE bool PxBoxGeometry::isValid() const
{
if(mType != PxGeometryType::eBOX)
return false;
if(!halfExtents.isFinite())
return false;
if(halfExtents.x <= 0.0f || halfExtents.y <= 0.0f || halfExtents.z <= 0.0f)
return false;
return true;
}
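// ---------------------------------------------------------------------------
// Illustrative usage sketch (editorial addition, not part of the original SDK
// header): a 2 x 1 x 0.5 box is described by its half extents. 'physics' and
// 'material' are hypothetical objects created elsewhere.
//
//   PxBoxGeometry box(1.0f, 0.5f, 0.25f);
//   PX_ASSERT(box.isValid());
//   // PxShape* shape = physics.createShape(box, material);
// ---------------------------------------------------------------------------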
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 4,004 | C | 32.940678 | 135 | 0.743007 |
NVIDIA-Omniverse/PhysX/physx/include/geometry/PxGeometryQueryFlags.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_GEOMETRY_QUERY_FLAGS_H
#define PX_GEOMETRY_QUERY_FLAGS_H
#include "foundation/PxFlags.h"
#include "common/PxPhysXCommonConfig.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief Geometry-level query flags.
@see PxScene::raycast PxScene::overlap PxScene::sweep PxBVH::raycast PxBVH::overlap PxBVH::sweep PxGeometryQuery::raycast PxGeometryQuery::overlap PxGeometryQuery::sweep
@see PxGeometryQuery::computePenetration PxGeometryQuery::pointDistance PxGeometryQuery::computeGeomBounds
@see PxMeshQuery::findOverlapTriangleMesh PxMeshQuery::findOverlapHeightField PxMeshQuery::sweep
*/
struct PxGeometryQueryFlag
{
enum Enum
{
eSIMD_GUARD = (1<<0), //!< Saves/restores SIMD control word for each query (safer but slower). Omit this if you took care of it yourself in your app.
eDEFAULT = eSIMD_GUARD
};
};
/**
\brief collection of set bits defined in PxGeometryQueryFlag.
@see PxGeometryQueryFlag
*/
PX_FLAGS_TYPEDEF(PxGeometryQueryFlag, PxU32)
#if !PX_DOXYGEN
}
#endif
#endif
| 2,721 | C | 38.449275 | 170 | 0.76663 |
NVIDIA-Omniverse/PhysX/physx/include/geometry/PxSimpleTriangleMesh.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_SIMPLE_TRIANGLE_MESH_H
#define PX_SIMPLE_TRIANGLE_MESH_H
/** \addtogroup geomutils
@{
*/
#include "foundation/PxVec3.h"
#include "foundation/PxFlags.h"
#include "common/PxCoreUtilityTypes.h"
#include "common/PxPhysXCommonConfig.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief Enum with flag values to be used in PxSimpleTriangleMesh::flags.
*/
struct PxMeshFlag
{
enum Enum
{
/**
\brief Specifies if the SDK should flip normals.
The PhysX libraries assume that the face normal of a triangle with vertices [a,b,c] can be computed as:
edge1 = b-a
edge2 = c-a
face_normal = edge1 x edge2.
Note: This is the same as a counterclockwise winding in a right handed coordinate system or
alternatively a clockwise winding order in a left handed coordinate system.
If this does not match the winding order for your triangles, raise the below flag.
*/
eFLIPNORMALS = (1<<0),
e16_BIT_INDICES = (1<<1) //!< Denotes the use of 16-bit vertex indices
};
};
/**
\brief collection of set bits defined in PxMeshFlag.
@see PxMeshFlag
*/
typedef PxFlags<PxMeshFlag::Enum,PxU16> PxMeshFlags;
PX_FLAGS_OPERATORS(PxMeshFlag::Enum,PxU16)
/**
\brief A structure describing a triangle mesh.
*/
class PxSimpleTriangleMesh
{
public:
/**
\brief Pointer to first vertex point.
*/
PxBoundedData points;
/**
\brief Pointer to first triangle.
Caller may add triangleStrideBytes bytes to the pointer to access the next triangle.
These are triplets of 0 based indices:
vert0 vert1 vert2
vert0 vert1 vert2
vert0 vert1 vert2
...
where each index is either a 32-bit or 16-bit unsigned integer. There are numTriangles*3 indices.
This is declared as a void pointer because it is actually either a PxU16 or a PxU32 pointer.
*/
PxBoundedData triangles;
/**
\brief Flags bits, combined from values of the enum ::PxMeshFlag
*/
PxMeshFlags flags;
/**
\brief constructor sets to default.
*/
PX_INLINE PxSimpleTriangleMesh();
/**
\brief (re)sets the structure to the default.
*/
PX_INLINE void setToDefault();
/**
\brief returns true if the current settings are valid
*/
PX_INLINE bool isValid() const;
};
PX_INLINE PxSimpleTriangleMesh::PxSimpleTriangleMesh()
{
}
PX_INLINE void PxSimpleTriangleMesh::setToDefault()
{
*this = PxSimpleTriangleMesh();
}
PX_INLINE bool PxSimpleTriangleMesh::isValid() const
{
// Check geometry
if(points.count > 0xffff && flags & PxMeshFlag::e16_BIT_INDICES)
return false;
if(!points.data)
return false;
if(points.stride < sizeof(PxVec3)) //should be at least one point's worth of data
return false;
// Check topology
// The triangles pointer is not mandatory
if(triangles.data)
{
// Indexed mesh
PxU32 limit = (flags & PxMeshFlag::e16_BIT_INDICES) ? sizeof(PxU16)*3 : sizeof(PxU32)*3;
if(triangles.stride < limit)
return false;
}
return true;
}
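// ---------------------------------------------------------------------------
// Illustrative usage sketch (editorial addition, not part of the original SDK
// header): describing a single-triangle mesh with 32-bit indices. The vertex
// positions are hypothetical.
//
//   static const PxVec3 verts[3]  = { PxVec3(0.0f,0.0f,0.0f), PxVec3(1.0f,0.0f,0.0f), PxVec3(0.0f,0.0f,1.0f) };
//   static const PxU32 indices[3] = { 0, 1, 2 };
//
//   PxSimpleTriangleMesh mesh;
//   mesh.points.count     = 3;
//   mesh.points.stride    = sizeof(PxVec3);
//   mesh.points.data      = verts;
//   mesh.triangles.count  = 1;
//   mesh.triangles.stride = 3 * sizeof(PxU32);
//   mesh.triangles.data   = indices;
//   PX_ASSERT(mesh.isValid());
// ---------------------------------------------------------------------------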
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 4,636 | C | 27.10303 | 105 | 0.734901 |
NVIDIA-Omniverse/PhysX/physx/include/geometry/PxHeightFieldFlag.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_HEIGHT_FIELD_FLAG_H
#define PX_HEIGHT_FIELD_FLAG_H
/** \addtogroup geomutils
@{
*/
#include "foundation/PxFlags.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief Describes the format of height field samples.
@see PxHeightFieldDesc.format PxHeightFieldDesc.samples
*/
struct PxHeightFieldFormat
{
enum Enum
{
/**
\brief Height field height data consists of 16-bit signed integers, followed by triangle materials.
Each sample is 32 bits wide arranged as follows:
\image html heightFieldFormat_S16_TM.png
1) First there is a 16 bit height value.
2) Next, two one byte material indices, with the high bit of each byte reserved for special use.
(so the material index is only 7 bits).
The high bit of material0 is the tess-flag.
The high bit of material1 is reserved for future use.
There are zero or more unused bytes before the next sample depending on PxHeightFieldDesc.sampleStride,
where the application may eventually keep its own data.
This is the only format supported at the moment.
@see PxHeightFieldDesc.format PxHeightFieldDesc.samples
*/
eS16_TM = (1 << 0)
};
};
/**
\brief Determines the tessellation of height field cells.
@see PxHeightFieldDesc.format PxHeightFieldDesc.samples
*/
struct PxHeightFieldTessFlag
{
enum Enum
{
/**
\brief This flag determines which way each quad cell is subdivided.
The flag lowered indicates subdivision like this: (the 0th vertex is referenced by only one triangle)
\image html heightfieldTriMat2.PNG
<pre>
+--+--+--+---> column
| /| /| /|
|/ |/ |/ |
+--+--+--+
| /| /| /|
|/ |/ |/ |
+--+--+--+
|
|
V row
</pre>
The flag raised indicates subdivision like this: (the 0th vertex is shared by two triangles)
\image html heightfieldTriMat1.PNG
<pre>
+--+--+--+---> column
|\ |\ |\ |
| \| \| \|
+--+--+--+
|\ |\ |\ |
| \| \| \|
+--+--+--+
|
|
V row
</pre>
@see PxHeightFieldDesc.format PxHeightFieldDesc.samples
*/
e0TH_VERTEX_SHARED = (1 << 0)
};
};
/**
\brief Enum with flag values to be used in PxHeightFieldDesc.flags.
*/
struct PxHeightFieldFlag
{
enum Enum
{
/**
\brief Disable collisions with height field with boundary edges.
Raise this flag if several terrain patches are going to be placed adjacent to each other,
to avoid a bump when sliding across.
This flag is ignored in contact generation with sphere and capsule shapes.
@see PxHeightFieldDesc.flags
*/
eNO_BOUNDARY_EDGES = (1 << 0)
};
};
/**
\brief collection of set bits defined in PxHeightFieldFlag.
@see PxHeightFieldFlag
*/
typedef PxFlags<PxHeightFieldFlag::Enum,PxU16> PxHeightFieldFlags;
PX_FLAGS_OPERATORS(PxHeightFieldFlag::Enum,PxU16)
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 4,482 | C | 26.84472 | 106 | 0.713075 |
NVIDIA-Omniverse/PhysX/physx/include/geometry/PxGeometryHit.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_GEOMETRY_HIT_H
#define PX_GEOMETRY_HIT_H
/** \addtogroup scenequery
@{
*/
#include "foundation/PxVec3.h"
#include "foundation/PxFlags.h"
#include "common/PxPhysXCommonConfig.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief Scene query and geometry query behavior flags.
PxHitFlags are used for 3 different purposes:
1) To request hit fields to be filled in by scene queries (such as hit position, normal, face index or UVs).
2) Once query is completed, to indicate which fields are valid (note that a query may produce more valid fields than requested).
3) To specify additional options for the narrow phase and mid-phase intersection routines.
All these flags apply to both scene queries and geometry queries (PxGeometryQuery).
@see PxRaycastHit PxSweepHit PxOverlapHit PxScene.raycast PxScene.sweep PxScene.overlap PxGeometryQuery PxFindFaceIndex
*/
struct PxHitFlag
{
enum Enum
{
ePOSITION = (1<<0), //!< "position" member of #PxQueryHit is valid
eNORMAL = (1<<1), //!< "normal" member of #PxQueryHit is valid
eUV = (1<<3), //!< "u" and "v" barycentric coordinates of #PxQueryHit are valid. Not applicable to sweep queries.
eASSUME_NO_INITIAL_OVERLAP = (1<<4), //!< Performance hint flag for sweeps when it is known upfront there's no initial overlap.
//!< NOTE: using this flag may cause undefined results if shapes are initially overlapping.
eANY_HIT = (1<<5), //!< Report any first hit. Used for geometries that contain more than one primitive. For meshes,
//!< if neither eMESH_MULTIPLE nor eANY_HIT is specified, a single closest hit will be reported.
eMESH_MULTIPLE = (1<<6), //!< Report all hits for meshes rather than just the first. Not applicable to sweep queries.
eMESH_ANY = eANY_HIT, //!< @deprecated Deprecated, please use eANY_HIT instead.
eMESH_BOTH_SIDES = (1<<7), //!< Report hits with back faces of mesh triangles. Also report hits for raycast
//!< originating on mesh surface and facing away from the surface normal. Not applicable to sweep queries.
//!< Please refer to the user guide for heightfield-specific differences.
ePRECISE_SWEEP = (1<<8), //!< Use more accurate but slower narrow phase sweep tests.
//!< May provide better compatibility with PhysX 3.2 sweep behavior.
eMTD = (1<<9), //!< Report the minimum translation depth, normal and contact point.
eFACE_INDEX = (1<<10), //!< "face index" member of #PxQueryHit is valid
eDEFAULT = ePOSITION|eNORMAL|eFACE_INDEX,
/** \brief Only this subset of flags can be modified by a pre-filter. Other modifications will be discarded. */
eMODIFIABLE_FLAGS = eMESH_MULTIPLE|eMESH_BOTH_SIDES|eASSUME_NO_INITIAL_OVERLAP|ePRECISE_SWEEP
};
};
/**
\brief collection of set bits defined in PxHitFlag.
@see PxHitFlag
*/
PX_FLAGS_TYPEDEF(PxHitFlag, PxU16)
/**
\brief Scene query hit information.
*/
struct PxQueryHit
{
PX_INLINE PxQueryHit() : faceIndex(0xFFFFffff) {}
/**
Face index of touched triangle, for triangle meshes, convex meshes and height fields.
\note This index will default to 0xFFFFffff value for overlap queries.
\note Please refer to the user guide for more details for sweep queries.
\note This index is remapped by mesh cooking. Use #PxTriangleMesh::getTrianglesRemap() to convert to original mesh index.
\note For convex meshes use #PxConvexMesh::getPolygonData() to retrieve touched polygon data.
*/
PxU32 faceIndex;
};
/**
\brief Scene query hit information for raycasts and sweeps returning hit position and normal information.
::PxHitFlag flags can be passed to scene query functions, as an optimization, to cause the SDK to
only generate specific members of this structure.
*/
struct PxLocationHit : PxQueryHit
{
PX_INLINE PxLocationHit() : flags(0), position(PxVec3(0)), normal(PxVec3(0)), distance(PX_MAX_REAL) {}
/**
\note For raycast hits: true for shapes overlapping with raycast origin.
\note For sweep hits: true for shapes overlapping at zero sweep distance.
@see PxRaycastHit PxSweepHit
*/
PX_INLINE bool hadInitialOverlap() const { return (distance <= 0.0f); }
// the following fields are set in accordance with the #PxHitFlags
PxHitFlags flags; //!< Hit flags specifying which members contain valid values.
PxVec3 position; //!< World-space hit position (flag: #PxHitFlag::ePOSITION)
PxVec3 normal; //!< World-space hit normal (flag: #PxHitFlag::eNORMAL)
/**
\brief Distance to hit.
\note If the eMTD flag is used, distance will be a negative value if shapes are overlapping indicating the penetration depth.
\note Otherwise, this value will be >= 0 */
PxF32 distance;
};
/**
\brief Stores results of raycast queries.
::PxHitFlag flags can be passed to raycast function, as an optimization, to cause the SDK to only compute specified members of this
structure.
Some members, such as the barycentric coordinates, are currently only computed for triangle meshes and height fields; future versions
might provide them in other cases. The client code should check #flags to make sure returned values are valid.
@see PxScene.raycast
*/
struct PxGeomRaycastHit : PxLocationHit
{
PX_INLINE PxGeomRaycastHit() : u(0.0f), v(0.0f) {}
// the following fields are set in accordance with the #PxHitFlags
PxReal u, v; //!< barycentric coordinates of hit point, for triangle mesh and height field (flag: #PxHitFlag::eUV)
};
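// ---------------------------------------------------------------------------
// Illustrative usage sketch (editorial addition, not part of the original SDK
// header): inspecting a raycast hit. 'hit' is a hypothetical PxGeomRaycastHit
// filled in by a raycast query issued with PxHitFlag::eDEFAULT | PxHitFlag::eUV.
//
//   if(hit.flags & PxHitFlag::ePOSITION)
//   {
//       // hit.position is valid
//   }
//   if(hit.flags & PxHitFlag::eUV)
//   {
//       // hit.u / hit.v are valid barycentric coordinates (triangle meshes and height fields)
//   }
//   if(hit.hadInitialOverlap())
//   {
//       // the ray started inside the shape (hit.distance <= 0)
//   }
// ---------------------------------------------------------------------------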
/**
\brief Stores results of overlap queries.
@see PxScene.overlap
*/
struct PxGeomOverlapHit : PxQueryHit
{
PX_INLINE PxGeomOverlapHit() {}
};
/**
\brief Stores results of sweep queries.
@see PxScene.sweep
*/
struct PxGeomSweepHit : PxLocationHit
{
PX_INLINE PxGeomSweepHit() {}
};
/**
\brief Pair of indices, typically either object or triangle indices.
*/
struct PxGeomIndexPair
{
PX_FORCE_INLINE PxGeomIndexPair() {}
PX_FORCE_INLINE PxGeomIndexPair(PxU32 _id0, PxU32 _id1) : id0(_id0), id1(_id1) {}
PxU32 id0, id1;
};
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 7,749 | C | 38.540816 | 131 | 0.734288 |
NVIDIA-Omniverse/PhysX/physx/include/geometry/PxHeightField.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_HEIGHTFIELD_H
#define PX_HEIGHTFIELD_H
/** \addtogroup geomutils
@{
*/
#include "geometry/PxHeightFieldFlag.h"
#include "geometry/PxHeightFieldSample.h"
#include "common/PxBase.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
class PxHeightFieldDesc;
/**
\brief A height field class.
Height fields work in a similar way as triangle meshes specified to act as
height fields, with some important differences:
Triangle meshes can be made of nonuniform geometry, while height fields are
regular, rectangular grids. This means that with PxHeightField, you sacrifice
flexibility in return for improved performance and decreased memory consumption.
In local space rows extend in X direction, columns in Z direction and height in Y direction.
Like Convexes and TriangleMeshes, HeightFields are referenced by shape instances
(see #PxHeightFieldGeometry, #PxShape).
To avoid duplicating data when you have several instances of a particular
height field positioned differently, you do not use this class to represent a
height field object directly. Instead, you create an instance of this height field
via the PxHeightFieldGeometry and PxShape classes.
<h3>Creation</h3>
To create an instance of this class call PxPhysics::createHeightField() or
PxCooking::createHeightField(const PxHeightFieldDesc&, PxInsertionCallback&).
To delete it call release(). This is only possible
once you have released all of the PxShape instances that reference it.
<h3>Visualizations:</h3>
\li #PxVisualizationParameter::eCOLLISION_AABBS
\li #PxVisualizationParameter::eCOLLISION_SHAPES
\li #PxVisualizationParameter::eCOLLISION_AXES
\li #PxVisualizationParameter::eCOLLISION_FNORMALS
\li #PxVisualizationParameter::eCOLLISION_EDGES
@see PxHeightFieldDesc PxHeightFieldGeometry PxShape PxPhysics.createHeightField() PxCooking.createHeightField()
*/
class PxHeightField : public PxRefCounted
{
public:
/**
\brief Decrements the reference count of a height field and releases it if the new reference count is zero.
@see PxPhysics.createHeightField() PxHeightFieldDesc PxHeightFieldGeometry PxShape
*/
virtual void release() = 0;
/**
\brief Writes out the sample data array.
The user provides destBufferSize bytes of storage at destBuffer.
The data is formatted and arranged as PxHeightFieldDesc.samples.
\param[out] destBuffer The destination buffer for the sample data.
\param[in] destBufferSize The size of the destination buffer.
\return The number of bytes written.
@see PxHeightFieldDesc.samples
*/
virtual PxU32 saveCells(void* destBuffer, PxU32 destBufferSize) const = 0;
/**
\brief Replaces a rectangular subfield in the sample data array.
The user provides the description of a rectangular subfield in subfieldDesc.
The data is formatted and arranged as PxHeightFieldDesc.samples.
\param[in] startCol First column in the destination heightfield to be modified. Can be negative.
\param[in] startRow First row in the destination heightfield to be modified. Can be negative.
\param[in] subfieldDesc Description of the source subfield to read the samples from.
\param[in] shrinkBounds If left as false, the bounds will never shrink but only grow. If set to true the bounds will be recomputed from all HF samples at O(nbColumns*nbRows) perf cost.
\return True on success, false on failure. Failure can occur due to format mismatch.
\note Modified samples are constrained to the same height quantization range as the original heightfield.
Source samples that are out of range of target heightfield will be clipped with no error.
PhysX does not keep a mapping from the heightfield to heightfield shapes that reference it.
Call PxShape::setGeometry on each shape which references the height field, to ensure that internal data structures are updated to reflect the new geometry.
Please note that PxShape::setGeometry does not guarantee correct/continuous behavior when objects are resting on top of old or new geometry.
@see PxHeightFieldDesc.samples PxShape.setGeometry
*/
virtual bool modifySamples(PxI32 startCol, PxI32 startRow, const PxHeightFieldDesc& subfieldDesc, bool shrinkBounds = false) = 0;
/**
\brief Retrieves the number of sample rows in the samples array.
\return The number of sample rows in the samples array.
@see PxHeightFieldDesc.nbRows
*/
virtual PxU32 getNbRows() const = 0;
/**
\brief Retrieves the number of sample columns in the samples array.
\return The number of sample columns in the samples array.
@see PxHeightFieldDesc.nbColumns
*/
virtual PxU32 getNbColumns() const = 0;
/**
\brief Retrieves the format of the sample data.
\return The format of the sample data.
@see PxHeightFieldDesc.format PxHeightFieldFormat
*/
virtual PxHeightFieldFormat::Enum getFormat() const = 0;
/**
\brief Retrieves the offset in bytes between consecutive samples in the array.
\return The offset in bytes between consecutive samples in the array.
@see PxHeightFieldDesc.sampleStride
*/
virtual PxU32 getSampleStride() const = 0;
/**
\brief Retrieves the convex edge threshold.
\return The convex edge threshold.
@see PxHeightFieldDesc.convexEdgeThreshold
*/
virtual PxReal getConvexEdgeThreshold() const = 0;
/**
\brief Retrieves the flags bits, combined from values of the enum ::PxHeightFieldFlag.
\return The flags bits, combined from values of the enum ::PxHeightFieldFlag.
@see PxHeightFieldDesc.flags PxHeightFieldFlag
*/
virtual PxHeightFieldFlags getFlags() const = 0;
/**
\brief Retrieves the height at the given coordinates in grid space.
\return The height at the given coordinates or 0 if the coordinates are out of range.
*/
virtual PxReal getHeight(PxReal x, PxReal z) const = 0;
/**
\brief Returns material table index of given triangle
\note This function takes a post cooking triangle index.
\param[in] triangleIndex (internal) index of desired triangle
\return Material table index, or 0xffff if no per-triangle materials are used
*/
virtual PxMaterialTableIndex getTriangleMaterialIndex(PxTriangleID triangleIndex) const = 0;
/**
\brief Returns a triangle face normal for a given triangle index
\note This function takes a post cooking triangle index.
\param[in] triangleIndex (internal) index of desired triangle
\return Triangle normal for a given triangle index
*/
virtual PxVec3 getTriangleNormal(PxTriangleID triangleIndex) const = 0;
/**
\brief Returns heightfield sample of given row and column
\param[in] row Given heightfield row
\param[in] column Given heightfield column
\return Heightfield sample
*/
virtual const PxHeightFieldSample& getSample(PxU32 row, PxU32 column) const = 0;
/**
\brief Returns the number of times the heightfield data has been modified
This method returns the number of times modifySamples has been called on this heightfield, so that code that has
retained state that depends on the heightfield can efficiently determine whether it has been modified.
\return the number of times the heightfield sample data has been modified.
*/
virtual PxU32 getTimestamp() const = 0;
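	/*
	Usage sketch (editor's illustration, not part of the original header): invalidating user-side
	cached data when the height field is modified. 'HeightFieldCache' and 'refreshIfModified' are
	hypothetical user-side names.

	    struct HeightFieldCache
	    {
	        PxU32 timestamp;
	        // ... data derived from the samples ...
	    };

	    void refreshIfModified(const PxHeightField& hf, HeightFieldCache& cache)
	    {
	        if(cache.timestamp != hf.getTimestamp())
	        {
	            // rebuild the cached data from hf.getSample(row, column) here
	            cache.timestamp = hf.getTimestamp();
	        }
	    }
	*/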
virtual const char* getConcreteTypeName() const { return "PxHeightField"; }
protected:
PX_INLINE PxHeightField(PxType concreteType, PxBaseFlags baseFlags) : PxRefCounted(concreteType, baseFlags) {}
PX_INLINE PxHeightField(PxBaseFlags baseFlags) : PxRefCounted(baseFlags) {}
virtual ~PxHeightField() {}
virtual bool isKindOf(const char* name) const { PX_IS_KIND_OF(name, "PxHeightField", PxRefCounted); }
};
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 9,251 | C | 36.918033 | 184 | 0.778619 |
NVIDIA-Omniverse/PhysX/physx/include/geometry/PxTriangleMesh.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_TRIANGLE_MESH_H
#define PX_TRIANGLE_MESH_H
/** \addtogroup geomutils
@{ */
#include "foundation/PxVec3.h"
#include "foundation/PxBounds3.h"
#include "common/PxPhysXCommonConfig.h"
#include "common/PxBase.h"
#include "foundation/PxUserAllocated.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief Mesh midphase structure. This enum is used to select the desired acceleration structure for midphase queries
(i.e. raycasts, overlaps, sweeps vs triangle meshes).
The PxMeshMidPhase::eBVH33 structure is the one used in recent PhysX versions (up to PhysX 3.3). It has great performance and is
supported on all platforms. It is deprecated since PhysX 5.x.
The PxMeshMidPhase::eBVH34 structure is a revisited implementation introduced in PhysX 3.4. It can be significantly faster both
in terms of cooking performance and runtime performance.
*/
struct PxMeshMidPhase
{
enum Enum
{
eBVH33 = 0, //!< Default midphase mesh structure, as used up to PhysX 3.3 (deprecated)
eBVH34 = 1, //!< New midphase mesh structure, introduced in PhysX 3.4
eLAST
};
};
/**
\brief Flags for the mesh geometry properties.
Used in ::PxTriangleMeshFlags.
*/
struct PxTriangleMeshFlag
{
enum Enum
{
e16_BIT_INDICES = (1<<1), //!< The triangle mesh has 16-bit vertex indices.
eADJACENCY_INFO = (1<<2), //!< The triangle mesh has adjacency information built.
ePREFER_NO_SDF_PROJ = (1<<3)//!< Indicates that this mesh would preferably not be the mesh projected for mesh-mesh collision. This can indicate that the mesh is not well tessellated.
};
};
/**
\brief collection of set bits defined in PxTriangleMeshFlag.
@see PxTriangleMeshFlag
*/
typedef PxFlags<PxTriangleMeshFlag::Enum,PxU8> PxTriangleMeshFlags;
PX_FLAGS_OPERATORS(PxTriangleMeshFlag::Enum,PxU8)
/**
\brief A triangle mesh, also called a 'polygon soup'.
It is represented as an indexed triangle list. There are no restrictions on the
triangle data.
To avoid duplicating data when you have several instances of a particular
mesh positioned differently, you do not use this class to represent a
mesh object directly. Instead, you create an instance of this mesh via
the PxTriangleMeshGeometry and PxShape classes.
<h3>Creation</h3>
To create an instance of this class call PxPhysics::createTriangleMesh(),
and release() to delete it. This is only possible
once you have released all of its PxShape instances.
<h3>Visualizations:</h3>
\li #PxVisualizationParameter::eCOLLISION_AABBS
\li #PxVisualizationParameter::eCOLLISION_SHAPES
\li #PxVisualizationParameter::eCOLLISION_AXES
\li #PxVisualizationParameter::eCOLLISION_FNORMALS
\li #PxVisualizationParameter::eCOLLISION_EDGES
@see PxTriangleMeshDesc PxTriangleMeshGeometry PxShape PxPhysics.createTriangleMesh()
*/
class PxTriangleMesh : public PxRefCounted
{
public:
/**
\brief Returns the number of vertices.
\return number of vertices
@see getVertices()
*/
virtual PxU32 getNbVertices() const = 0;
/**
\brief Returns the vertices.
\return array of vertices
@see getNbVertices()
*/
virtual const PxVec3* getVertices() const = 0;
/**
\brief Returns all mesh vertices for modification.
This function will return the vertices of the mesh so that their positions can be changed in place.
After modifying the vertices you must call refitBVH for the refitting to actually take place.
This function maintains the old mesh topology (triangle indices).
\return inplace vertex coordinates for each existing mesh vertex.
\note It is recommended to use this feature for scene queries only.
\note Size of array returned is equal to the number returned by getNbVertices().
\note This function operates on cooked vertex indices.
\note This means the index mapping and vertex count can be different from what was provided as an input to the cooking routine.
\note To achieve unchanged 1-to-1 index mapping with the original mesh data (before cooking) please use the following cooking flags:
\note eWELD_VERTICES = 0, eDISABLE_CLEAN_MESH = 1.
\note It is also recommended to make sure that a call to validateTriangleMesh returns true if mesh cleaning is disabled.
@see getNbVertices()
@see refitBVH()
*/
virtual PxVec3* getVerticesForModification() = 0;
/**
\brief Refits BVH for mesh vertices.
This function will refit the mesh BVH to correctly enclose the new positions updated by getVerticesForModification.
Mesh BVH will not be reoptimized by this function so significantly different new positions will cause significantly reduced performance.
\return New bounds for the entire mesh.
\note For PxMeshMidPhase::eBVH34 trees the refit operation is only available on non-quantized trees (see PxBVH34MidphaseDesc::quantized)
\note PhysX does not keep a mapping from the mesh to mesh shapes that reference it.
\note Call PxShape::setGeometry on each shape which references the mesh, to ensure that internal data structures are updated to reflect the new geometry.
\note PxShape::setGeometry does not guarantee correct/continuous behavior when objects are resting on top of old or new geometry.
\note It is also recommended to make sure that a call to validateTriangleMesh returns true if mesh cleaning is disabled.
\note Active edges information will be lost during refit, the rigid body mesh contact generation might not perform as expected.
@see getNbVertices()
@see getVerticesForModification()
@see PxBVH34MidphaseDesc::quantized
*/
virtual PxBounds3 refitBVH() = 0;
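	/*
	Usage sketch (editor's illustration, not part of the original header): deforming a mesh in place
	and refitting its BVH; note that for PxMeshMidPhase::eBVH34 the mesh must have been cooked with a
	non-quantized tree (see note above). 'mesh', 'shape' and 'meshGeometry' (the shape's existing
	PxTriangleMeshGeometry) are hypothetical names.

	    PxVec3* verts = mesh.getVerticesForModification();
	    for(PxU32 i = 0; i < mesh.getNbVertices(); i++)
	        verts[i].y += 0.1f;                          // example displacement
	    const PxBounds3 newBounds = mesh.refitBVH();
	    PX_UNUSED(newBounds);
	    shape.setGeometry(meshGeometry);                 // re-sync each shape that references the mesh
	*/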
/**
\brief Returns the number of triangles.
\return number of triangles
@see getTriangles() getTrianglesRemap()
*/
virtual PxU32 getNbTriangles() const = 0;
/**
\brief Returns the triangle indices.
The indices can be 16 or 32bit depending on the number of triangles in the mesh.
Call getTriangleMeshFlags() to know if the indices are 16 or 32 bits.
The number of indices is the number of triangles * 3.
\return array of triangles
@see getNbTriangles() getTriangleMeshFlags() getTrianglesRemap()
*/
virtual const void* getTriangles() const = 0;
/**
\brief Reads the PxTriangleMesh flags.
See the list of flags #PxTriangleMeshFlag
\return The values of the PxTriangleMesh flags.
@see PxTriangleMesh
*/
virtual PxTriangleMeshFlags getTriangleMeshFlags() const = 0;
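	/*
	Usage sketch (editor's illustration, not part of the original header): reading the vertex indices
	of triangle 'i' from getTriangles() while handling both 16-bit and 32-bit index formats.
	'getTriangleIndices' is a hypothetical helper.

	    void getTriangleIndices(const PxTriangleMesh& mesh, PxU32 i, PxU32& i0, PxU32& i1, PxU32& i2)
	    {
	        const void* indices = mesh.getTriangles();
	        if(mesh.getTriangleMeshFlags() & PxTriangleMeshFlag::e16_BIT_INDICES)
	        {
	            const PxU16* tri = reinterpret_cast<const PxU16*>(indices) + 3 * i;
	            i0 = tri[0]; i1 = tri[1]; i2 = tri[2];
	        }
	        else
	        {
	            const PxU32* tri = reinterpret_cast<const PxU32*>(indices) + 3 * i;
	            i0 = tri[0]; i1 = tri[1]; i2 = tri[2];
	        }
	    }
	*/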
/**
\brief Returns the triangle remapping table.
The triangles are internally sorted according to various criteria. Hence the internal triangle order
does not always match the original (user-defined) order. The remapping table helps finding the old
indices knowing the new ones:
remapTable[ internalTriangleIndex ] = originalTriangleIndex
\return the remapping table (or NULL if 'PxCookingParams::suppressTriangleMeshRemapTable' has been used)
@see getNbTriangles() getTriangles() PxCookingParams::suppressTriangleMeshRemapTable
*/
virtual const PxU32* getTrianglesRemap() const = 0;
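	/*
	Usage sketch (editor's illustration, not part of the original header): mapping an internal
	triangle index (e.g. a query hit's faceIndex against this mesh) back to the index used in the
	original cooking input. 'internalIndex' is a hypothetical value.

	    const PxU32* remap = mesh.getTrianglesRemap();
	    const PxU32 originalIndex = remap ? remap[internalIndex]
	                                      : internalIndex;    // table suppressed: original order cannot be recovered
	*/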
/**
\brief Decrements the reference count of a triangle mesh and releases it if the new reference count is zero.
@see PxPhysics.createTriangleMesh()
*/
virtual void release() = 0;
/**
\brief Returns material table index of given triangle
This function takes a post cooking triangle index.
\param[in] triangleIndex (internal) index of desired triangle
\return Material table index, or 0xffff if no per-triangle materials are used
*/
virtual PxMaterialTableIndex getTriangleMaterialIndex(PxTriangleID triangleIndex) const = 0;
/**
\brief Returns the local-space (vertex space) AABB from the triangle mesh.
\return local-space bounds
*/
virtual PxBounds3 getLocalBounds() const = 0;
/**
\brief Returns the local-space Signed Distance Field for this mesh if it has one.
\return local-space SDF.
*/
virtual const PxReal* getSDF() const = 0;
/**
\brief Returns the resolution of the local-space dense SDF.
*/
virtual void getSDFDimensions(PxU32& numX, PxU32& numY, PxU32& numZ) const = 0;
/**
\brief Sets whether this mesh should be preferred for SDF projection.
By default, meshes are flagged as preferring projection, and the decision on which mesh to project is based on the triangle and vertex
count: the model with fewer triangles is projected onto the SDF of the more detailed mesh.
If one of the meshes is set to prefer SDF projection (the default) and the other is set not to prefer it, the model flagged as
preferring SDF projection is projected onto the model flagged as not preferring it, regardless of the detail of the respective meshes.
If both models are flagged as not preferring projection, the less detailed model is projected as before.
\param[in] preferProjection Indicates if projection is preferred
*/
virtual void setPreferSDFProjection(bool preferProjection) = 0;
/**
\brief Returns whether this mesh prefers SDF projection.
\return whether this mesh prefers SDF projection.
*/
virtual bool getPreferSDFProjection() const = 0;
/**
\brief Returns the mass properties of the mesh assuming unit density.
The following relationship holds between mass and volume:
mass = volume * density
The mass of a unit density mesh is equal to its volume, so this function returns the volume of the mesh.
Similarly, to obtain the localInertia of an identically shaped object with a uniform density of d, simply multiply the
localInertia of the unit density mesh by d.
\param[out] mass The mass of the mesh assuming unit density.
\param[out] localInertia The inertia tensor in mesh local space assuming unit density.
\param[out] localCenterOfMass Position of center of mass (or centroid) in mesh local space.
*/
virtual void getMassInformation(PxReal& mass, PxMat33& localInertia, PxVec3& localCenterOfMass) const = 0;
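	/*
	Usage sketch (editor's illustration, not part of the original header): scaling the unit-density
	results by an assumed uniform density to obtain actual mass properties. The density value is
	hypothetical and expressed in mass units per scene volume unit.

	    PxReal  mass;
	    PxMat33 localInertia;
	    PxVec3  localCenterOfMass;
	    mesh.getMassInformation(mass, localInertia, localCenterOfMass);

	    const PxReal density = 1000.0f;             // assumed material density
	    mass         = mass * density;              // mass = volume * density
	    localInertia = localInertia * density;      // inertia scales linearly with density
	    // localCenterOfMass is independent of a uniform density
	*/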
protected:
PX_INLINE PxTriangleMesh(PxType concreteType, PxBaseFlags baseFlags) : PxRefCounted(concreteType, baseFlags) {}
PX_INLINE PxTriangleMesh(PxBaseFlags baseFlags) : PxRefCounted(baseFlags) {}
virtual ~PxTriangleMesh() {}
virtual bool isKindOf(const char* name) const { PX_IS_KIND_OF(name, "PxTriangleMesh", PxRefCounted); }
};
/**
\brief A triangle mesh containing the PxMeshMidPhase::eBVH33 structure.
@see PxMeshMidPhase
@deprecated
*/
class PX_DEPRECATED PxBVH33TriangleMesh : public PxTriangleMesh
{
public:
protected:
PX_INLINE PxBVH33TriangleMesh(PxType concreteType, PxBaseFlags baseFlags) : PxTriangleMesh(concreteType, baseFlags) {}
PX_INLINE PxBVH33TriangleMesh(PxBaseFlags baseFlags) : PxTriangleMesh(baseFlags) {}
virtual ~PxBVH33TriangleMesh() {}
virtual bool isKindOf(const char* name) const { PX_IS_KIND_OF(name, "PxBVH33TriangleMesh", PxTriangleMesh); }
};
/**
\brief A triangle mesh containing the PxMeshMidPhase::eBVH34 structure.
@see PxMeshMidPhase
*/
class PxBVH34TriangleMesh : public PxTriangleMesh
{
public:
protected:
PX_INLINE PxBVH34TriangleMesh(PxType concreteType, PxBaseFlags baseFlags) : PxTriangleMesh(concreteType, baseFlags) {}
PX_INLINE PxBVH34TriangleMesh(PxBaseFlags baseFlags) : PxTriangleMesh(baseFlags) {}
virtual ~PxBVH34TriangleMesh() {}
virtual bool isKindOf(const char* name) const { PX_IS_KIND_OF(name, "PxBVH34TriangleMesh", PxTriangleMesh); }
};
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
| 12,874 | C | 36.979351 | 184 | 0.771167 |
NVIDIA-Omniverse/PhysX/physx/include/geometry/PxCustomGeometry.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_CUSTOMGEOMETRY_H
#define PX_CUSTOMGEOMETRY_H
/** \addtogroup geomutils
@{
*/
#include "geometry/PxGeometry.h"
#include "geometry/PxGeometryHit.h"
#include "geometry/PxGeometryQueryContext.h"
#include "foundation/PxFoundationConfig.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
class PxContactBuffer;
class PxRenderOutput;
class PxMassProperties;
/**
\brief Custom geometry class. This class allows user to create custom geometries by providing a set of virtual callback functions.
*/
class PxCustomGeometry : public PxGeometry
{
public:
/**
\brief For internal use
*/
PX_PHYSX_COMMON_API static PxU32 getUniqueID();
/**
\brief The type of a custom geometry. Allows to identify a particular kind of it.
*/
struct Type
{
/**
\brief Default constructor
*/
PX_INLINE Type() : mID(getUniqueID()) {}
/**
\brief Copy constructor
*/
PX_INLINE Type(const Type& t) : mID(t.mID) {}
/**
\brief Assignment operator
*/
PX_INLINE Type& operator = (const Type& t) { mID = t.mID; return *this; }
/**
\brief Equality operator
*/
PX_INLINE bool operator == (const Type& t) const { return mID == t.mID; }
/**
\brief Inequality operator
*/
PX_INLINE bool operator != (const Type& t) const { return mID != t.mID; }
/**
\brief Invalid type
*/
PX_INLINE static Type INVALID() { PxU32 z(0); return reinterpret_cast<const Type&>(z); }
private:
PxU32 mID;
};
/**
\brief Custom geometry callbacks structure. User should inherit this and implement all pure virtual functions.
*/
struct Callbacks
{
/**
\brief Return custom type. The type's purpose is for the user to differentiate custom geometries. Not used by PhysX.
\return Unique ID of a custom geometry type.
\note User should use DECLARE_CUSTOM_GEOMETRY_TYPE and IMPLEMENT_CUSTOM_GEOMETRY_TYPE instead of overriding this function directly.
*/
virtual Type getCustomType() const = 0;
/**
\brief Return local bounds.
\param[in] geometry This geometry.
\return Bounding box in the geometry local space.
*/
virtual PxBounds3 getLocalBounds(const PxGeometry& geometry) const = 0;
/**
\brief Contacts generation. Generate collision contacts between two geometries in given poses.
\param[in] geom0 This custom geometry
\param[in] geom1 The other geometry
\param[in] pose0 This custom geometry pose
\param[in] pose1 The other geometry pose
\param[in] contactDistance The distance at which contacts begin to be generated between the pairs
\param[in] meshContactMargin The mesh contact margin.
\param[in] toleranceLength The toleranceLength. Used for scaling distance-based thresholds internally to produce appropriate results given simulations in different units
\param[out] contactBuffer A buffer to write contacts to.
\return True if there are contacts. False otherwise.
*/
virtual bool generateContacts(const PxGeometry& geom0, const PxGeometry& geom1, const PxTransform& pose0, const PxTransform& pose1,
const PxReal contactDistance, const PxReal meshContactMargin, const PxReal toleranceLength,
PxContactBuffer& contactBuffer) const = 0;
/**
\brief Raycast. Cast a ray against the geometry in given pose.
\param[in] origin Origin of the ray.
\param[in] unitDir Normalized direction of the ray.
\param[in] geom This custom geometry
\param[in] pose This custom geometry pose
\param[in] maxDist Length of the ray. Has to be in the [0, inf) range.
\param[in] hitFlags Specifies which properties per hit should be computed and returned via the hit callback.
\param[in] maxHits max number of returned hits = size of 'rayHits' buffer
\param[out] rayHits Ray hits.
\param[in] stride Ray hit structure stride.
\param[in] threadContext Optional user-defined per-thread context.
\return Number of hits.
*/
virtual PxU32 raycast(const PxVec3& origin, const PxVec3& unitDir, const PxGeometry& geom, const PxTransform& pose,
PxReal maxDist, PxHitFlags hitFlags, PxU32 maxHits, PxGeomRaycastHit* rayHits, PxU32 stride, PxRaycastThreadContext* threadContext) const = 0;
/**
\brief Overlap. Test if geometries overlap.
\param[in] geom0 This custom geometry
\param[in] pose0 This custom geometry pose
\param[in] geom1 The other geometry
\param[in] pose1 The other geometry pose
\param[in] threadContext Optional user-defined per-thread context.
\return True if there is overlap. False otherwise.
*/
virtual bool overlap(const PxGeometry& geom0, const PxTransform& pose0, const PxGeometry& geom1, const PxTransform& pose1, PxOverlapThreadContext* threadContext) const = 0;
/**
\brief Sweep. Sweep geom1 against geom0.
\param[in] unitDir Normalized direction of the sweep. geom1 is swept along this direction.
\param[in] maxDist Length of the sweep. Has to be in the [0, inf) range.
\param[in] geom0 This custom geometry
\param[in] pose0 This custom geometry pose
\param[in] geom1 The other geometry
\param[in] pose1 The other geometry pose
\param[out] sweepHit Used to report the sweep hit.
\param[in] hitFlags Specifies which properties per hit should be computed and returned via the hit callback.
\param[in] inflation This parameter creates a skin around the swept geometry which increases its extents for sweeping.
\param[in] threadContext Optional user-defined per-thread context.
\return True if there is hit. False otherwise.
*/
virtual bool sweep(const PxVec3& unitDir, const PxReal maxDist,
const PxGeometry& geom0, const PxTransform& pose0, const PxGeometry& geom1, const PxTransform& pose1,
PxGeomSweepHit& sweepHit, PxHitFlags hitFlags, const PxReal inflation, PxSweepThreadContext* threadContext) const = 0;
/**
\brief Visualize custom geometry for debugging. Optional.
\param[in] geometry This geometry.
\param[in] out Render output.
\param[in] absPose Geometry absolute transform.
\param[in] cullbox Region to visualize.
*/
virtual void visualize(const PxGeometry& geometry, PxRenderOutput& out, const PxTransform& absPose, const PxBounds3& cullbox) const = 0;
/**
\brief Compute custom geometry mass properties. For geometries usable with dynamic rigidbodies.
\param[in] geometry This geometry.
\param[out] massProperties Mass properties to compute.
*/
virtual void computeMassProperties(const PxGeometry& geometry, PxMassProperties& massProperties) const = 0;
/**
\brief Compatible with PhysX's PCM feature. Allows to optimize contact generation.
\param[in] geometry This geometry.
\param[out] breakingThreshold The threshold to trigger contacts re-generation.
*/
virtual bool usePersistentContactManifold(const PxGeometry& geometry, PxReal& breakingThreshold) const = 0;
/* Destructor */
virtual ~Callbacks() {}
};
/**
\brief Default constructor.
Creates an empty object with a NULL callbacks pointer.
*/
PX_INLINE PxCustomGeometry() :
PxGeometry(PxGeometryType::eCUSTOM),
callbacks(NULL)
{}
/**
\brief Constructor.
*/
PX_INLINE PxCustomGeometry(Callbacks& _callbacks) :
PxGeometry(PxGeometryType::eCUSTOM),
callbacks(&_callbacks)
{}
/**
\brief Copy constructor.
\param[in] that Other object
*/
PX_INLINE PxCustomGeometry(const PxCustomGeometry& that) :
PxGeometry(that),
callbacks(that.callbacks)
{}
/**
\brief Assignment operator
*/
PX_INLINE void operator=(const PxCustomGeometry& that)
{
mType = that.mType;
callbacks = that.callbacks;
}
/**
\brief Returns true if the geometry is valid.
\return True if the current settings are valid for shape creation.
@see PxRigidActor::createShape, PxPhysics::createShape
*/
PX_INLINE bool isValid() const;
/**
\brief Returns the custom type of the custom geometry.
*/
PX_INLINE Type getCustomType() const
{
return callbacks ? callbacks->getCustomType() : Type::INVALID();
}
public:
Callbacks* callbacks; //!< A reference to the callbacks object.
};
PX_INLINE bool PxCustomGeometry::isValid() const
{
return mType == PxGeometryType::eCUSTOM && callbacks != NULL;
}
#if !PX_DOXYGEN
} // namespace physx
#endif
/**
\brief Used in a pair with IMPLEMENT_CUSTOM_GEOMETRY_TYPE to override the Callbacks::getCustomType() callback.
*/
#define DECLARE_CUSTOM_GEOMETRY_TYPE \
static ::physx::PxCustomGeometry::Type TYPE(); \
virtual ::physx::PxCustomGeometry::Type getCustomType() const;
/**
\brief Used in a pair with DECLARE_CUSTOM_GEOMETRY_TYPE to override the Callbacks::getCustomType() callback.
*/
#define IMPLEMENT_CUSTOM_GEOMETRY_TYPE(CLASS) \
::physx::PxCustomGeometry::Type CLASS::TYPE() \
{ \
static ::physx::PxCustomGeometry::Type customType; \
return customType; \
} \
::physx::PxCustomGeometry::Type CLASS::getCustomType() const \
{ \
return TYPE(); \
}
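/*
Usage sketch (editor's illustration, not part of the original header): a minimal callbacks class
using the two macros above, plus runtime type identification. 'VoxelGridCallbacks' is a
hypothetical user class; all pure virtual callbacks must be implemented, most are omitted here
for brevity.

    class VoxelGridCallbacks : public physx::PxCustomGeometry::Callbacks
    {
    public:
        DECLARE_CUSTOM_GEOMETRY_TYPE

        virtual physx::PxBounds3 getLocalBounds(const physx::PxGeometry&) const
        {
            return physx::PxBounds3(physx::PxVec3(-1.0f), physx::PxVec3(1.0f));
        }
        // ... generateContacts, raycast, overlap, sweep, visualize,
        //     computeMassProperties and usePersistentContactManifold go here ...
    };
    IMPLEMENT_CUSTOM_GEOMETRY_TYPE(VoxelGridCallbacks)

    // Identifying a custom geometry instance:
    //     if(customGeom.getCustomType() == VoxelGridCallbacks::TYPE()) { ... }
*/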
/** @} */
#endif
| 10,755 | C | 33.585209 | 175 | 0.716969 |
NVIDIA-Omniverse/PhysX/physx/include/solver/PxSolverDefs.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_SOLVER_DEFS_H
#define PX_SOLVER_DEFS_H
#include "PxPhysXConfig.h"
#include "foundation/PxVec3.h"
#include "foundation/PxMat33.h"
#include "foundation/PxTransform.h"
#include "PxConstraintDesc.h"
#include "geomutils/PxContactPoint.h"
#if PX_VC
#pragma warning(push)
#pragma warning(disable : 4324) // structure was padded due to alignment
#endif
#if !PX_DOXYGEN
namespace physx
{
#endif
struct PxTGSSolverBodyVel;
/**
\brief Struct that the solver uses to store velocity updates for a body
*/
struct PxSolverBody
{
PX_ALIGN(16, PxVec3) linearVelocity; //!< Delta linear velocity computed by the solver
PxU16 maxSolverNormalProgress; //!< Progress counter used by constraint batching and parallel island solver.
PxU16 maxSolverFrictionProgress; //!< Progress counter used by constraint batching and parallel island solver.
PxVec3 angularState; //!< Delta angular velocity state computed by the solver.
PxU32 solverProgress; //!< Progress counter used by constraint batching and parallel island solver
PxSolverBody() : linearVelocity(0.f), maxSolverNormalProgress(0), maxSolverFrictionProgress(0), angularState(0), solverProgress(0)
{
}
};
PX_COMPILE_TIME_ASSERT(sizeof(PxSolverBody) == 32);
/**
\brief Struct that the solver uses to store the state and other properties of a body
*/
struct PxSolverBodyData
{
PX_ALIGN(16, PxVec3 linearVelocity); //!< 12 Pre-solver linear velocity
PxReal invMass; //!< 16 inverse mass
PxVec3 angularVelocity; //!< 28 Pre-solver angular velocity
PxReal reportThreshold; //!< 32 contact force threshold
PxMat33 sqrtInvInertia; //!< 68 inverse inertia in world space
PxReal penBiasClamp; //!< 72 the penetration bias clamp
PxU32 nodeIndex; //!< 76 the node idx of this solverBodyData. Used by solver to reference between solver bodies and island bodies. Not required by immediate mode
PxReal maxContactImpulse; //!< 80 the max contact impulse
PxTransform body2World; //!< 108 the body's transform
PxU16 pad; //!< 112 pad
PX_FORCE_INLINE PxReal projectVelocity(const PxVec3& lin, const PxVec3& ang) const
{
return linearVelocity.dot(lin) + angularVelocity.dot(ang);
}
};
PX_COMPILE_TIME_ASSERT(0 == (sizeof(PxSolverBodyData) & 15));
//----------------------------------
/**
\brief A header that defines the size of a specific batch of constraints (of same type and without dependencies)
*/
struct PxConstraintBatchHeader
{
PxU32 startIndex; //!< Start index for this batch
PxU16 stride; //!< Number of constraints in this batch (range: 1-4)
PxU16 constraintType; //!< The type of constraint this batch references
};
/**
\brief Constraint descriptor used inside the solver
*/
struct PxSolverConstraintDesc
{
static const PxU16 RIGID_BODY = 0xffff;
enum ConstraintType
{
eCONTACT_CONSTRAINT, //!< Defines this pair is a contact constraint
eJOINT_CONSTRAINT //!< Defines this pair is a joint constraint
};
union
{
PxSolverBody* bodyA; //!< bodyA pointer
PxTGSSolverBodyVel* tgsBodyA; //!< bodyA pointer
void* articulationA; //!< Articulation pointer for body A
};
union
{
PxSolverBody* bodyB; //!< BodyB pointer
PxTGSSolverBodyVel* tgsBodyB; //!< BodyB pointer
void* articulationB; //!< Articulation pointer for body B
};
PxU32 bodyADataIndex; //!< Body A's index into the SolverBodyData array
PxU32 bodyBDataIndex; //!< Body B's index into the SolverBodyData array
PxU32 linkIndexA; //!< Link index defining which link in Articulation A this constraint affects. If not an articulation, must be PxSolverConstraintDesc::RIGID_BODY
PxU32 linkIndexB; //!< Link index defining which link in Articulation B this constraint affects. If not an articulation, must be PxSolverConstraintDesc::RIGID_BODY
PxU8* constraint; //!< Pointer to the constraint rows to be solved
void* writeBack; //!< Pointer to the writeback structure results for this given constraint are to be written to
PxU16 progressA; //!< Internal progress counter
PxU16 progressB; //!< Internal progress counter
PxU16 constraintLengthOver16; //!< constraintLength/16, max constraint length is 1MB
PxU8 padding[10];
};
/**
\brief Data structure used for preparing constraints before solving them
*/
struct PxSolverConstraintPrepDescBase
{
enum BodyState
{
eDYNAMIC_BODY = 1 << 0,
eSTATIC_BODY = 1 << 1,
eKINEMATIC_BODY = 1 << 2,
eARTICULATION = 1 << 3
};
PxConstraintInvMassScale invMassScales; //!< In: The local mass scaling for this pair.
PxSolverConstraintDesc* desc; //!< Output: The PxSolverConstraintDesc filled in by contact prep
const PxSolverBody* body0; //!< In: The first body. Stores velocity information. Unused unless contact involves articulations.
const PxSolverBody* body1; //!< In: The second body. Stores velocity information. Unused unless contact involves articulations.
const PxSolverBodyData* data0; //!< In: The first PxSolverBodyData. Stores mass and miscellaneous information for the first body.
const PxSolverBodyData* data1; //!< In: The second PxSolverBodyData. Stores mass and miscellaneous information for the second body
PxTransform bodyFrame0; //!< In: The world-space transform of the first body.
PxTransform bodyFrame1; //!< In: The world-space transform of the second body.
BodyState bodyState0; //!< In: Defines what kind of actor the first body is
BodyState bodyState1; //!< In: Defines what kind of actor the second body is
};
/**
\brief Data structure used for preparing constraints before solving them
*/
struct PxSolverConstraintPrepDesc : public PxSolverConstraintPrepDescBase
{
PX_ALIGN(16, Px1DConstraint* rows); //!< The start of the constraint rows
PxU32 numRows; //!< The number of rows
PxReal linBreakForce, angBreakForce; //!< Break forces
PxReal minResponseThreshold; //!< The minimum response threshold
void* writeback; //!< Pointer to constraint writeback structure. Reports back joint breaking. If not required, set to NULL.
bool disablePreprocessing; //!< Disable joint pre-processing. Pre-processing can improve stability but under certain circumstances, e.g. when some invInertia rows are zero/almost zero, can cause instabilities.
bool improvedSlerp; //!< Use improved slerp model
bool driveLimitsAreForces; //!< Indicates whether drive limits are forces
bool extendedLimits; //!< Indicates whether we want to use extended limits
bool disableConstraint; //!< Disables constraint
PxVec3p body0WorldOffset; //!< Body0 world offset
};
/**
\brief Data structure used for preparing constraints before solving them
*/
struct PxSolverContactDesc : public PxSolverConstraintPrepDescBase
{
void* shapeInteraction; //!< Pointer to shape interaction. Used for force threshold reports in solver. Set to NULL if using immediate mode.
PxContactPoint* contacts; //!< The start of the contacts for this pair
PxU32 numContacts; //!< The total number of contacts this pair references.
bool hasMaxImpulse; //!< Defines whether this pair has maxImpulses clamping enabled
bool disableStrongFriction; //!< Defines whether this pair disables strong friction (sticky friction correlation)
bool hasForceThresholds; //!< Defines whether this pair requires force thresholds
PxReal restDistance; //!< A distance at which the solver should aim to hold the bodies separated. Default is 0
PxReal maxCCDSeparation; //!< A distance used to configure speculative CCD behavior. Default is PX_MAX_F32. Set internally in PhysX for bodies with eENABLE_SPECULATIVE_CCD on. Do not set directly!
PxU8* frictionPtr; //!< InOut: Friction patch correlation data. Set each frame by solver. Can be retained for improved behavior or discarded each frame.
PxU8 frictionCount; //!< The total number of friction patches in this pair
PxReal* contactForces; //!< Out: A buffer for the solver to write applied contact forces to.
PxU32 startFrictionPatchIndex; //!< Start index of friction patch in the correlation buffer. Set by friction correlation
PxU32 numFrictionPatches; //!< Total number of friction patches in this pair. Set by friction correlation
PxU32 startContactPatchIndex; //!< The start index of this pair's contact patches in the correlation buffer. For internal use only
PxU16 numContactPatches; //!< Total number of contact patches.
PxU16 axisConstraintCount; //!< Axis constraint count. Defines how many constraint rows this pair has produced. Useful for statistical purposes.
PxReal offsetSlop; //!< Slop value used to snap contact line of action back in-line with the COM.
//PxU8 pad[16 - sizeof(void*)];
};
class PxConstraintAllocator
{
public:
/**
\brief Allocates constraint data. It is the application's responsibility to release this memory after PxSolveConstraints has completed.
\param[in] byteSize Allocation size in bytes
\return The allocated memory. This address must be 16-byte aligned.
*/
virtual PxU8* reserveConstraintData(const PxU32 byteSize) = 0;
/**
\brief Allocates friction data. Friction data can be retained by the application for a given pair and provided as an input to PxSolverContactDesc to improve simulation stability.
It is the application's responsibility to release this memory. If this memory is released, the application should ensure it does not pass pointers to this memory to PxSolverContactDesc.
\param[in] byteSize Allocation size in bytes
\return The allocated memory. This address must be 4-byte aligned.
*/
virtual PxU8* reserveFrictionData(const PxU32 byteSize) = 0;
virtual ~PxConstraintAllocator() {}
};
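/*
Usage sketch (editor's illustration, not part of the original header): a minimal bump allocator
implementing PxConstraintAllocator. It assumes the backing buffer is 16-byte aligned and owned by
the caller, and that returning NULL on exhaustion is acceptable for the calling code;
'LinearConstraintAllocator' is a hypothetical name.

    class LinearConstraintAllocator : public PxConstraintAllocator
    {
        PxU8* mBuffer;
        PxU32 mCapacity;
        PxU32 mOffset;
    public:
        LinearConstraintAllocator(PxU8* buffer, PxU32 capacity)
            : mBuffer(buffer), mCapacity(capacity), mOffset(0) {}

        virtual PxU8* reserveConstraintData(const PxU32 byteSize)
        {
            const PxU32 alignedSize = (byteSize + 15u) & ~15u;    // keep 16-byte alignment
            if(mOffset + alignedSize > mCapacity)
                return NULL;                                      // out of space
            PxU8* ptr = mBuffer + mOffset;
            mOffset += alignedSize;
            return ptr;
        }

        virtual PxU8* reserveFrictionData(const PxU32 byteSize)
        {
            return reserveConstraintData(byteSize);    // 16-byte alignment also satisfies the 4-byte requirement
        }
    };
*/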
/** \addtogroup physics
@{ */
struct PxArticulationAxis
{
enum Enum
{
eTWIST = 0, //!< Rotational about eX
eSWING1 = 1, //!< Rotational about eY
eSWING2 = 2, //!< Rotational about eZ
eX = 3, //!< Linear in eX
eY = 4, //!< Linear in eY
eZ = 5, //!< Linear in eZ
eCOUNT = 6
};
};
PX_FLAGS_OPERATORS(PxArticulationAxis::Enum, PxU8)
struct PxArticulationMotion
{
enum Enum
{
eLOCKED = 0, //!< Locked axis, i.e. degree of freedom (DOF)
eLIMITED = 1, //!< Limited DOF - set limits of joint DOF together with this flag, see PxArticulationJointReducedCoordinate::setLimitParams
eFREE = 2 //!< Free DOF
};
};
typedef PxFlags<PxArticulationMotion::Enum, PxU8> PxArticulationMotions;
PX_FLAGS_OPERATORS(PxArticulationMotion::Enum, PxU8)
struct PxArticulationJointType
{
enum Enum
{
eFIX = 0, //!< All joint axes, i.e. degrees of freedom (DOFs) locked
ePRISMATIC = 1, //!< Single linear DOF, e.g. cart on a rail
eREVOLUTE = 2, //!< Single rotational DOF, e.g. an elbow joint or a rotational motor, position wrapped at 2pi radians
eREVOLUTE_UNWRAPPED = 3, //!< Single rotational DOF, e.g. an elbow joint or a rotational motor, position not wrapped
eSPHERICAL = 4, //!< Ball and socket joint with two or three DOFs
eUNDEFINED = 5
};
};
struct PxArticulationFlag
{
enum Enum
{
eFIX_BASE = (1 << 0), //!< Set articulation base to be fixed.
eDRIVE_LIMITS_ARE_FORCES = (1<<1), //!< Limits for drive effort are forces and torques rather than impulses, see PxArticulationDrive::maxForce.
eDISABLE_SELF_COLLISION = (1<<2), //!< Disable collisions between the articulation's links (note that parent/child collisions are disabled internally in either case).
eCOMPUTE_JOINT_FORCES = (1<<3) //!< @deprecated Enable in order to be able to query joint solver (i.e. constraint) forces using PxArticulationCache::jointSolverForces.
};
};
typedef PxFlags<PxArticulationFlag::Enum, PxU8> PxArticulationFlags;
PX_FLAGS_OPERATORS(PxArticulationFlag::Enum, PxU8)
struct PxArticulationDriveType
{
enum Enum
{
eFORCE = 0, //!< The output of the implicit spring drive controller is a force/torque.
eACCELERATION = 1, //!< The output of the implicit spring drive controller is a joint acceleration (use this to get (spatial)-inertia-invariant behavior of the drive).
eTARGET = 2, //!< Sets the drive gains internally to track a target position almost kinematically (i.e. with very high drive gains).
eVELOCITY = 3, //!< Sets the drive gains internally to track a target velocity almost kinematically (i.e. with very high drive gains).
eNONE = 4
};
};
/**
\brief Data structure to set articulation joint limits.
- The lower limit should be strictly smaller than the higher limit. If the limits should be equal, use PxArticulationMotion::eLOCKED
and an appropriate offset in the parent/child joint frames.
- The limit units are linear units (equivalent to scene units) for a translational axis, or radians for a rotational axis.
@see PxArticulationJointReducedCoordinate::setLimitParams, PxArticulationReducedCoordinate
*/
struct PxArticulationLimit
{
PxArticulationLimit(){}
PxArticulationLimit(const PxReal low_, const PxReal high_)
{
low = low_;
high = high_;
}
/**
\brief The lower limit on the joint axis position.
<b>Range:</b> [-PX_MAX_F32, high)<br>
<b>Default:</b> 0.0f<br>
*/
PxReal low;
/**
\brief The higher limit on the joint axis position.
<b>Range:</b> (low, PX_MAX_F32]<br>
<b>Default:</b> 0.0f<br>
*/
PxReal high;
};
/**
\brief Data structure for articulation joint drive configuration.
@see PxArticulationJointReducedCoordinate::setDriveParams, PxArticulationReducedCoordinate
*/
struct PxArticulationDrive
{
PxArticulationDrive(){}
PxArticulationDrive(const PxReal stiffness_, const PxReal damping_, const PxReal maxForce_, PxArticulationDriveType::Enum driveType_=PxArticulationDriveType::eFORCE)
{
stiffness = stiffness_;
damping = damping_;
maxForce = maxForce_;
driveType = driveType_;
}
/**
\brief The drive stiffness, i.e. the proportional gain of the implicit PD controller.
See manual for further information, and the drives' implicit spring-damper (i.e. PD control) implementation in particular.
<b>Units:</b> (distance = linear scene units)<br>
Rotational axis: torque/rad if driveType = PxArticulationDriveType::eFORCE; or (rad/s^2)/rad if driveType = PxArticulationDriveType::eACCELERATION<br>
Translational axis: force/distance if driveType = PxArticulationDriveType::eFORCE; or (distance/s^2)/distance if driveType = PxArticulationDriveType::eACCELERATION<br>
<b>Range:</b> [0, PX_MAX_F32]<br>
<b>Default:</b> 0.0f<br>
*/
PxReal stiffness;
/**
\brief The drive damping, i.e. the derivative gain of the implicit PD controller.
See manual for further information, and the drives' implicit spring-damper (i.e. PD control) implementation in particular.
<b>Units:</b> (distance = linear scene units)<br>
Rotational axis: torque/(rad/s) if driveType = PxArticulationDriveType::eFORCE; or (rad/s^2)/(rad/s) if driveType = PxArticulationDriveType::eACCELERATION<br>
Translational axis: force/(distance/s) if driveType = PxArticulationDriveType::eFORCE; or (distance/s^2)/(distance/s) if driveType = PxArticulationDriveType::eACCELERATION<br>
<b>Range:</b> [0, PX_MAX_F32]<br>
<b>Default:</b> 0.0f<br>
*/
PxReal damping;
/**
\brief The drive force limit.
- The limit is enforced regardless of the drive type #PxArticulationDriveType.
- The limit corresponds to a force (linear axis) or torque (rotational axis) if PxArticulationFlag::eDRIVE_LIMITS_ARE_FORCES is set, and to an impulse (force|torque * dt) otherwise.
<b>Range:</b> [0, PX_MAX_F32]<br>
<b>Default:</b> 0.0f<br>
@see PxArticulationFlag::eDRIVE_LIMITS_ARE_FORCES
*/
PxReal maxForce;
/**
\brief The drive type.
@see PxArticulationDriveType
*/
PxArticulationDriveType::Enum driveType;
};
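/*
Usage sketch (editor's illustration, not part of the original header): configuring a limited,
driven twist axis on a reduced-coordinate articulation joint. 'joint' is a hypothetical
PxArticulationJointReducedCoordinate* (declared elsewhere in the SDK); the gain and limit values
are arbitrary.

    joint->setMotion(PxArticulationAxis::eTWIST, PxArticulationMotion::eLIMITED);
    joint->setLimitParams(PxArticulationAxis::eTWIST, PxArticulationLimit(-PxPi * 0.25f, PxPi * 0.25f));
    joint->setDriveParams(PxArticulationAxis::eTWIST,
                          PxArticulationDrive(1000.0f, 100.0f, PX_MAX_F32, PxArticulationDriveType::eACCELERATION));
    joint->setDriveTarget(PxArticulationAxis::eTWIST, 0.0f);
*/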
/** @} */
struct PxTGSSolverBodyVel
{
PX_ALIGN(16, PxVec3) linearVelocity; //12
PxU16 nbStaticInteractions; //14 Used to accumulate the number of static interactions
PxU16 maxDynamicPartition; //16 Used to accumulate the max partition of dynamic interactions
PxVec3 angularVelocity; //28
PxU32 partitionMask; //32 Used in partitioning as a bit-field
PxVec3 deltaAngDt; //44
PxReal maxAngVel; //48
PxVec3 deltaLinDt; //60
PxU16 lockFlags; //62
bool isKinematic; //63
PxU8 pad; //64
PX_FORCE_INLINE PxReal projectVelocity(const PxVec3& lin, const PxVec3& ang) const
{
return linearVelocity.dot(lin) + angularVelocity.dot(ang);
}
};
//Needed only by prep, integration and 1D constraints
struct PxTGSSolverBodyTxInertia
{
PxTransform deltaBody2World;
PxMat33 sqrtInvInertia; //!< inverse inertia in world space
};
struct PxTGSSolverBodyData
{
PX_ALIGN(16, PxVec3) originalLinearVelocity; //!< Pre-solver linear velocity.
PxReal maxContactImpulse; //!< The max contact impulse.
PxVec3 originalAngularVelocity; //!< Pre-solver angular velocity
PxReal penBiasClamp; //!< The penetration bias clamp.
PxReal invMass; //!< Inverse mass.
PxU32 nodeIndex; //!< The node idx of this solverBodyData. Used by solver to reference between solver bodies and island bodies. Not required by immediate mode.
PxReal reportThreshold; //!< Contact force threshold.
PxU32 pad;
PxReal projectVelocity(const PxVec3& linear, const PxVec3& angular) const
{
return originalLinearVelocity.dot(linear) + originalAngularVelocity.dot(angular);
}
};
struct PxTGSSolverConstraintPrepDescBase
{
PxConstraintInvMassScale invMassScales; //!< In: The local mass scaling for this pair.
PxSolverConstraintDesc* desc; //!< Output: The PxSolverConstraintDesc filled in by contact prep
const PxTGSSolverBodyVel* body0; //!< In: The first body. Stores velocity information. Unused unless contact involves articulations.
const PxTGSSolverBodyVel* body1; //!< In: The second body. Stores velocity information. Unused unless contact involves articulations.
const PxTGSSolverBodyTxInertia* body0TxI; //!< In: The first PxTGSSolverBodyTxInertia. Stores the delta body to world transform and sqrtInvInertia for first body.
const PxTGSSolverBodyTxInertia* body1TxI; //!< In: The second PxTGSSolverBodyTxInertia. Stores the delta body to world transform and sqrtInvInertia for second body.
const PxTGSSolverBodyData* bodyData0; //!< In: The first PxTGSSolverBodyData. Stores mass and miscellaneous information for the first body.
const PxTGSSolverBodyData* bodyData1; //!< In: The second PxTGSSolverBodyData. Stores mass and miscellaneous information for the second body.
PxTransform bodyFrame0; //!< In: The world-space transform of the first body.
PxTransform bodyFrame1; //!< In: The world-space transform of the second body.
PxSolverContactDesc::BodyState bodyState0; //!< In: Defines what kind of actor the first body is
PxSolverContactDesc::BodyState bodyState1; //!< In: Defines what kind of actor the second body is
};
struct PxTGSSolverConstraintPrepDesc : public PxTGSSolverConstraintPrepDescBase
{
Px1DConstraint* rows; //!< The start of the constraint rows
PxU32 numRows; //!< The number of rows
PxReal linBreakForce, angBreakForce; //!< Break forces
PxReal minResponseThreshold; //!< The minimum response threshold
void* writeback; //!< Pointer to constraint writeback structure. Reports back joint breaking. If not required, set to NULL.
bool disablePreprocessing; //!< Disable joint pre-processing. Pre-processing can improve stability but under certain circumstances, e.g. when some invInertia rows are zero/almost zero, can cause instabilities.
bool improvedSlerp; //!< Use improved slerp model
bool driveLimitsAreForces; //!< Indicates whether drive limits are forces
bool extendedLimits; //!< Indicates whether extended limits are used
bool disableConstraint; //!< Disables constraint
PxVec3p body0WorldOffset; //!< Body0 world offset
PxVec3p cA2w; //!< Location of anchor point A in world space
PxVec3p cB2w; //!< Location of anchor point B in world space
};
struct PxTGSSolverContactDesc : public PxTGSSolverConstraintPrepDescBase
{
void* shapeInteraction; //!< Pointer to shape interaction. Used for force threshold reports in solver. Set to NULL if using immediate mode.
PxContactPoint* contacts; //!< The start of the contacts for this pair
PxU32 numContacts; //!< The total number of contacts this pair references.
bool hasMaxImpulse; //!< Defines whether this pair has maxImpulses clamping enabled
bool disableStrongFriction; //!< Defines whether this pair disables strong friction (sticky friction correlation)
bool hasForceThresholds; //!< Defines whether this pair requires force thresholds
PxReal restDistance; //!< A distance at which the solver should aim to hold the bodies separated. Default is 0
PxReal maxCCDSeparation; //!< A distance used to configure speculative CCD behavior. Default is PX_MAX_F32. Set internally in PhysX for bodies with eENABLE_SPECULATIVE_CCD on. Do not set directly!
PxU8* frictionPtr; //!< InOut: Friction patch correlation data. Set each frame by solver. Can be retained for improved behavior or discarded each frame.
PxU8 frictionCount; //!< The total number of friction patches in this pair
PxReal* contactForces; //!< Out: A buffer for the solver to write applied contact forces to.
PxU32 startFrictionPatchIndex; //!< Start index of friction patch in the correlation buffer. Set by friction correlation
PxU32 numFrictionPatches; //!< Total number of friction patches in this pair. Set by friction correlation
PxU32 startContactPatchIndex; //!< The start index of this pair's contact patches in the correlation buffer. For internal use only
PxU16 numContactPatches; //!< Total number of contact patches.
PxU16 axisConstraintCount; //!< Axis constraint count. Defines how many constraint rows this pair has produced. Useful for statistical purposes.
PxReal maxImpulse; //!< The maximum impulse the solver is allowed to introduce for this pair of bodies.
PxReal torsionalPatchRadius; //!< This defines the radius of the contact patch used to apply torsional friction.
PxReal minTorsionalPatchRadius; //!< This defines the minimum radius of the contact patch used to apply torsional friction.
PxReal offsetSlop; //!< Slop value used to snap contact line of action back in-line with the COM.
};
#if !PX_DOXYGEN
}
#endif
#if PX_VC
#pragma warning(pop)
#endif
#endif
| 23,999 | C | 42.956044 | 214 | 0.744823 |
NVIDIA-Omniverse/PhysX/physx/buildtools/cmake_generate_projects.py | import sys
import os
import glob
import os.path
import shutil
import subprocess
import xml.etree.ElementTree
def packmanExt():
if sys.platform == 'win32':
return 'cmd'
return 'sh'
def cmakeExt():
if sys.platform == 'win32':
return '.exe'
return ''
def filterPreset(presetName):
winPresetFilter = ['win','switch','crosscompile']
if sys.platform == 'win32':
if any(presetName.find(elem) != -1 for elem in winPresetFilter):
return True
else:
if all(presetName.find(elem) == -1 for elem in winPresetFilter):
return True
return False
def noPresetProvided():
global input
print('Preset parameter required, available presets:')
presetfiles = []
for file in glob.glob("buildtools/presets/*.xml"):
presetfiles.append(file)
if len(presetfiles) == 0:
for file in glob.glob("buildtools/presets/public/*.xml"):
presetfiles.append(file)
counter = 0
presetList = []
for preset in presetfiles:
if filterPreset(preset):
presetXml = xml.etree.ElementTree.parse(preset).getroot()
if(preset.find('user') == -1):
print('(' + str(counter) + ') ' + presetXml.get('name') +
' <--- ' + presetXml.get('comment'))
presetList.append(presetXml.get('name'))
else:
print('(' + str(counter) + ') ' + presetXml.get('name') +
'.user <--- ' + presetXml.get('comment'))
presetList.append(presetXml.get('name') + '.user')
counter = counter + 1
# Fix Python 2.x.
try:
input = raw_input
except NameError:
pass
    mode = int(input('Enter preset number: '))
return presetList[mode]
class CMakePreset:
presetName = ''
targetPlatform = ''
compiler = ''
generator = ''
cmakeSwitches = []
cmakeParams = []
def __init__(self, presetName):
xmlPath = "buildtools/presets/"+presetName+'.xml'
if os.path.isfile(xmlPath):
print('Using preset xml: '+xmlPath)
else:
xmlPath = "buildtools/presets/public/"+presetName+'.xml'
if os.path.isfile(xmlPath):
print('Using preset xml: '+xmlPath)
else:
print('Preset xml file: '+xmlPath+' not found')
exit()
# get the xml
presetNode = xml.etree.ElementTree.parse(xmlPath).getroot()
self.presetName = presetNode.attrib['name']
for platform in presetNode.findall('platform'):
self.targetPlatform = platform.attrib['targetPlatform']
self.compiler = platform.attrib['compiler']
self.generator = platform.get('generator')
print('Target platform: ' + self.targetPlatform +
' using compiler: ' + self.compiler)
if self.generator is not None:
print(' using generator: ' + self.generator)
for cmakeSwitch in presetNode.find('CMakeSwitches'):
cmSwitch = '-D' + \
cmakeSwitch.attrib['name'] + '=' + \
cmakeSwitch.attrib['value'].upper()
self.cmakeSwitches.append(cmSwitch)
for cmakeParam in presetNode.find('CMakeParams'):
if cmakeParam.attrib['name'] == 'CMAKE_INSTALL_PREFIX' or cmakeParam.attrib['name'] == 'PX_OUTPUT_LIB_DIR' or cmakeParam.attrib['name'] == 'PX_OUTPUT_EXE_DIR' or cmakeParam.attrib['name'] == 'PX_OUTPUT_DLL_DIR':
cmParam = '-D' + cmakeParam.attrib['name'] + '=\"' + \
os.environ['PHYSX_ROOT_DIR'] + '/' + \
cmakeParam.attrib['value'] + '\"'
else:
cmParam = '-D' + \
cmakeParam.attrib['name'] + '=' + \
cmakeParam.attrib['value']
self.cmakeParams.append(cmParam)
pass
def isMultiConfigPlatform(self):
if self.targetPlatform == 'linux':
return False
elif self.targetPlatform == 'linuxAarch64':
return False
return True
def getCMakeSwitches(self):
outString = ''
# We need gpuProjectsFound flag to avoid issues when we have both
# PX_GENERATE_GPU_PROJECTS and PX_GENERATE_GPU_PROJECTS_ONLY switches
gpuProjectsFound = False # initialize flag
for cmakeSwitch in self.cmakeSwitches:
outString = outString + ' ' + cmakeSwitch
if not gpuProjectsFound and cmakeSwitch.find('PX_GENERATE_GPU_PROJECTS') != -1:
gpuProjectsFound = True # set flag to True when keyword found
if os.environ.get('PM_CUDA_PATH') is not None:
outString = outString + ' -DCUDAToolkit_ROOT_DIR=' + \
os.environ['PM_CUDA_PATH']
if self.compiler in ['vc15', 'vc16', 'vc17'] and self.generator != 'ninja':
outString = outString + ' -T cuda=' + os.environ['PM_CUDA_PATH']
# TODO: Need to do the same for gcc (aarch64) when we package it with Packman
elif self.compiler == 'clang':
if os.environ.get('PM_clang_PATH') is not None:
outString = outString + ' -DCMAKE_CUDA_HOST_COMPILER=' + \
os.environ['PM_clang_PATH'] + '/bin/clang++'
return outString
def getCMakeParams(self):
outString = ''
for cmakeParam in self.cmakeParams:
outString = outString + ' ' + cmakeParam # + ' --trace'
return outString
def getPlatformCMakeParams(self):
cmake_modules_root = os.environ['PHYSX_ROOT_DIR'] + '/source/compiler/cmake/modules'
outString = ' '
vs_versions = {
'vc15': '\"Visual Studio 15 2017\"',
'vc16': '\"Visual Studio 16 2019\"',
'vc17': '\"Visual Studio 17 2022\"'
}
# Visual studio
if self.compiler in vs_versions:
generator = '-G \"Ninja Multi-Config\"' if self.generator == 'ninja' else '-G ' + vs_versions[self.compiler]
outString += generator
# mac
elif self.compiler == 'xcode':
outString = outString + '-G Xcode'
# Linux
elif self.targetPlatform in ['linux', 'linuxAarch64']:
if self.generator is not None and self.generator == 'ninja':
outString = outString + '-G \"Ninja\"'
outString = outString + ' -DCMAKE_MAKE_PROGRAM=' + os.environ['PM_ninja_PATH'] + '/ninja'
else:
outString = outString + '-G \"Unix Makefiles\"'
if self.targetPlatform == 'win64':
if self.generator != 'ninja':
outString = outString + ' -Ax64'
outString = outString + ' -DTARGET_BUILD_PLATFORM=windows'
outString = outString + ' -DPX_OUTPUT_ARCH=x86'
return outString
elif self.targetPlatform == 'switch64':
outString = outString + ' -DTARGET_BUILD_PLATFORM=switch'
outString = outString + ' -DCMAKE_TOOLCHAIN_FILE=' + \
cmake_modules_root + '/switch/NX64Toolchain.txt'
outString = outString + ' -DCMAKE_GENERATOR_PLATFORM=NX64'
return outString
elif self.targetPlatform == 'linux':
outString = outString + ' -DTARGET_BUILD_PLATFORM=linux'
outString = outString + ' -DPX_OUTPUT_ARCH=x86'
if self.compiler == 'clang-crosscompile':
outString = outString + ' -DCMAKE_TOOLCHAIN_FILE=' + \
cmake_modules_root + '/linux/LinuxCrossToolchain.x86_64-unknown-linux-gnu.cmake'
outString = outString + ' -DCMAKE_MAKE_PROGRAM=' + os.environ.get('PM_MinGW_PATH') + '/bin/mingw32-make.exe'
elif self.compiler == 'clang':
if os.environ.get('PM_clang_PATH') is not None:
outString = outString + ' -DCMAKE_C_COMPILER=' + \
os.environ['PM_clang_PATH'] + '/bin/clang'
outString = outString + ' -DCMAKE_CXX_COMPILER=' + \
os.environ['PM_clang_PATH'] + '/bin/clang++'
else:
outString = outString + ' -DCMAKE_C_COMPILER=clang'
outString = outString + ' -DCMAKE_CXX_COMPILER=clang++'
return outString
elif self.targetPlatform == 'linuxAarch64':
outString = outString + ' -DTARGET_BUILD_PLATFORM=linux'
outString = outString + ' -DPX_OUTPUT_ARCH=arm'
if self.compiler == 'clang-crosscompile':
outString = outString + ' -DCMAKE_TOOLCHAIN_FILE=' + \
cmake_modules_root + '/linux/LinuxCrossToolchain.aarch64-unknown-linux-gnueabihf.cmake'
outString = outString + ' -DCMAKE_MAKE_PROGRAM=' + os.environ.get('PM_MinGW_PATH') + '/bin/mingw32-make.exe'
elif self.compiler == 'gcc':
# TODO: To change so it uses Packman's compiler. Then add it as
# host compiler for CUDA above.
outString = outString + ' -DCMAKE_TOOLCHAIN_FILE=\"' + \
cmake_modules_root + '/linux/LinuxAarch64.cmake\"'
return outString
elif self.targetPlatform == 'mac64':
outString = outString + ' -DTARGET_BUILD_PLATFORM=mac'
outString = outString + ' -DPX_OUTPUT_ARCH=x86'
return outString
return ''
def getCommonParams():
outString = '--no-warn-unused-cli'
outString = outString + ' -DCMAKE_PREFIX_PATH=\"' + os.environ['PM_PATHS'] + '\"'
outString = outString + ' -DPHYSX_ROOT_DIR=\"' + \
os.environ['PHYSX_ROOT_DIR'] + '\"'
outString = outString + ' -DPX_OUTPUT_LIB_DIR=\"' + \
os.environ['PHYSX_ROOT_DIR'] + '\"'
outString = outString + ' -DPX_OUTPUT_BIN_DIR=\"' + \
os.environ['PHYSX_ROOT_DIR'] + '\"'
if os.environ.get('GENERATE_SOURCE_DISTRO') == '1':
outString = outString + ' -DPX_GENERATE_SOURCE_DISTRO=1'
return outString
def cleanupCompilerDir(compilerDirName):
if os.path.exists(compilerDirName):
if sys.platform == 'win32':
os.system('rmdir /S /Q ' + compilerDirName)
else:
shutil.rmtree(compilerDirName, True)
if os.path.exists(compilerDirName) == False:
os.makedirs(compilerDirName)
def presetProvided(pName):
parsedPreset = CMakePreset(pName)
print('PM_PATHS: ' + os.environ['PM_PATHS'])
if os.environ.get('PM_cmake_PATH') is not None:
cmakeExec = os.environ['PM_cmake_PATH'] + '/bin/cmake' + cmakeExt()
else:
cmakeExec = 'cmake' + cmakeExt()
print('Cmake: ' + cmakeExec)
# gather cmake parameters
cmakeParams = parsedPreset.getPlatformCMakeParams()
cmakeParams = cmakeParams + ' ' + getCommonParams()
cmakeParams = cmakeParams + ' ' + parsedPreset.getCMakeSwitches()
cmakeParams = cmakeParams + ' ' + parsedPreset.getCMakeParams()
# print(cmakeParams)
if os.path.isfile(os.environ['PHYSX_ROOT_DIR'] + '/compiler/internal/CMakeLists.txt'):
cmakeMasterDir = 'internal'
else:
cmakeMasterDir = 'public'
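    # Multi-config generators (e.g. Visual Studio) get a single build tree;
    # single-config generators get one build tree per configuration below.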
if parsedPreset.isMultiConfigPlatform():
# cleanup and create output directory
outputDir = os.path.join('compiler', parsedPreset.presetName)
cleanupCompilerDir(outputDir)
# run the cmake script
#print('Cmake params:' + cmakeParams)
os.chdir(os.path.join(os.environ['PHYSX_ROOT_DIR'], outputDir))
os.system(cmakeExec + ' \"' +
os.environ['PHYSX_ROOT_DIR'] + '/compiler/' + cmakeMasterDir + '\"' + cmakeParams)
os.chdir(os.environ['PHYSX_ROOT_DIR'])
else:
configs = ['debug', 'checked', 'profile', 'release']
for config in configs:
# cleanup and create output directory
outputDir = os.path.join('compiler', parsedPreset.presetName + '-' + config)
cleanupCompilerDir(outputDir)
# run the cmake script
#print('Cmake params:' + cmakeParams)
os.chdir(os.path.join(os.environ['PHYSX_ROOT_DIR'], outputDir))
# print(cmakeExec + ' \"' + os.environ['PHYSX_ROOT_DIR'] + '/compiler/' + cmakeMasterDir + '\"' + cmakeParams + ' -DCMAKE_BUILD_TYPE=' + config)
os.system(cmakeExec + ' \"' + os.environ['PHYSX_ROOT_DIR'] + '/compiler/' +
cmakeMasterDir + '\"' + cmakeParams + ' -DCMAKE_BUILD_TYPE=' + config)
os.chdir(os.environ['PHYSX_ROOT_DIR'])
def main():
if (sys.version_info[0] < 3) or (sys.version_info[0] == 3 and sys.version_info[1] < 5):
print("You are using Python {}. You must use Python 3.5 and up. Please read README.md for requirements.").format(sys.version)
exit()
physx_root_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
os.environ['PHYSX_ROOT_DIR'] = physx_root_dir.replace("\\", "/")
if len(sys.argv) != 2:
presetName = noPresetProvided()
if sys.platform == 'win32':
print('Running generate_projects.bat ' + presetName)
cmd = 'generate_projects.bat {}'.format(presetName)
result = subprocess.run(cmd, cwd=os.environ['PHYSX_ROOT_DIR'], check=True, universal_newlines=True)
# TODO: catch exception and add capture errors
else:
print('Running generate_projects.sh ' + presetName)
# TODO: once we have Python 3.7.2 for linux, add the text=True instead of universal_newlines
result = subprocess.run(['bash', './generate_projects.sh', presetName], cwd=os.environ['PHYSX_ROOT_DIR'], check=True, universal_newlines=True)
# TODO: catch exception and add capture errors
else:
presetName = sys.argv[1]
if filterPreset(presetName):
presetProvided(presetName)
else:
print('Preset not supported on this build platform.')
main()
| 14,130 | Python | 42.885093 | 223 | 0.573107 |
NVIDIA-Omniverse/PhysX/physx/buildtools/templates/PxIncludeTemplate.h | ${BOILERPLATE_CONTENT}
#ifndef PX_${HEADER_GUARD_NAME}
#define PX_${HEADER_GUARD_NAME}
${HEADER_CONTENT}
#endif // PX_${HEADER_GUARD_NAME}
| 143 | C | 14.999998 | 34 | 0.692308 |
NVIDIA-Omniverse/PhysX/blast/PACKAGE-INFO.yaml | Package : blast-sdk
Maintainers : Bryan Galdrikian, Eric Arnold
Description : Blast destruction SDK
SWIPAT NvBug :
Repository : https://gitlab-master.nvidia.com/omniverse/blast-sdk
License Type : NVIDIA | 203 | YAML | 32.999995 | 65 | 0.79803 |
NVIDIA-Omniverse/PhysX/blast/repo.toml | ########################################################################################################################
# Repo tool base settings
########################################################################################################################
[repo]
# Repository Name. It is used for solution name and final package name
name = "blast-sdk"
########################################################################################################################
# Build tool setup
########################################################################################################################
[repo_build]
# List of packman projects to pull (in order)
fetch.packman_host_files_to_pull = [
"${root}/deps/host-deps.packman.xml",
]
fetch.packman_target_files_to_pull = [
"${root}/deps/target-deps.packman.xml",
]
vscode.python = "${root}/_build/target-deps/python36"
vscode.python_env.PYTHONPATH= [
"$$$${PYTHONPATH}",
"${env:PYTHONPATH}"
]
vscode.python_env.PATH= [
"$$$${PATH}",
"$root/_build/$platform/$config",
]
vscode.write_python_paths_in_settings_json = true
vscode.generate_python_env_file = false
#licensing.enabled = true
#licensing.packages = [
# "${root}/deps/target-deps.packman.xml",
# "${root}/deps/usd-deps.packman.xml",
#]
#licensing.fail_on_missing = true
# Disable pip license gathering (we don't have any)
fetch.pip.gather_licenses_path = ""
msbuild.sln_file = "blast-sdk.sln"
msbuild.vs_version = "vs2017"
[[repo_build.argument]]
name = "-py"
help = "Python version."
kwargs.choices = ["0", "27", "36", "37"]
kwargs.nargs = 1
extra_premake_args = ["--python-version={}"]
########################################################################################################################
# Code Format Tool
########################################################################################################################
[repo_format]
| 1,945 | TOML | 28.938461 | 120 | 0.420051 |
NVIDIA-Omniverse/PhysX/blast/README.md | # Blast SDK Repo
Online documentation may be found here: [Blast SDK Documentation](https://nvidia-omniverse.github.io/PhysX/blast/index.html).
## Building the SDK
### Windows
1. run `build.bat`
2. built SDK location: `_build\windows-x86_64\release\blast-sdk` (release), `_build\windows-x86_64\debug\blast-sdk` (debug)
### Linux
0. initialize (once): run `./setup.sh`
1. run `./build.sh`
2. built SDK location: `_build/linux-x86_64/release/blast-sdk` (release), `_build/linux-x86_64/debug/blast-sdk` (debug)
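A full Linux build from the repository root might look like the following sketch (the output path assumes the default release configuration, and `./setup.sh` only has to be run once):
```bash
./setup.sh                                  # one-time initialization
./build.sh                                  # build the SDK
ls _build/linux-x86_64/release/blast-sdk    # inspect the built SDK
```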
| 513 | Markdown | 33.266664 | 125 | 0.711501 |
NVIDIA-Omniverse/PhysX/blast/deps/target-deps.packman.xml | <project toolsVersion="5.0">
<dependency name="python27" linkPath="../_build/target-deps/python27">
<package name="python" version="2.7.14-windows-x64-1" platforms="windows-x86_64"/>
</dependency>
<dependency name="python36" linkPath="../_build/target-deps/python36">
<package name="python" version="3.6.7-windows-x86_64" platforms="windows-x86_64"/>
<package name="python" version="3.6.5-linux-x64" platforms="linux-x86_64"/>
<package name="python" version="3.6.8-34.a6e9b99d-linux-aarch64" platforms="linux-aarch64"/>
</dependency>
<dependency name="python37" linkPath="../_build/target-deps/python37">
<package name="python" version="3.7.9-windows-x86_64" platforms="windows-x86_64" />
<package name="python" version="3.7.9-173.e9ee4ea0-${platform}" platforms="linux-x86_64 linux-aarch64" />
</dependency>
<dependency name="doxygen" linkPath="_dependencies/doxygen">
<package name="doxygen" version="1.8.5-windows-x86_64" platforms="windows-x86_64" />
</dependency>
<dependency name="BoostMultiprecision" linkPath="../_build/target-deps/BoostMultiprecision">
<package name="BoostMultiprecision" version="1.64.0.1" platforms="windows-x86_64 linux-x86_64"/>
</dependency>
<dependency name="googletest" linkPath="../_build/target-deps/googletest">
<package name="googletest-win" version="1.4.0.2" platforms="windows-x86_64"/>
<package name="googletest-linux-x86_64" version="1.8.0.1" platforms="linux-x86_64"/>
</dependency>
</project>
| 1,504 | XML | 50.89655 | 109 | 0.704122 |
NVIDIA-Omniverse/PhysX/blast/deps/repo-deps.packman.xml | <project toolsVersion="5.0">
<dependency name="repo_man" linkPath="../_repo/deps/repo_man">
<package name="repo_man" version="1.48.1" />
</dependency>
<dependency name="repo_build" linkPath="../_repo/deps/repo_build" tags="non-redist">
<package name="repo_build" version="0.55.3" />
</dependency>
</project>
| 324 | XML | 35.111107 | 86 | 0.657407 |
NVIDIA-Omniverse/PhysX/blast/deps/host-deps.packman.xml | <project toolsVersion="6.11">
<dependency name="premake" linkPath="../_build/host-deps/premake">
<package name="premake" version="5.0.9-nv-main-68e9a88a-${platform}" />
</dependency>
<dependency name="msvc" linkPath="../_build/host-deps/msvc">
<package name="msvc" version="2017-15.9.17-1" platforms="windows-x86_64" />
</dependency>
<dependency name="winsdk" linkPath="../_build/host-deps/winsdk">
<package name="winsdk" version="10.17763" platforms="windows-x86_64"/>
</dependency>
<dependency name="llvm" linkPath="../_build/host-deps/llvm">
<package name="llvm" version="6.0.0-linux-x86_64" platforms="linux-x86_64 linux-aarch64"/>
</dependency>
<dependency name="gcc_x64" linkPath="../_build/host-deps/gcc-x86_64">
<package name="gcc" version="9.2.0-binutils-2.30-x86_64-pc-linux-gnu-2" platforms="linux-x86_64"/>
</dependency>
<dependency name="gcc_aarch64" linkPath="../_build/host-deps/gcc-aarch64">
<package name="gcc" version="9.2.0-aarch64-pc-linux-gnu" platforms="linux-aarch64"/>
</dependency>
<dependency name="mirror" linkPath="../_build/host-deps/mirror">
<package name="mirror" version="0.1.110-a2df2ebd-windows-x86_64" platforms="windows-x86_64" />
<package name="mirror" version="0.1.100-81448125-linux-x86_64" platforms="linux-x86_64" />
<package name="mirror" version="0.1.100-dev-linux-aarch64" platforms="linux-aarch64" />
</dependency>
<dependency name="linbuild" linkPath="../_build/host-deps/linbuild">
<package name="linbuild" version="1.10.112-044606b-aarch64" platforms="linux-aarch64" />
<package name="linbuild" version="1.10.112-044606b-x86_64" platforms="linux-x86_64" />
</dependency>
<!-- <dependency name="omnitrace_tools" linkPath="../_build/host-deps/omni-trace-tools">
<package name="omnitrace-tools" version="0.5.62a10f54-$platform-release-dev"/>
</dependency> -->
<dependency name="CapnProto" linkPath="../_build/host-deps/CapnProto">
<package name="CapnProto" version="0.6.1.4" platforms="windows-x86_64 linux-x86_64" />
</dependency>
</project>
| 2,080 | XML | 56.805554 | 102 | 0.689904 |
NVIDIA-Omniverse/PhysX/blast/tools/packman/config.packman.xml | <config remotes="cloudfront">
<remote2 name="cloudfront">
<transport actions="download" protocol="http" packageLocation="d4i3qtqj3r0z5.cloudfront.net/${name}@${version}" />
<transport actions="upload" protocol="s3" packageLocation="packages-for-cloudfront" />
<transport actions="list" protocol="http" packageLocation="omnipackages.nvidia.com/api/v1/list/cloudfront" />
</remote2>
</config>
| 423 | XML | 51.999994 | 122 | 0.70922 |
NVIDIA-Omniverse/PhysX/blast/PACKAGE-LICENSES/vhacd-LICENSE.md | Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. The names of the contributors may not be used to endorse or promote
products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
| 1,468 | Markdown | 49.655171 | 78 | 0.815395 |
NVIDIA-Omniverse/PhysX/blast/source/test/src/TkBaseTest.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef TKBASETEST_H
#define TKBASETEST_H
#include "NvBlastTk.h"
#include "NvBlastTkActor.h"
#include "NvTaskManager.h"
#include "NvBlastTkGroupTaskManager.h"
#include "NvCpuDispatcher.h"
#include "NsGlobals.h"
#include "BlastBaseTest.h"
#include "NvBlastExtDamageShaders.h"
#include "NvBlastIndexFns.h"
#include "TestProfiler.h"
#include "NvTask.h"
#include <thread>
#include <algorithm>
#include <queue>
#include <mutex>
#include <condition_variable>
#include <atomic>
using namespace Nv::Blast;
using namespace nvidia;
using namespace nvidia::task;
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Helpers
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
NV_INLINE void ExpectArrayMatch(TkObject** arr0, size_t size0, TkObject** arr1, size_t size1)
{
EXPECT_TRUE(size0 == size1);
std::set<TkObject*> set0(arr0, arr0 + size0);
std::set<TkObject*> set1(arr1, arr1 + size1);
EXPECT_TRUE(set0 == set1);
}
class TestCpuDispatcher : public NvCpuDispatcher
{
struct SharedContext
{
std::queue<NvBaseTask*> workQueue;
std::condition_variable cv;
std::mutex mutex;
std::atomic<bool> quit;
};
void submitTask(NvBaseTask& task) override
{
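        // With worker threads available, queue the task and wake a single worker;
        // with no workers, run the task synchronously on the calling thread.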
if (m_threads.size() > 0)
{
std::unique_lock<std::mutex> lk(m_context.mutex);
m_context.workQueue.push(&task);
lk.unlock();
m_context.cv.notify_one();
}
else
{
TEST_ZONE_BEGIN(task.getName());
task.run();
TEST_ZONE_END(task.getName());
task.release();
}
}
uint32_t getWorkerCount() const override { return (uint32_t)m_threads.size(); }
static void execute(SharedContext& context)
{
while (!context.quit)
{
std::unique_lock<std::mutex> lk(context.mutex);
if (!context.workQueue.empty())
{
NvBaseTask& task = *context.workQueue.front();
context.workQueue.pop();
lk.unlock();
TEST_ZONE_BEGIN(task.getName());
task.run();
TEST_ZONE_END(task.getName());
task.release();
}
else
{
// shared variables must be modified under the mutex in order
// to correctly publish the modification to the waiting thread
context.cv.wait(lk, [&]{ return !context.workQueue.empty() || context.quit; });
}
}
}
SharedContext m_context;
std::vector<std::thread> m_threads;
public:
TestCpuDispatcher(uint32_t numWorkers)
{
m_context.quit = false;
for (uint32_t i = 0; i < numWorkers; ++i)
{
m_threads.push_back(std::thread(execute, std::ref(m_context)));
}
}
void release()
{
std::unique_lock<std::mutex> lk(m_context.mutex);
m_context.quit = true;
lk.unlock();
m_context.cv.notify_all();
for (std::thread& t : m_threads)
{
t.join();
}
delete this;
}
};
struct CSParams
{
CSParams(uint32_t axis_, float coord_) : axis(axis_), coord(coord_) {}
uint32_t axis;
float coord;
};
static void CubeSlicer(NvBlastFractureBuffers* outbuf, const NvBlastGraphShaderActor* actor, const void* params)
{
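    // Test damage shader: walks the actor's support graph and fractures every bond whose
    // centroid lies exactly on the plane selected by CSParams (axis, coord). Each bond is
    // visited once, and output is capped by the size of the provided fracture buffer.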
uint32_t bondFractureCount = 0;
uint32_t bondFractureCountMax = outbuf->bondFractureCount;
const CSParams& p = *reinterpret_cast<const CSParams*> (reinterpret_cast<const NvBlastExtProgramParams*>(params)->damageDesc);
uint32_t currentNodeIndex = actor->firstGraphNodeIndex;
while (!Nv::Blast::isInvalidIndex(currentNodeIndex))
{
for (uint32_t adj = actor->adjacencyPartition[currentNodeIndex]; adj < actor->adjacencyPartition[currentNodeIndex + 1]; ++adj)
{
if (currentNodeIndex < actor->adjacentNodeIndices[adj])
{
if (actor->assetBonds[actor->adjacentBondIndices[adj]].centroid[p.axis] == p.coord && bondFractureCount < bondFractureCountMax)
{
NvBlastBondFractureData& data = outbuf->bondFractures[bondFractureCount++];
data.userdata = 0;
data.nodeIndex0 = currentNodeIndex;
data.nodeIndex1 = actor->adjacentNodeIndices[adj];
data.health = 1.0f;
}
}
}
currentNodeIndex = actor->graphNodeIndexLinks[currentNodeIndex];
}
outbuf->bondFractureCount = bondFractureCount;
outbuf->chunkFractureCount = 0;
//printf("slicer outcount %d\n", bondFractureCount);
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// TkBaseTest Class
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template<int FailLevel, int Verbosity>
class TkBaseTest : public BlastBaseTest<FailLevel, Verbosity>
{
public:
TkBaseTest() : m_cpuDispatcher(), m_taskman(nullptr)
{
}
virtual void SetUp() override
{
NvBlastInternalProfilerSetDetail(Nv::Blast::InternalProfilerDetail::LOW);
NvBlastInternalProfilerSetPlatformEnabled(true);
m_cpuDispatcher = new TestCpuDispatcher(4);
m_taskman = NvTaskManager::createTaskManager(*NvBlastGlobalGetErrorCallback(), m_cpuDispatcher);
m_groupTM = TkGroupTaskManager::create(*m_taskman);
}
virtual void TearDown() override
{
m_groupTM->release();
m_cpuDispatcher->release();
if (m_taskman) m_taskman->release();
}
void createFramework()
{
TkFramework* framework = NvBlastTkFrameworkCreate();
EXPECT_TRUE(framework != nullptr);
EXPECT_EQ(framework, NvBlastTkFrameworkGet());
}
void releaseFramework()
{
TkFramework* framework = NvBlastTkFrameworkGet();
framework->release();
EXPECT_TRUE(NvBlastTkFrameworkGet() == nullptr);
}
void createTestAssets(bool addInternalJoints = false)
{
const uint8_t cube1BondDescFlags_internalJoints[12] =
{
TkAssetDesc::NoFlags,
TkAssetDesc::NoFlags,
TkAssetDesc::NoFlags,
TkAssetDesc::NoFlags,
TkAssetDesc::NoFlags,
TkAssetDesc::NoFlags,
TkAssetDesc::NoFlags,
TkAssetDesc::NoFlags,
TkAssetDesc::BondJointed,
TkAssetDesc::BondJointed,
TkAssetDesc::BondJointed,
TkAssetDesc::BondJointed
};
const uint32_t assetDescCount = sizeof(g_assetDescs) / sizeof(g_assetDescs[0]);
TkFramework* framework = NvBlastTkFrameworkGet();
for (uint32_t i = 0; i < assetDescCount; ++i)
{
TkAssetDesc desc;
reinterpret_cast<NvBlastAssetDesc&>(desc) = g_assetDescs[i];
desc.bondFlags = addInternalJoints ? cube1BondDescFlags_internalJoints : nullptr;
testAssets.push_back(framework->createAsset(desc));
EXPECT_TRUE(testAssets[i] != nullptr);
}
}
TkAsset* createCubeAsset(size_t maxDepth, size_t width, int32_t supportDepth = -1, bool addInternalJoints = false)
{
TkFramework* framework = NvBlastTkFrameworkGet();
GeneratorAsset cube;
TkAssetDesc assetDesc;
generateCube(cube, assetDesc, maxDepth, width, supportDepth);
std::vector<uint8_t> bondFlags(assetDesc.bondCount);
std::fill(bondFlags.begin(), bondFlags.end(), addInternalJoints ? 1 : 0);
assetDesc.bondFlags = bondFlags.data();
TkAsset* cubeAsset = framework->createAsset(assetDesc);
testAssets.push_back(cubeAsset);
return cubeAsset;
}
void releaseTestAssets()
{
for (uint32_t i = 0; i < testAssets.size(); ++i)
{
testAssets[i]->release();
}
testAssets.clear();
}
NvBlastExtRadialDamageDesc getRadialDamageDesc(float x, float y, float z, float minRadius = 10.0f, float maxRadius = 10.0f, float damage = 1.0f)
{
NvBlastExtRadialDamageDesc desc;
desc.position[0] = x;
desc.position[1] = y;
desc.position[2] = z;
desc.minRadius = minRadius;
desc.maxRadius = maxRadius;
desc.damage = damage;
return desc;
}
NvBlastExtShearDamageDesc getShearDamageDesc(float x, float y, float z, float shearX = 1.0f, float shearY = 0.0f, float shearZ = 0.0f, float minRadius = 10.0f, float maxRadius = 10.0f, float damage = 1.0f)
{
NvBlastExtShearDamageDesc desc;
desc.position[0] = x;
desc.position[1] = y;
desc.position[2] = z;
desc.normal[0] = shearX;
desc.normal[1] = shearY;
desc.normal[2] = shearZ;
desc.minRadius = minRadius;
desc.maxRadius = maxRadius;
desc.damage = damage;
return desc;
}
static const NvBlastDamageProgram& getCubeSlicerProgram()
{
static NvBlastDamageProgram program = { CubeSlicer, nullptr };
return program;
}
static const NvBlastDamageProgram& getFalloffProgram()
{
static NvBlastDamageProgram program = { NvBlastExtFalloffGraphShader, NvBlastExtFalloffSubgraphShader };
return program;
}
static const NvBlastDamageProgram& getShearProgram()
{
static NvBlastDamageProgram program = { NvBlastExtShearGraphShader, NvBlastExtShearSubgraphShader };
return program;
}
static const NvBlastExtMaterial* getDefaultMaterial()
{
static NvBlastExtMaterial material;
return &material;
};
TkFamily* familySerialization(TkFamily* family);
std::vector<TkAsset*> testAssets;
TestCpuDispatcher* m_cpuDispatcher;
NvTaskManager* m_taskman;
TkGroupTaskManager* m_groupTM;
};
#define TkNvErrorMask (NvErrorCode::eINVALID_PARAMETER | NvErrorCode::eINVALID_OPERATION | NvErrorCode::eOUT_OF_MEMORY | NvErrorCode::eINTERNAL_ERROR | NvErrorCode::eABORT)
#define TkNvWarningMask (NvErrorCode::eDEBUG_WARNING | NvErrorCode::ePERF_WARNING)
typedef TkBaseTest<NvBlastMessage::Error, 1> TkTestAllowWarnings;
typedef TkBaseTest<NvBlastMessage::Warning, 1> TkTestStrict;
class TestFamilyTracker : public TkEventListener
{
public:
TestFamilyTracker() {}
typedef std::pair<TkFamily*, uint32_t> Actor;
virtual void receive(const TkEvent* events, uint32_t eventCount) override
{
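        // Mirrors Tk events into local bookkeeping: Split events swap the parent actor for
        // its children in 'actors', fracture events only sanity-check the actor index, and
        // joint events insert/erase entries in 'joints'.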
TEST_ZONE_BEGIN("TestFamilyTracker");
for (size_t i = 0; i < eventCount; ++i)
{
const TkEvent& e = events[i];
switch (e.type)
{
case (TkEvent::Split):
{
const TkSplitEvent* splitEvent = e.getPayload<TkSplitEvent>();
EXPECT_EQ((size_t)1, actors.erase(Actor(splitEvent->parentData.family, splitEvent->parentData.index)));
                for (size_t j = 0; j < splitEvent->numChildren; ++j)
                {
                    TkActor* a = splitEvent->children[j];
EXPECT_TRUE(actors.insert(Actor(&a->getFamily(), a->getIndex())).second);
}
break;
}
case (TkEvent::FractureCommand):
{
const TkFractureCommands* fracEvent = e.getPayload<TkFractureCommands>();
EXPECT_TRUE(!isInvalidIndex(fracEvent->tkActorData.index));
#if 0
printf("chunks broken: %d\n", fracEvent->buffers.chunkFractureCount);
printf("bonds broken: %d\n", fracEvent->buffers.bondFractureCount);
for (uint32_t t = 0; t < fracEvent->buffers.bondFractureCount; t++)
{
//printf("%x ", fracEvent->buffers.bondFractures[t].userdata);
}
//printf("\n");
#endif
break;
}
case (TkEvent::FractureEvent):
{
const TkFractureEvents* fracEvent = e.getPayload<TkFractureEvents>();
EXPECT_TRUE(!isInvalidIndex(fracEvent->tkActorData.index));
break;
}
case (TkEvent::JointUpdate):
{
const TkJointUpdateEvent* jointEvent = e.getPayload<TkJointUpdateEvent>();
TkJoint* joint = jointEvent->joint;
EXPECT_TRUE(joint != nullptr);
switch (jointEvent->subtype)
{
case TkJointUpdateEvent::External:
EXPECT_TRUE(joints.end() == joints.find(joint)); // We should not have this joint yet
joints.insert(joint);
break;
case TkJointUpdateEvent::Changed:
break;
case TkJointUpdateEvent::Unreferenced:
EXPECT_EQ(1, joints.erase(joint));
joint->release();
break;
}
break;
}
default:
break;
}
}
TEST_ZONE_END("TestFamilyTracker");
}
void insertActor(const TkActor* actor)
{
actors.insert(TestFamilyTracker::Actor(&actor->getFamily(), actor->getIndex()));
}
void eraseActor(const TkActor* actor)
{
actors.erase(TestFamilyTracker::Actor(&actor->getFamily(), actor->getIndex()));
}
std::set<Actor> actors;
std::set<TkJoint*> joints;
};
#endif // #ifndef TKBASETEST_H
| 15,398 | C | 32.403471 | 209 | 0.589622 |
NVIDIA-Omniverse/PhysX/blast/source/test/src/BlastBaseTest.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef BLASTBASETEST_H
#define BLASTBASETEST_H
#include "NvBlastTkFramework.h"
#include "gtest/gtest.h"
#include "NvBlast.h"
#include "TestAssets.h"
#include "NvBlastGlobals.h"
#include <ostream>
template<int FailLevel, int Verbosity>
class BlastBaseTest : public testing::Test, public nvidia::NvErrorCallback
{
public:
BlastBaseTest()
{
NvBlastGlobalSetErrorCallback(this);
}
// A zeroing alloc with the same signature as malloc
static void* alignedZeroedAlloc(size_t size)
{
return memset(NVBLAST_ALLOC(size), 0, size);
}
static void alignedFree(void* mem)
{
NVBLAST_FREE(mem);
}
// Message log for blast functions
static void messageLog(int type, const char* msg, const char* file, int line)
{
if (FailLevel >= type)
{
switch (type)
{
case NvBlastMessage::Error: EXPECT_TRUE(false) << "NvBlast Error message in " << file << "(" << line << "): " << msg << "\n"; break;
case NvBlastMessage::Warning: EXPECT_TRUE(false) << "NvBlast Warning message in " << file << "(" << line << "): " << msg << "\n"; break;
case NvBlastMessage::Info: EXPECT_TRUE(false) << "NvBlast Info message in " << file << "(" << line << "): " << msg << "\n"; break;
case NvBlastMessage::Debug: EXPECT_TRUE(false) << "NvBlast Debug message in " << file << "(" << line << "): " << msg << "\n"; break;
}
}
else
if (Verbosity > 0)
{
switch (type)
{
case NvBlastMessage::Error: std::cout << "NvBlast Error message in " << file << "(" << line << "): " << msg << "\n"; break;
case NvBlastMessage::Warning: std::cout << "NvBlast Warning message in " << file << "(" << line << "): " << msg << "\n"; break;
case NvBlastMessage::Info: std::cout << "NvBlast Info message in " << file << "(" << line << "): " << msg << "\n"; break;
case NvBlastMessage::Debug: std::cout << "NvBlast Debug message in " << file << "(" << line << "): " << msg << "\n"; break;
}
}
}
// nvidia::NvErrorCallback interface
virtual void reportError(nvidia::NvErrorCode::Enum code, const char* message, const char* file, int line) override
{
uint32_t failMask = 0;
switch (FailLevel)
{
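        // Intentional fall-through: a more verbose FailLevel also accumulates all of the
        // error codes handled by the cases below it into failMask.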
case NvBlastMessage::Debug:
case NvBlastMessage::Info: failMask |= nvidia::NvErrorCode::eDEBUG_INFO;
case NvBlastMessage::Warning: failMask |= nvidia::NvErrorCode::eDEBUG_WARNING;
        case NvBlastMessage::Error:     failMask |= nvidia::NvErrorCode::eABORT | nvidia::NvErrorCode::eINTERNAL_ERROR | nvidia::NvErrorCode::eOUT_OF_MEMORY | nvidia::NvErrorCode::eINVALID_OPERATION | nvidia::NvErrorCode::eINVALID_PARAMETER;
default: break;
}
if (!(failMask & code) && Verbosity <= 0)
{
return;
}
std::string output = "NvBlast Test ";
switch (code)
{
case nvidia::NvErrorCode::eNO_ERROR: break;
case nvidia::NvErrorCode::eDEBUG_INFO: output += "Debug Info"; break;
case nvidia::NvErrorCode::eDEBUG_WARNING: output += "Debug Warning"; break;
case nvidia::NvErrorCode::eINVALID_PARAMETER: output += "Invalid Parameter"; break;
case nvidia::NvErrorCode::eINVALID_OPERATION: output += "Invalid Operation"; break;
case nvidia::NvErrorCode::eOUT_OF_MEMORY: output += "Out of Memory"; break;
case nvidia::NvErrorCode::eINTERNAL_ERROR: output += "Internal Error"; break;
case nvidia::NvErrorCode::eABORT: output += "Abort"; break;
case nvidia::NvErrorCode::ePERF_WARNING: output += "Perf Warning"; break;
default: FAIL();
}
output += std::string(" message in ") + file + "(" + std::to_string(line) + "): " + message + "\n";
if (failMask & code)
{
EXPECT_TRUE(false) << output;
}
else
{
std::cout << output;
}
}
};
#endif // #ifndef BLASTBASETEST_H
| 5,900 | C | 41.760869 | 271 | 0.605424 |
NVIDIA-Omniverse/PhysX/blast/source/test/src/perf/BlastBasePerfTest.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef BLASTBASEPERFTEST_H
#define BLASTBASEPERFTEST_H
#include "BlastBaseTest.h"
#include <fstream>
#include <algorithm>
#include <map>
template<typename T>
class DataCollection
{
public:
struct Stats
{
double m_mean;
double m_sdev;
double m_min;
double m_max;
Stats()
{
reset();
}
void reset()
{
m_mean = 0.0;
m_sdev = 0.0;
m_min = std::numeric_limits<double>().max();
m_max = -std::numeric_limits<double>().max();
}
};
struct DataSet
{
std::vector<T> m_data;
Stats m_stats;
void calculateStats()
{
m_stats.reset();
if (m_data.size() > 0)
{
if (m_data.size() > 1) // Remove top half of values to eliminate outliers
{
std::sort(m_data.begin(), m_data.end());
m_data.resize(m_data.size() / 2);
}
for (size_t i = 0; i < m_data.size(); ++i)
{
m_stats.m_mean += m_data[i];
m_stats.m_min = std::min(m_stats.m_min, (double)m_data[i]);
m_stats.m_max = std::max(m_stats.m_max, (double)m_data[i]);
}
m_stats.m_mean /= m_data.size();
if (m_data.size() > 1)
{
for (size_t i = 0; i < m_data.size(); ++i)
{
m_stats.m_sdev += pow(m_data[i] - m_stats.m_mean, 2);
}
m_stats.m_sdev = sqrt(m_stats.m_sdev / (m_data.size() - 1));
}
}
}
};
DataSet& getDataSet(const std::string& name)
{
auto entry = m_lookup.find(name);
if (entry != m_lookup.end())
{
return m_dataSets[entry->second];
}
m_lookup[name] = m_dataSets.size();
m_dataSets.push_back(DataSet());
return m_dataSets.back();
}
bool dataSetExists(const std::string& name) const
{
return m_lookup.find(name) != m_lookup.end();
}
void calculateStats()
{
for (size_t i = 0; i < m_dataSets.size(); ++i)
{
m_dataSets[i].calculateStats();
}
}
void test(DataCollection<int64_t>& calibration, double relativeThreshold = 0.10, double tickThreshold = 100.0)
{
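        // Compares each data set's best (min) timing against the calibrated min; the test
        // only fails when the new min is worse by both the relative threshold and the
        // absolute tick threshold, and merely logs a note when it is significantly faster.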
for (auto entry = m_lookup.begin(); entry != m_lookup.end(); ++entry)
{
const std::string& name = entry->first;
DataCollection<int64_t>::DataSet& data = m_dataSets[entry->second];
data.calculateStats();
if (!calibration.dataSetExists(name))
{
FAIL() << "PerfTest is not calibrated!" << std::endl << "Missing DataSet: " << name << std::endl;
}
const DataCollection<int64_t>::DataSet& cal = calibration.getDataSet(name);
const double calMin = cal.m_stats.m_min;
if (data.m_stats.m_min > (1.0 + relativeThreshold) * calMin && data.m_stats.m_min - calMin > tickThreshold)
{
std::cout << name << ":" << std::endl;
std::cout << "PERF - : Timing (" << data.m_stats.m_min << ") exceeds recorded min (" << calMin << ") by more than allowed relative threshold (" << relativeThreshold*100 << "%) and absolute threshold (" << tickThreshold << " ticks)." << std::endl;
EXPECT_FALSE(data.m_stats.m_min > (1.0 + relativeThreshold) * calMin && data.m_stats.m_min - calMin > tickThreshold)
<< name << ":" << std::endl
<< "PERF - : Timing (" << data.m_stats.m_min << ") exceeds recorded min (" << calMin << ") by more than allowed relative threshold (" << relativeThreshold * 100 << "%) and absolute threshold (" << tickThreshold << " ticks)." << std::endl;
}
else
if (data.m_stats.m_min < (1.0 - relativeThreshold) * calMin && data.m_stats.m_min - calMin < -tickThreshold)
{
std::cout << name << ":" << std::endl;
std::cout << "PERF + : Timing (" << data.m_stats.m_min << ") is less than the recorded min (" << calMin << ") by more than the relative threshold (" << relativeThreshold * 100 << "%) and absolute threshold (" << tickThreshold << " ticks)." << std::endl;
}
}
}
size_t size() const
{
return m_dataSets.size();
}
void clear()
{
m_lookup.clear();
m_dataSets.clear();
}
template<class S>
friend std::istream& operator >> (std::istream& stream, DataCollection<S>& c);
template<class S>
friend std::ostream& operator << (std::ostream& stream, const DataCollection<S>& c);
private:
std::map<std::string, size_t> m_lookup;
std::vector< DataSet > m_dataSets;
};
template<typename T>
std::istream& operator >> (std::istream& stream, DataCollection<T>& c)
{
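    // Expected stream format (the counterpart of operator<< below): the data set name on
    // one line, followed by "mean sdev min max" on the next.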
std::string name;
while (!stream.eof())
{
std::getline(stream >> std::ws, name);
typename DataCollection<T>::DataSet& dataSet = c.getDataSet(name);
stream >> dataSet.m_stats.m_mean >> dataSet.m_stats.m_sdev >> dataSet.m_stats.m_min >> dataSet.m_stats.m_max >> std::ws;
}
return stream;
}
template<typename T>
std::ostream& operator << (std::ostream& stream, const DataCollection<T>& c)
{
for (auto entry = c.m_lookup.begin(); entry != c.m_lookup.end(); ++entry)
{
const std::string& name = entry->first;
stream << name.c_str() << std::endl;
const typename DataCollection<T>::DataSet& data = c.m_dataSets[entry->second];
stream << data.m_stats.m_mean << " " << data.m_stats.m_sdev << " " << data.m_stats.m_min << " " << data.m_stats.m_max << std::endl;
}
return stream;
}
static const char* getPlatformSuffix()
{
#if NV_WIN32
return "win32";
#elif NV_WIN64
return "win64";
#elif NV_LINUX
#if NV_X64
return "linux64";
#else
return "linux32";
#endif
#else
return "gen";
#endif
}
static const char* getPlatformRoot()
{
#if NV_LINUX
return "../../";
#else
return "../../../";
#endif
}
static std::string defaultRelativeDataPath()
{
const char* dataDir = "test/data/";
std::string rootDir = getPlatformRoot();
return rootDir + dataDir + getPlatformSuffix() + "/";
}
class PerfTestEngine
{
public:
PerfTestEngine(const char* collectionName) : m_calibrate(false)
{
m_filename = defaultRelativeDataPath() + std::string(collectionName) + "_" + getPlatformSuffix() + ".cal";
auto argvs = testing::internal::GetArgvs();
size_t argCount = argvs.size();
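        // Recognized command-line options: -calibrate regenerates the calibration file,
        // -calPath <file> overrides the default calibration file location.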
for (size_t argNum = 0; argNum < argCount; ++argNum)
{
if (argvs[argNum] == "-calibrate")
{
m_calibrate = true;
}
else
if (argvs[argNum] == "-calPath")
{
if (++argNum < argCount)
{
m_filename = argvs[argNum];
}
}
}
if (!m_calibrate)
{
std::ifstream in;
in.open(m_filename);
if (in.is_open())
{
std::string name;
std::getline(in, name); // Eat header
std::getline(in, name); // Eat header (2 lines)
in >> m_dataCalibration;
in.close();
}
m_calibrate = m_dataCalibration.size() == 0;
}
if (m_calibrate)
{
std::ofstream out;
out.open(m_filename);
if (out.is_open())
{
out << "Format: timing name (whole line)" << std::endl << "timing mean s.d. min max" << std::endl; // Header (2 lines)
out.close();
}
}
if (m_calibrate)
{
std::cout << "******** Calibration Mode ********\n";
}
else
{
std::cout << "******** Test Mode ********\n";
std::cout << "Read calibration data from " << m_filename << std::endl;
}
}
void endTest()
{
if (m_calibrate)
{
m_dataTempCollection.calculateStats();
std::ofstream out;
out.open(m_filename, std::ofstream::app);
if (out.is_open())
{
out << m_dataTempCollection;
out.close();
std::cout << "Calibration stats written to " << m_filename << std::endl;
}
else
{
std::cout << "Failed to open calibration file " << m_filename << ". Stats not written." << std::endl;
FAIL() << "Failed to open calibration file " << m_filename << ". Stats not written." << std::endl;
}
}
else
{
m_dataTempCollection.test(m_dataCalibration);
}
m_dataTempCollection.clear();
}
void reportData(const std::string& name, int64_t data)
{
m_dataTempCollection.getDataSet(name).m_data.push_back(data);
}
private:
std::string m_filename;
bool m_calibrate;
DataCollection<int64_t> m_dataTempCollection;
DataCollection<int64_t> m_dataCalibration;
};
template<int FailLevel, int Verbosity>
class BlastBasePerfTest : public BlastBaseTest<FailLevel, Verbosity>
{
public:
/**
    This function creates, destroys, and returns the PerfTestEngine held in a local static variable (works header-only).
    It keeps the PerfTestEngine alive through the whole life span of a gtest TestCase.
*/
static PerfTestEngine* getEngineDeadOrAlive(bool alive = true)
{
static PerfTestEngine* engine = nullptr;
if (alive && !engine)
{
engine = new PerfTestEngine(::testing::UnitTest::GetInstance()->current_test_case()->name());
}
else if (!alive && engine)
{
delete engine;
engine = nullptr;
}
return engine;
}
static void SetUpTestCase()
{
getEngineDeadOrAlive();
}
static void TearDownTestCase()
{
getEngineDeadOrAlive(false);
}
void TearDown() override
{
getEngineDeadOrAlive()->endTest();
}
void reportData(const std::string& name, int64_t data)
{
getEngineDeadOrAlive()->reportData(name, data);
}
};
#endif // #ifndef BLASTBASEPERFTEST_H
| 12,281 | C | 30.818653 | 269 | 0.537741 |
NVIDIA-Omniverse/PhysX/blast/source/test/src/perf/SolverPerfTests.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include "BlastBasePerfTest.h"
#include "TestAssets.h"
#include "NvBlastExtDamageShaders.h"
#include <memory>
static void blast
(
std::set<NvBlastActor*>& actorsToDamage,
GeneratorAsset* testAsset,
GeneratorAsset::Vec3 localPos,
float minRadius, float maxRadius,
float compressiveDamage,
NvBlastTimers& timers
)
{
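    // Perf-test helper: applies radial falloff damage at localPos to every actor in the
    // set, splits any actor that fractured, and replaces it in the set with its children.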
std::vector<NvBlastChunkFractureData> chunkEvents; /* num lower-support chunks + bonds */
std::vector<NvBlastBondFractureData> bondEvents; /* num lower-support chunks + bonds */
chunkEvents.resize(testAsset->solverChunks.size());
bondEvents.resize(testAsset->solverBonds.size());
NvBlastExtRadialDamageDesc damage[] = {
compressiveDamage,
{ localPos.x, localPos.y, localPos.z },
minRadius,
maxRadius
};
NvBlastExtProgramParams programParams =
{
damage,
nullptr
};
NvBlastDamageProgram program = {
NvBlastExtFalloffGraphShader,
nullptr
};
std::vector<char> splitScratch;
std::vector<NvBlastActor*> newActors(testAsset->solverChunks.size());
size_t totalNewActorsCount = 0;
for (std::set<NvBlastActor*>::iterator k = actorsToDamage.begin(); k != actorsToDamage.end();)
{
NvBlastActor* actor = *k;
NvBlastFractureBuffers events = { (uint32_t)bondEvents.size(), (uint32_t)chunkEvents.size(), bondEvents.data(), chunkEvents.data() };
NvBlastActorGenerateFracture(&events, actor, program, &programParams, nullptr, &timers);
NvBlastActorApplyFracture(&events, actor, &events, nullptr, &timers);
bool removeActor = false;
if (events.bondFractureCount + events.chunkFractureCount > 0)
{
splitScratch.resize((size_t)NvBlastActorGetRequiredScratchForSplit(actor, nullptr));
NvBlastActorSplitEvent result;
result.deletedActor = nullptr;
result.newActors = &newActors[totalNewActorsCount];
const size_t bufferSize = newActors.size() - totalNewActorsCount;
const size_t newActorsCount = NvBlastActorSplit(&result, actor, (uint32_t)bufferSize, splitScratch.data(), nullptr, &timers);
totalNewActorsCount += newActorsCount;
removeActor = newActorsCount > 0;
}
if (removeActor)
{
k = actorsToDamage.erase(k);
}
else
{
++k;
}
}
for (size_t i = 0; i < totalNewActorsCount; ++i)
{
actorsToDamage.insert(newActors[i]);
}
}
typedef BlastBasePerfTest<NvBlastMessage::Warning, 1> BlastBasePerfTestStrict;
class PerfTest : public BlastBasePerfTestStrict
{
public:
void damageLeafSupportActors(const char* testName, uint32_t assetCount, uint32_t familyCount, uint32_t damageCount)
{
const float relativeDamageRadius = 0.2f;
const float compressiveDamage = 1.0f;
const uint32_t minChunkCount = 100;
const uint32_t maxChunkCount = 10000;
srand(0);
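        // Fixed seed keeps the generated assets and damage positions deterministic across runs.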
for (uint32_t assetNum = 0; assetNum < assetCount; ++assetNum)
{
GeneratorAsset cube;
NvBlastAssetDesc desc;
generateRandomCube(cube, desc, minChunkCount, maxChunkCount);
{
std::vector<char> scratch;
scratch.resize((size_t)NvBlastGetRequiredScratchForCreateAsset(&desc, messageLog));
void* mem = alignedZeroedAlloc(NvBlastGetAssetMemorySize(&desc, messageLog));
NvBlastAsset* asset = NvBlastCreateAsset(mem, &desc, scratch.data(), messageLog);
EXPECT_TRUE(asset != nullptr);
                // Generate families
for (uint32_t familyNum = 0; familyNum < familyCount; ++familyNum)
{
// create actor
NvBlastActorDesc actorDesc;
actorDesc.initialBondHealths = nullptr;
actorDesc.uniformInitialBondHealth = 1.0f;
actorDesc.initialSupportChunkHealths = nullptr;
actorDesc.uniformInitialLowerSupportChunkHealth = 1.0f;
void* mem = alignedZeroedAlloc(NvBlastAssetGetFamilyMemorySize(asset, messageLog));
NvBlastFamily* family = NvBlastAssetCreateFamily(mem, asset, messageLog);
scratch.resize((size_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(family, messageLog));
EXPECT_TRUE(family != nullptr);
NvBlastActor* actor = NvBlastFamilyCreateFirstActor(family, &actorDesc, scratch.data(), messageLog);
EXPECT_TRUE(actor != nullptr);
// Generate damage
std::set<NvBlastActor*> actors;
actors.insert(actor);
for (uint32_t damageNum = 0; damageNum < damageCount; ++damageNum)
{
GeneratorAsset::Vec3 localPos = cube.extents*GeneratorAsset::Vec3((float)rand() / RAND_MAX - 0.5f, (float)rand() / RAND_MAX - 0.5f, (float)rand() / RAND_MAX - 0.5f);
NvBlastTimers timers;
NvBlastTimersReset(&timers);
blast(actors, &cube, localPos, relativeDamageRadius, relativeDamageRadius*1.2f, compressiveDamage, timers);
const std::string timingName = std::string(testName) + " asset " + std::to_string(assetNum) + " family " + std::to_string(familyNum) + " damage " + std::to_string(damageNum);
BlastBasePerfTestStrict::reportData(timingName + " material", timers.material);
BlastBasePerfTestStrict::reportData(timingName + " fracture", timers.fracture);
BlastBasePerfTestStrict::reportData(timingName + " island", timers.island);
BlastBasePerfTestStrict::reportData(timingName + " partition", timers.partition);
BlastBasePerfTestStrict::reportData(timingName + " visibility", timers.visibility);
}
// Release remaining actors
std::for_each(actors.begin(), actors.end(), [](NvBlastActor* a){ NvBlastActorDeactivate(a, messageLog); });
actors.clear();
alignedFree(family);
}
// Release asset data
alignedFree(asset);
}
}
}
};
#if 0
// Tests
TEST_F(PerfTest, DamageLeafSupportActorsTestVisibility)
{
const int trialCount = 1000;
std::cout << "Trial (of " << trialCount << "): ";
for (int trial = 1; trial <= trialCount; ++trial)
{
if (trial % 100 == 0)
{
std::cout << trial << ".. ";
std::cout.flush();
}
damageLeafSupportActors(test_info_->name(), 4, 4, 5);
}
std::cout << "done." << std::endl;
}
#endif | 8,467 | C++ | 40.920792 | 198 | 0.631392 |
NVIDIA-Omniverse/PhysX/blast/source/test/src/perf/DamagePerfTests.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include "BlastBasePerfTest.h"
#include "NvBlastExtDamageShaders.h"
#include "NvBlastExtSerialization.h"
#include "NvBlastTime.h"
#include "NvVec3.h"
#include "NvBounds3.h"
#include <memory>
#include <random>
#include <cstdio>
using namespace Nv::Blast;
using namespace nvidia;
static void blast
(
std::set<NvBlastActor*>& actorsToDamage,
GeneratorAsset* testAsset,
NvBlastExtDamageAccelerator* accelerator,
GeneratorAsset::Vec3 localPos,
float minRadius, float maxRadius,
float compressiveDamage,
std::vector<uint32_t>& history,
NvBlastTimers& timers)
{
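    // Same flow as the solver perf-test helper, but routes the damage program through an
    // optional NvBlastExtDamageAccelerator and records fracture counts in 'history' so
    // accelerated and non-accelerated runs can be compared for identical results.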
std::vector<NvBlastChunkFractureData> chunkEvents; /* num lower-support chunks + bonds */
std::vector<NvBlastBondFractureData> bondEvents; /* num lower-support chunks + bonds */
chunkEvents.resize(testAsset->solverChunks.size());
bondEvents.resize(testAsset->solverBonds.size());
NvBlastExtRadialDamageDesc damage = {
compressiveDamage,
{ localPos.x, localPos.y, localPos.z },
minRadius,
maxRadius
};
NvBlastExtMaterial material;
NvBlastExtProgramParams programParams =
{
&damage,
&material,
accelerator
};
NvBlastDamageProgram program = {
NvBlastExtFalloffGraphShader,
nullptr
};
std::vector<char> splitScratch;
std::vector<NvBlastActor*> newActors(testAsset->solverChunks.size());
size_t totalNewActorsCount = 0;
for (std::set<NvBlastActor*>::iterator k = actorsToDamage.begin(); k != actorsToDamage.end();)
{
NvBlastActor* actor = *k;
NvBlastFractureBuffers events = { (uint32_t)bondEvents.size(), (uint32_t)chunkEvents.size(), bondEvents.data(), chunkEvents.data() };
NvBlastActorGenerateFracture(&events, actor, program, &programParams, nullptr, &timers);
NvBlastActorApplyFracture(nullptr, actor, &events, nullptr, &timers);
bool removeActor = false;
if (events.bondFractureCount + events.chunkFractureCount > 0)
{
history.push_back(events.bondFractureCount + events.chunkFractureCount);
splitScratch.resize((size_t)NvBlastActorGetRequiredScratchForSplit(actor, nullptr));
NvBlastActorSplitEvent result;
result.deletedActor = nullptr;
result.newActors = &newActors[totalNewActorsCount];
const size_t bufferSize = newActors.size() - totalNewActorsCount;
const size_t newActorsCount = NvBlastActorSplit(&result, actor, (uint32_t)bufferSize, splitScratch.data(), nullptr, &timers);
totalNewActorsCount += newActorsCount;
removeActor = newActorsCount > 0;
}
if (removeActor)
{
k = actorsToDamage.erase(k);
}
else
{
++k;
}
}
for (size_t i = 0; i < totalNewActorsCount; ++i)
{
actorsToDamage.insert(newActors[i]);
}
}
typedef BlastBasePerfTest<NvBlastMessage::Warning, 1> BlastBasePerfTestStrict;
struct PerfResults
{
int64_t totalTime;
int64_t createTime;
};
class PerfTest : public BlastBasePerfTestStrict
{
public:
NvBlastAsset* loadAsset(const char* path, ExtSerialization* ser)
{
std::ifstream infileStream(path, std::ios::binary);
if (!infileStream.is_open())
{
return nullptr;
}
const std::vector<char> inBuffer((std::istreambuf_iterator<char>(infileStream)), std::istreambuf_iterator<char>());
infileStream.close();
NvBlastAsset* asset = static_cast<NvBlastAsset*>(ser->deserializeFromBuffer(inBuffer.data(), inBuffer.size()));
return asset;
}
PerfResults damageLeafSupportActors(const char* testName, uint32_t assetCount, uint32_t familyCount, uint32_t damageCount, int accelType, std::vector<uint32_t>& history)
{
PerfResults results;
results.totalTime = 0;
results.createTime = 0;
const float compressiveDamage = 1.0f;
const uint32_t minChunkCount = 1000;
const uint32_t maxChunkCount = 100000;
srand(0);
for (uint32_t assetNum = 0; assetNum < assetCount; ++assetNum)
{
GeneratorAsset cube;
NvBlastAssetDesc desc;
generateRandomCube(cube, desc, minChunkCount, maxChunkCount);
{
std::vector<char> scratch;
nvidia::NvBounds3 bounds = nvidia::NvBounds3::empty();
#if 1
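                // This branch fractures a procedurally generated cube and derives the bounds
                // from its extents; the #else branch instead deserializes an asset from disk
                // and builds the bounds from its bond centroids.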
scratch.resize((size_t)NvBlastGetRequiredScratchForCreateAsset(&desc, messageLog));
void* mem = alignedZeroedAlloc(NvBlastGetAssetMemorySize(&desc, messageLog));
NvBlastAsset* asset = NvBlastCreateAsset(mem, &desc, scratch.data(), messageLog);
EXPECT_TRUE(asset != nullptr);
bounds = nvidia::NvBounds3::centerExtents(nvidia::NvVec3(0, 0, 0), nvidia::NvVec3(cube.extents.x, cube.extents.y, cube.extents.z));
#else
// load asset
NvBlastAsset* asset = nullptr;
ExtSerialization* ser = NvBlastExtSerializationCreate();
for (int s = 0; s < 5 && !asset; s++)
{
asset = loadAsset(&"../../../../../test/assets/table.blast"[s * 3], ser);
}
EXPECT_TRUE(asset != nullptr);
ser->release();
uint32_t bc = NvBlastAssetGetBondCount(asset, messageLog);
const NvBlastBond* bonds = NvBlastAssetGetBonds(asset, messageLog);
for (uint32_t i = 0; i < bc; i++)
{
bounds.include(reinterpret_cast<const nvidia::NvVec3&>(bonds[i].centroid));
}
#endif
Nv::Blast::Time t;
NvBlastExtDamageAccelerator* accelerator = NvBlastExtDamageAcceleratorCreate(asset, accelType);
results.createTime += t.getElapsedTicks();
                // Generate families
for (uint32_t familyNum = 0; familyNum < familyCount; ++familyNum)
{
// create actor
NvBlastActorDesc actorDesc;
actorDesc.initialBondHealths = nullptr;
actorDesc.uniformInitialBondHealth = 1.0f;
actorDesc.initialSupportChunkHealths = nullptr;
actorDesc.uniformInitialLowerSupportChunkHealth = 1.0f;
void* mem = alignedZeroedAlloc(NvBlastAssetGetFamilyMemorySize(asset, messageLog));
NvBlastFamily* family = NvBlastAssetCreateFamily(mem, asset, messageLog);
scratch.resize((size_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(family, messageLog));
EXPECT_TRUE(family != nullptr);
NvBlastActor* actor = NvBlastFamilyCreateFirstActor(family, &actorDesc, scratch.data(), messageLog);
EXPECT_TRUE(actor != nullptr);
// Generate damage
std::set<NvBlastActor*> actors;
actors.insert(actor);
for (uint32_t damageNum = 0; damageNum < damageCount; ++damageNum)
{
GeneratorAsset::Vec3 localPos = GeneratorAsset::Vec3((float)rand() / RAND_MAX - 0.5f, (float)rand() / RAND_MAX - 0.5f, (float)rand() / RAND_MAX - 0.5f) * 2;
localPos.x *= bounds.getExtents().x;
localPos.y *= bounds.getExtents().y;
localPos.z *= bounds.getExtents().z;
const float relativeDamageRadius = (float)rand() / RAND_MAX * bounds.getExtents().maxElement();
NvBlastTimers timers;
NvBlastTimersReset(&timers);
blast(actors, &cube, accelerator, localPos, relativeDamageRadius, relativeDamageRadius*1.2f, compressiveDamage, history, timers);
const std::string timingName = std::string(testName) + " asset " + std::to_string(assetNum) + " family " + std::to_string(familyNum) + " damage " + std::to_string(damageNum) + " accel " + std::to_string(accelType);
BlastBasePerfTestStrict::reportData(timingName + " material", timers.material);
history.push_back((uint32_t)actors.size());
results.totalTime += timers.material;
history.push_back(0); // separator
}
// Release remaining actors
std::for_each(actors.begin(), actors.end(), [](NvBlastActor* a) { NvBlastActorDeactivate(a, messageLog); });
actors.clear();
alignedFree(family);
}
if (accelerator)
accelerator->release();
// Release asset data
alignedFree(asset);
}
}
return results;
}
};
// Tests
TEST_F(PerfTest, DISABLED_DamageRadialSimple)
{
const int trialCount = 10;
std::cout << "Trial (of " << trialCount << "): ";
for (int trial = 1; trial <= trialCount; ++trial)
{
if (trial % 100 == 0)
{
std::cout << trial << ".. ";
std::cout.flush();
}
std::vector<uint32_t> history1, history2;
uint32_t assetCount = 4;
uint32_t familyCount = 4;
uint32_t damageCount = 4;
PerfResults results0 = damageLeafSupportActors(test_info_->name(), assetCount, familyCount, damageCount, 0, history1);
BlastBasePerfTestStrict::reportData("DamageRadialSimple total0 " , results0.totalTime);
BlastBasePerfTestStrict::reportData("DamageRadialSimple create0 ", results0.createTime);
PerfResults results1 = damageLeafSupportActors(test_info_->name(), assetCount, familyCount, damageCount, 1, history2);
BlastBasePerfTestStrict::reportData("DamageRadialSimple total1 ", results1.totalTime);
BlastBasePerfTestStrict::reportData("DamageRadialSimple create1 ", results1.createTime);
EXPECT_TRUE(history1 == history2);
}
std::cout << "done." << std::endl;
}
| 11,692 | C++ | 39.884615 | 238 | 0.62453 |
NVIDIA-Omniverse/PhysX/blast/source/test/src/unit/SyncTests.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include "TkBaseTest.h"
#include "NvBlastExtPxSync.h"
#include "NvBlastTkEvent.h"
#include <map>
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// ExtSync Tests
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
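// Test outline: a Server instance damages two TkFamily objects while an ExtSync listener records the
// resulting sync events; the captured buffer is then replayed on a Client instance via applySyncBuffer,
// and the serialized final state of both runs (visible chunks, graph nodes, bond healths) must match.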
class Base
{
public:
Base(TkTestStrict* test) : m_test(test)
{
}
void run(std::stringstream& finalState)
{
//////// initial setup ////////
m_test->createTestAssets();
TkFramework* fwk = NvBlastTkFrameworkGet();
TkGroupDesc gdesc;
gdesc.workerCount = m_test->m_taskman->getCpuDispatcher()->getWorkerCount();
m_group = fwk->createGroup(gdesc);
EXPECT_TRUE(m_group != nullptr);
TkActorDesc adesc(m_test->testAssets[0]);
NvBlastID id;
TkActor* actor0 = fwk->createActor(adesc);
EXPECT_TRUE(actor0 != nullptr);
families[0] = &actor0->getFamily();
memcpy(id.data, "Mumble Jumble Bumble", sizeof(NvBlastID)); // Stuffing an arbitrary 16 bytes (The prefix of the given string)
families[0]->setID(id);
m_group->addActor(*actor0);
TkActor* actor1 = fwk->createActor(adesc);
EXPECT_TRUE(actor1 != nullptr);
families[1] = &actor1->getFamily();
memcpy(id.data, "buzzkillerdiller", sizeof(NvBlastID)); // Stuffing an arbitrary 16 bytes (The prefix of the given string)
families[1]->setID(id);
m_group->addActor(*actor1);
m_test->m_groupTM->setGroup(m_group);
//////// server/client specific impl ////////
impl();
//////// write out framework final state ////////
finalState.str(std::string()); // empty the stream (stringstream::clear() only resets error flags)
finalState.clear();
for (auto family : families)
{
std::vector<TkActor*> actors(family->getActorCount());
family->getActors(actors.data(), static_cast<uint32_t>(actors.size()));
for (auto actor : actors)
{
finalState << actor->getVisibleChunkCount();
finalState << actor->getGraphNodeCount();
std::vector<uint32_t> chunkIndices(actor->getVisibleChunkCount());
actor->getVisibleChunkIndices(chunkIndices.data(), (uint32_t)chunkIndices.size());
for (uint32_t chunkIndex : chunkIndices)
finalState << chunkIndex;
const float* bondHealths = actor->getBondHealths();
for (uint32_t i = 0; i < actor->getAsset()->getBondCount(); ++i)
finalState << bondHealths[i];
}
}
//////// release ////////
m_group->release();
for (auto family : families)
{
family->release();
}
m_test->releaseTestAssets();
}
protected:
virtual void impl() = 0;
TkTestStrict* m_test;
TkGroup* m_group;
TkFamily* families[2];
};
class Server : public Base
{
public:
Server(TkTestStrict* test, std::vector<ExtSyncEvent*>& syncBuffer) : Base(test), m_syncBuffer(syncBuffer) {}
protected:
virtual void impl() override
{
// create sync ext
ExtSync* sync = ExtSync::create();
// add sync as listener to family #1
families[1]->addListener(*sync);
// damage params
CSParams cs0(1, 0.0f);
NvBlastExtProgramParams csParams0 = { &cs0, nullptr };
NvBlastExtRadialDamageDesc radialDamage0 = m_test->getRadialDamageDesc(0, 0, 0);
NvBlastExtProgramParams radialParams0 = { &radialDamage0, nullptr };
NvBlastExtRadialDamageDesc radialDamage1 = m_test->getRadialDamageDesc(0, 0, 0, 10.0f, 10.0f, 0.1f);
NvBlastExtProgramParams radialParams1 = { &radialDamage1, nullptr };
// damage family #0 (make it split)
{
TkActor* actor;
families[0]->getActors(&actor, 1);
actor->damage(m_test->getCubeSlicerProgram(), &csParams0);
}
// process
m_test->m_groupTM->process();
m_test->m_groupTM->wait();
EXPECT_EQ(families[0]->getActorCount(), 2);
// sync family #0
sync->syncFamily(*families[0]);
// add sync as listener to family #0
families[0]->addListener(*sync);
// damage family #0 (make it split fully)
{
TkActor* actor;
families[0]->getActors(&actor, 1, 1);
actor->damage(m_test->getFalloffProgram(), &radialParams0);
}
// damage family #1 (only damages bond health; it should not split)
{
TkActor* actor;
families[1]->getActors(&actor, 1);
actor->damage(m_test->getFalloffProgram(), &radialParams1);
}
// process
m_test->m_groupTM->process();
m_test->m_groupTM->wait();
EXPECT_EQ(families[0]->getActorCount(), 5);
EXPECT_EQ(families[1]->getActorCount(), 1);
// take sync buffer from sync
{
const ExtSyncEvent*const* buffer;
uint32_t size;
sync->acquireSyncBuffer(buffer, size);
m_syncBuffer.resize(size);
for (size_t i = 0; i < size; ++i)
{
m_syncBuffer[i] = buffer[i]->clone();
}
sync->releaseSyncBuffer();
}
//
families[0]->removeListener(*sync);
families[1]->removeListener(*sync);
//
sync->release();
}
private:
std::vector<ExtSyncEvent*>& m_syncBuffer;
};
class Client : public Base, public TkEventListener
{
public:
Client(TkTestStrict* test, std::vector<ExtSyncEvent*>& syncBuffer) : Base(test), m_syncBuffer(syncBuffer) {}
protected:
virtual void impl() override
{
ExtSync* sync = ExtSync::create();
// fill map
for (auto& family : families)
{
std::vector<TkActor*> actors(family->getActorCount());
family->getActors(actors.data(), static_cast<uint32_t>(actors.size()));
auto& actorsSet = m_actorsPerFamily[family];
for (auto actor : actors)
EXPECT_TRUE(actorsSet.insert(actor->getIndex()).second);
}
// subscribe
for (auto& family : families)
{
family->addListener(*this);
}
// apply sync buffer
sync->applySyncBuffer(*NvBlastTkFrameworkGet(), (const Nv::Blast::ExtSyncEvent**)m_syncBuffer.data(), static_cast<uint32_t>(m_syncBuffer.size()), m_group);
// check map
for (auto& family : families)
{
std::vector<TkActor*> actors(family->getActorCount());
family->getActors(actors.data(), static_cast<uint32_t>(actors.size()));
std::set<uint32_t> actorsSet;
for (auto actor : actors)
EXPECT_TRUE(actorsSet.insert(actor->getIndex()).second);
EXPECT_TRUE(m_actorsPerFamily[family] == actorsSet);
}
// unsubscribe
for (auto& family : families)
{
family->removeListener(*this);
}
m_test->m_groupTM->process();
m_test->m_groupTM->wait();
sync->release();
}
// listen for Split event and update actors map
virtual void receive(const TkEvent* events, uint32_t eventCount) override
{
for (size_t i = 0; i < eventCount; ++i)
{
const TkEvent& e = events[i];
switch (e.type)
{
case (TkEvent::Split) :
{
const TkSplitEvent* splitEvent = e.getPayload<TkSplitEvent>();
auto& actorsSet = m_actorsPerFamily[splitEvent->parentData.family];
if (!isInvalidIndex(splitEvent->parentData.index))
{
EXPECT_EQ((size_t)1, actorsSet.erase(splitEvent->parentData.index));
}
for (size_t i = 0; i < splitEvent->numChildren; ++i)
{
TkActor* a = splitEvent->children[i];
EXPECT_TRUE(actorsSet.insert(a->getIndex()).second);
}
break;
}
case (TkEvent::FractureCommand) :
{
break;
}
case (TkEvent::JointUpdate) :
{
FAIL();
break;
}
default:
break;
}
}
}
private:
std::map<TkFamily*, std::set<uint32_t>> m_actorsPerFamily;
std::vector<ExtSyncEvent*>& m_syncBuffer;
};
TEST_F(TkTestStrict, SyncTest1)
{
this->createFramework();
std::vector<ExtSyncEvent*> syncBuffer;
std::stringstream serverFinalState;
{
Server s(this, syncBuffer);
s.run(serverFinalState);
}
EXPECT_TRUE(syncBuffer.size() > 0);
std::stringstream clientFinalState;
{
Client c(this, syncBuffer);
c.run(clientFinalState);
}
for (auto e : syncBuffer)
{
e->release();
}
syncBuffer.clear();
EXPECT_EQ(serverFinalState.str(), clientFinalState.str());
this->releaseFramework();
}
NVIDIA-Omniverse/PhysX/blast/source/test/src/unit/APITests.cpp
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include "BlastBaseTest.h"
#include "NvBlastIndexFns.h"
#include "NvBlastExtDamageShaders.h"
#include <algorithm>
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Utils / Tests Common
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
using namespace Nv::Blast;
class APITest : public BlastBaseTest < NvBlastMessage::Error, 1 >
{
};
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Tests
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
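// The tests below exercise the typical low-level Blast fracture flow:
// NvBlastCreateAsset -> NvBlastAssetCreateFamily -> NvBlastFamilyCreateFirstActor ->
// NvBlastActorGenerateFracture / NvBlastActorApplyFracture -> NvBlastActorSplit -> NvBlastActorDeactivate,
// with the family and asset memory released via alignedFree.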
TEST_F(APITest, Basic)
{
// create asset
const NvBlastAssetDesc& assetDesc = g_assetDescs[0];
std::vector<char> scratch((size_t)NvBlastGetRequiredScratchForCreateAsset(&assetDesc, messageLog));
void* amem = alignedZeroedAlloc(NvBlastGetAssetMemorySize(&assetDesc, messageLog));
NvBlastAsset* asset = NvBlastCreateAsset(amem, &assetDesc, scratch.data(), messageLog);
EXPECT_TRUE(asset != nullptr);
// create actor
NvBlastActorDesc actorDesc;
actorDesc.initialBondHealths = actorDesc.initialSupportChunkHealths = nullptr;
actorDesc.uniformInitialBondHealth = actorDesc.uniformInitialLowerSupportChunkHealth = 1.0f;
void* fmem = alignedZeroedAlloc(NvBlastAssetGetFamilyMemorySize(asset, messageLog));
NvBlastFamily* family = NvBlastAssetCreateFamily(fmem, asset, messageLog);
scratch.resize((size_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(family, messageLog));
NvBlastActor* actor = NvBlastFamilyCreateFirstActor(family, &actorDesc, scratch.data(), messageLog);
EXPECT_TRUE(actor != nullptr);
NvBlastExtRadialDamageDesc damage = {
10.0f, // compressive
{ 0.0f, 0.0f, 0.0f }, // position
4.0f, // min radius - maximum damage
6.0f // max radius - zero damage
};
NvBlastBondFractureData outFracture[12]; /*num lower-support chunks + bonds?*/
NvBlastFractureBuffers events;
events.bondFractureCount = 12;
events.bondFractures = outFracture;
events.chunkFractureCount = 0;
events.chunkFractures = nullptr;
NvBlastExtProgramParams programParams = { &damage, nullptr };
NvBlastDamageProgram program = {
NvBlastExtFalloffGraphShader,
nullptr
};
NvBlastActorGenerateFracture(&events, actor, program, &programParams, messageLog, nullptr);
NvBlastActorApplyFracture(&events, actor, &events, messageLog, nullptr);
EXPECT_TRUE(NvBlastActorIsSplitRequired(actor, messageLog));
EXPECT_EQ(12, events.bondFractureCount);
NvBlastActor* newActors[8]; /* num lower-support chunks? plus space for deletedActor */
NvBlastActorSplitEvent result;
result.deletedActor = nullptr;
result.newActors = newActors;
scratch.resize((size_t)NvBlastActorGetRequiredScratchForSplit(actor, messageLog));
size_t newActorsCount = NvBlastActorSplit(&result, actor, 8, scratch.data(), messageLog, nullptr);
EXPECT_EQ(8, newActorsCount);
EXPECT_EQ(true, result.deletedActor == actor);
for (uint32_t i = 0; i < newActorsCount; ++i)
{
const bool actorReleaseResult = NvBlastActorDeactivate(result.newActors[i], messageLog);
EXPECT_TRUE(actorReleaseResult);
}
alignedFree(family);
alignedFree(asset);
}
TEST_F(APITest, DamageBondsCompressive)
{
const size_t bondsCount = 6;
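// Layout: seven support chunks (1..7) joined into an open chain by six bonds whose centroids trace a
// ring around the origin. The radial damage below is centered at (4, 2, 0) with a 4..6 falloff band,
// so the closest bond should break fully and the two bonds inside the falloff band should take half
// damage (see expectedCommand).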
const NvBlastChunkDesc c_chunks[8] =
{
// centroid volume parent idx flags ID
{ {0.0f, 0.0f, 0.0f}, 0.0f, UINT32_MAX, NvBlastChunkDesc::NoFlags, 0 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 1 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 2 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 3 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 4 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 5 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 6 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 7 }
};
const NvBlastBondDesc c_bonds[bondsCount] =
{
{ { {-1.0f, 0.0f, 0.0f }, 1.0f, { 1.0f, 2.0f, 0.0f }, 0 }, { 1, 2 } },
{ { {-1.0f, 0.0f, 0.0f }, 1.0f, {-1.0f, 2.0f, 0.0f }, 0 }, { 2, 3 } },
{ { { 0.0f,-1.0f, 0.0f }, 1.0f, {-2.0f, 1.0f, 0.0f }, 0 }, { 3, 4 } },
{ { { 0.0f,-1.0f, 0.0f }, 1.0f, {-2.0f,-1.0f, 0.0f }, 0 }, { 4, 5 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, {-1.0f,-2.0f, 0.0f }, 0 }, { 5, 6 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 1.0f,-2.0f, 0.0f }, 0 }, { 6, 7 } }
};
// create asset
const NvBlastAssetDesc assetDesc = { 8, c_chunks, bondsCount, c_bonds };
std::vector<char> scratch((size_t)NvBlastGetRequiredScratchForCreateAsset(&assetDesc, messageLog));
void* amem = alignedZeroedAlloc(NvBlastGetAssetMemorySize(&assetDesc, messageLog));
NvBlastAsset* asset = NvBlastCreateAsset(amem, &assetDesc, scratch.data(), messageLog);
EXPECT_TRUE(asset != nullptr);
// create actor
NvBlastActorDesc actorDesc;
actorDesc.initialBondHealths = actorDesc.initialSupportChunkHealths = nullptr;
actorDesc.uniformInitialBondHealth = actorDesc.uniformInitialLowerSupportChunkHealth = 1.0f;
void* fmem = alignedZeroedAlloc(NvBlastAssetGetFamilyMemorySize(asset, messageLog));
NvBlastFamily* family = NvBlastAssetCreateFamily(fmem, asset, messageLog);
scratch.resize((size_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(family, messageLog));
NvBlastActor* actor = NvBlastFamilyCreateFirstActor(family, &actorDesc, scratch.data(), messageLog);
EXPECT_TRUE(actor != nullptr);
// get graph nodes check
std::vector<uint32_t> graphNodeIndices;
graphNodeIndices.resize(NvBlastActorGetGraphNodeCount(actor, nullptr));
uint32_t graphNodesCount = NvBlastActorGetGraphNodeIndices(graphNodeIndices.data(), (uint32_t)graphNodeIndices.size(), actor, nullptr);
EXPECT_EQ(graphNodesCount, 7);
NvBlastExtRadialDamageDesc damage = {
1.0f, // compressive
{ 4.0f, 2.0f, 0.0f }, // position
4.0f, // min radius - maximum damage
6.0f // max radius - zero damage
}; // linear falloff
NvBlastBondFractureData outCommands[bondsCount] = {
{ UINT32_MAX, UINT32_MAX, UINT32_MAX, 0 },
{ UINT32_MAX, UINT32_MAX, UINT32_MAX, 0 },
{ UINT32_MAX, UINT32_MAX, UINT32_MAX, 0 },
{ UINT32_MAX, UINT32_MAX, UINT32_MAX, 0 },
{ UINT32_MAX, UINT32_MAX, UINT32_MAX, 0 },
{ UINT32_MAX, UINT32_MAX, UINT32_MAX, 0 },
};
NvBlastFractureBuffers commands = {
6, 0, outCommands, nullptr
};
NvBlastExtProgramParams programParams = { &damage, nullptr };
NvBlastDamageProgram program = {
NvBlastExtFalloffGraphShader,
nullptr
};
NvBlastActorGenerateFracture(&commands, actor, program, &programParams, messageLog, nullptr);
ASSERT_EQ(3, commands.bondFractureCount);
ASSERT_EQ(0, commands.chunkFractureCount);
// node indices in _graph_ chunks
NvBlastBondFractureData expectedCommand[] = {
{ 0, 0, 1, 1.0f },
{ 0, 1, 2, 0.5f },
{ 0, 5, 6, 0.5f }
};
for (int i = 0; i < 3; i++)
{
EXPECT_EQ(expectedCommand[i].nodeIndex0, outCommands[i].nodeIndex0);
EXPECT_EQ(expectedCommand[i].nodeIndex1, outCommands[i].nodeIndex1);
EXPECT_EQ(expectedCommand[i].health, outCommands[i].health);
}
const bool actorReleaseResult = NvBlastActorDeactivate(actor, messageLog);
EXPECT_TRUE(actorReleaseResult);
alignedFree(family);
alignedFree(asset);
}
TEST_F(APITest, DirectFractureKillsChunk)
{
// 1--2
// | |
// 3--4 <-- kill 4
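// Killing support chunk 4 splits off its four subsupport children while chunks 1-3 remain one island;
// the test expects 5 resulting actors and checks that the original actor handle is among them.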
const NvBlastChunkDesc c_chunks[9] =
{
// centroid volume parent idx flags ID
{ {0.0f, 0.0f, 0.0f}, 0.0f, UINT32_MAX, NvBlastChunkDesc::NoFlags, 0 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 1 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 2 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 3 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 4 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 4, NvBlastChunkDesc::NoFlags, 5 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 4, NvBlastChunkDesc::NoFlags, 6 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 4, NvBlastChunkDesc::NoFlags, 7 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 4, NvBlastChunkDesc::NoFlags, 8 },
};
const NvBlastBondDesc c_bonds[4] =
{
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 0.0f, 1.0f, 0.0f }, 0 }, { 1, 2 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 0.0f,-1.0f, 0.0f }, 0 }, { 3, 4 } },
{ { { 0.0f,-1.0f, 0.0f }, 1.0f, {-1.0f, 0.0f, 0.0f }, 0 }, { 1, 3 } },
{ { { 0.0f,-1.0f, 0.0f }, 1.0f, { 1.0f, 0.0f, 0.0f }, 0 }, { 2, 4 } },
};
NvBlastAssetDesc assetDesc;
assetDesc.chunkCount = 9;
assetDesc.chunkDescs = c_chunks;
assetDesc.bondCount = 4;
assetDesc.bondDescs = c_bonds;
// create asset
std::vector<char> scratch((size_t)NvBlastGetRequiredScratchForCreateAsset(&assetDesc, messageLog));
void* amem = alignedZeroedAlloc(NvBlastGetAssetMemorySize(&assetDesc, messageLog));
NvBlastAsset* asset = NvBlastCreateAsset(amem, &assetDesc, scratch.data(), messageLog);
EXPECT_TRUE(asset != nullptr);
// create actor
NvBlastActorDesc actorDesc;
actorDesc.initialBondHealths = actorDesc.initialSupportChunkHealths = nullptr;
actorDesc.uniformInitialBondHealth = actorDesc.uniformInitialLowerSupportChunkHealth = 1.0f;
void* fmem = alignedZeroedAlloc(NvBlastAssetGetFamilyMemorySize(asset, messageLog));
NvBlastFamily* family = NvBlastAssetCreateFamily(fmem, asset, messageLog);
scratch.resize((size_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(family, messageLog));
NvBlastActor* actor = NvBlastFamilyCreateFirstActor(family, &actorDesc, scratch.data(), messageLog);
EXPECT_TRUE(actor != nullptr);
NvBlastChunkFractureData fractureCmd;
fractureCmd.chunkIndex = 4;
fractureCmd.health = 1.0f;
NvBlastFractureBuffers commands = { 0, 1, nullptr, &fractureCmd };
NvBlastChunkFractureData fractureEvt;
NvBlastFractureBuffers events = { 0, 1, nullptr, &fractureEvt };
NvBlastActorApplyFracture(&events, actor, &commands, messageLog, nullptr);
EXPECT_TRUE(NvBlastActorIsSplitRequired(actor, messageLog));
EXPECT_EQ(1, events.chunkFractureCount);
scratch.resize((size_t)NvBlastActorGetRequiredScratchForSplit(actor, messageLog));
std::vector<NvBlastActor*> newActors(NvBlastActorGetMaxActorCountForSplit(actor, messageLog));
NvBlastActorSplitEvent result;
result.deletedActor = nullptr;
result.newActors = newActors.data();
size_t newActorsCount = NvBlastActorSplit(&result, actor, static_cast<uint32_t>(newActors.size()), scratch.data(), messageLog, nullptr);
newActors.resize(newActorsCount);
EXPECT_EQ(5, newActorsCount);
EXPECT_EQ(actor, result.deletedActor);
// check newActors contain original actor
EXPECT_TRUE(std::any_of(newActors.begin(), newActors.end(), [&](const NvBlastActor* a) { return actor == a; }));
for (uint32_t i = 0; i < newActorsCount; ++i)
{
const bool actorReleaseResult = NvBlastActorDeactivate(result.newActors[i], messageLog);
EXPECT_TRUE(actorReleaseResult);
}
alignedFree(family);
alignedFree(asset);
}
TEST_F(APITest, DirectFractureKillsIslandRootChunk)
{
// 1--2 <-- kill 1
// | |
// 3--4
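// Same setup as above, but chunk 1 is fractured instead; the test again expects 5 actors and, in
// contrast to the previous test, verifies that the original actor handle is not among the new actors.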
const NvBlastChunkDesc c_chunks[9] =
{
// centroid volume parent idx flags ID
{ {0.0f, 0.0f, 0.0f}, 0.0f, UINT32_MAX, NvBlastChunkDesc::NoFlags, 0 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 1 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 2 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 3 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 4 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 1, NvBlastChunkDesc::NoFlags, 5 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 1, NvBlastChunkDesc::NoFlags, 6 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 1, NvBlastChunkDesc::NoFlags, 7 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 1, NvBlastChunkDesc::NoFlags, 8 },
};
const NvBlastBondDesc c_bonds[4] =
{
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 0.0f, 1.0f, 0.0f }, 0 }, { 1, 2 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 0.0f,-1.0f, 0.0f }, 0 }, { 3, 4 } },
{ { { 0.0f,-1.0f, 0.0f }, 1.0f, {-1.0f, 0.0f, 0.0f }, 0 }, { 1, 3 } },
{ { { 0.0f,-1.0f, 0.0f }, 1.0f, { 1.0f, 0.0f, 0.0f }, 0 }, { 2, 4 } },
};
NvBlastAssetDesc assetDesc;
assetDesc.chunkCount = 9;
assetDesc.chunkDescs = c_chunks;
assetDesc.bondCount = 4;
assetDesc.bondDescs = c_bonds;
// create asset
std::vector<char> scratch((size_t)NvBlastGetRequiredScratchForCreateAsset(&assetDesc, messageLog));
void* amem = alignedZeroedAlloc(NvBlastGetAssetMemorySize(&assetDesc, messageLog));
NvBlastAsset* asset = NvBlastCreateAsset(amem, &assetDesc, scratch.data(), messageLog);
EXPECT_TRUE(asset != nullptr);
// create actor
NvBlastActorDesc actorDesc;
actorDesc.initialBondHealths = actorDesc.initialSupportChunkHealths = nullptr;
actorDesc.uniformInitialBondHealth = actorDesc.uniformInitialLowerSupportChunkHealth = 1.0f;
void* fmem = alignedZeroedAlloc(NvBlastAssetGetFamilyMemorySize(asset, messageLog));
NvBlastFamily* family = NvBlastAssetCreateFamily(fmem, asset, messageLog);
scratch.resize((size_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(family, messageLog));
NvBlastActor* actor = NvBlastFamilyCreateFirstActor(family, &actorDesc, scratch.data(), messageLog);
EXPECT_TRUE(actor != nullptr);
NvBlastChunkFractureData fractureCmd;
fractureCmd.chunkIndex = 1;
fractureCmd.health = 1.0f;
NvBlastFractureBuffers commands = { 0, 1, nullptr, &fractureCmd };
NvBlastChunkFractureData fractureEvt;
NvBlastFractureBuffers events = { 0, 1, nullptr, &fractureEvt };
NvBlastActorApplyFracture(&events, actor, &commands, messageLog, nullptr);
EXPECT_TRUE(NvBlastActorIsSplitRequired(actor, messageLog));
EXPECT_EQ(1, events.chunkFractureCount);
scratch.resize((size_t)NvBlastActorGetRequiredScratchForSplit(actor, messageLog));
std::vector<NvBlastActor*> newActors(NvBlastActorGetMaxActorCountForSplit(actor, messageLog));
NvBlastActorSplitEvent result;
result.deletedActor = nullptr;
result.newActors = newActors.data();
size_t newActorsCount = NvBlastActorSplit(&result, actor, static_cast<uint32_t>(newActors.size()), scratch.data(), messageLog, nullptr);
newActors.resize(newActorsCount);
EXPECT_EQ(5, newActorsCount);
EXPECT_EQ(actor, result.deletedActor);
// check if newActors don't contain original actor
EXPECT_TRUE(!std::any_of(newActors.begin(), newActors.end(), [&](const NvBlastActor* a) { return actor == a; }));
for (uint32_t i = 0; i < newActorsCount; ++i)
{
const bool actorReleaseResult = NvBlastActorDeactivate(result.newActors[i], messageLog);
EXPECT_TRUE(actorReleaseResult);
}
alignedFree(family);
alignedFree(asset);
}
TEST_F(APITest, SubsupportFracture)
{
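// Checks how chunk fracture damage propagates to subsupport children (apparently only when the command
// exceeds the chunk's remaining health) and that chunks already broken by earlier commands are not
// reported again.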
const NvBlastAssetDesc& assetDesc = g_assetDescs[1]; // cube with subsupport
// create asset
std::vector<char> scratch((size_t)NvBlastGetRequiredScratchForCreateAsset(&assetDesc, messageLog));
void* amem = alignedZeroedAlloc(NvBlastGetAssetMemorySize(&assetDesc, messageLog));
NvBlastAsset* asset = NvBlastCreateAsset(amem, &assetDesc, scratch.data(), messageLog);
EXPECT_TRUE(asset != nullptr);
// create actor
NvBlastActorDesc actorDesc;
actorDesc.initialBondHealths = actorDesc.initialSupportChunkHealths = nullptr;
actorDesc.uniformInitialBondHealth = actorDesc.uniformInitialLowerSupportChunkHealth = 1.0f;
void* fmem = alignedZeroedAlloc(NvBlastAssetGetFamilyMemorySize(asset, messageLog));
NvBlastFamily* family = NvBlastAssetCreateFamily(fmem, asset, messageLog);
scratch.resize((size_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(family, messageLog));
NvBlastActor* actor = NvBlastFamilyCreateFirstActor(family, &actorDesc, scratch.data(), messageLog);
EXPECT_TRUE(actor != nullptr);
// first set of fracture commands
NvBlastChunkFractureData f1 = { 0, 1, 2.0f };
NvBlastChunkFractureData f3 = { 0, 3, 0.5f };
NvBlastChunkFractureData f5 = { 0, 5, 1.0f };
NvBlastChunkFractureData f7 = { 0, 7, 1.0f };
std::vector<NvBlastChunkFractureData> chunkFractureData;
chunkFractureData.reserve(assetDesc.chunkCount);
chunkFractureData.push_back(f1);
chunkFractureData.push_back(f3);
chunkFractureData.push_back(f5);
chunkFractureData.push_back(f7);
ASSERT_EQ(assetDesc.chunkCount, chunkFractureData.capacity());
ASSERT_EQ(4, chunkFractureData.size());
NvBlastFractureBuffers target = { 0, static_cast<uint32_t>(chunkFractureData.capacity()), nullptr, chunkFractureData.data() };
{
NvBlastFractureBuffers events = target;
NvBlastFractureBuffers commands = { 0, static_cast<uint32_t>(chunkFractureData.size()), nullptr, chunkFractureData.data() };
NvBlastActorApplyFracture(&events, actor, &commands, messageLog, nullptr);
EXPECT_TRUE(NvBlastActorIsSplitRequired(actor, messageLog));
ASSERT_EQ(4 + 8, events.chunkFractureCount); // all requested chunks take damage, and the children of one of them
}
// re-apply same set of commands
chunkFractureData.clear();
chunkFractureData.reserve(assetDesc.chunkCount);
chunkFractureData.push_back(f1);
chunkFractureData.push_back(f3);
chunkFractureData.push_back(f5);
chunkFractureData.push_back(f7);
ASSERT_EQ(assetDesc.chunkCount, chunkFractureData.capacity());
ASSERT_EQ(4, chunkFractureData.size());
{
NvBlastFractureBuffers events = target;
NvBlastFractureBuffers commands = { 0, static_cast<uint32_t>(chunkFractureData.size()), nullptr, chunkFractureData.data() };
NvBlastActorApplyFracture(&events, actor, &commands, messageLog, nullptr);
EXPECT_TRUE(NvBlastActorIsSplitRequired(actor, messageLog));
ASSERT_EQ(1, events.chunkFractureCount); // f3 has broken the chunk
}
// fracture all support chunks
// the chunks from the previous fractures must not be reported again (since they are all broken already)
NvBlastChunkFractureData f2 = { 0, 2, 2.0f }; // will damage chunk and children
NvBlastChunkFractureData f4 = { 0, 4, 0.5f }; // will damage chunk without creating children on split
NvBlastChunkFractureData f6 = { 0, 6, 2.0f }; // will damage chunk and children
NvBlastChunkFractureData f8 = { 0, 8, 1.0f }; // will damage chunk
chunkFractureData.clear();
chunkFractureData.reserve(assetDesc.chunkCount);
chunkFractureData.push_back(f1);
chunkFractureData.push_back(f2);
chunkFractureData.push_back(f3);
chunkFractureData.push_back(f4);
chunkFractureData.push_back(f5);
chunkFractureData.push_back(f6);
chunkFractureData.push_back(f7);
chunkFractureData.push_back(f8);
ASSERT_EQ(assetDesc.chunkCount, chunkFractureData.capacity());
ASSERT_EQ(8, chunkFractureData.size());
NvBlastFractureBuffers events = target;
{
NvBlastFractureBuffers commands = { 0, static_cast<uint32_t>(chunkFractureData.size()), nullptr, chunkFractureData.data() };
NvBlastActorApplyFracture(&events, actor, &commands, messageLog, nullptr);
EXPECT_TRUE(NvBlastActorIsSplitRequired(actor, messageLog));
ASSERT_EQ(4 + 8 + 8, events.chunkFractureCount); // the new fracture commands all apply, plus two of them damage their children too
}
for (size_t i = 0; i < events.chunkFractureCount; i++)
{
const uint32_t chunkIndex = events.chunkFractures[i].chunkIndex;
ASSERT_TRUE(chunkIndex != 1);
ASSERT_TRUE(chunkIndex != 3);
ASSERT_TRUE(chunkIndex != 5);
ASSERT_TRUE(chunkIndex != 7);
// literal values come from g_cube2ChunkDescs
bool isInSupportRange = chunkIndex <= 8 && chunkIndex >= 1;
bool isChildOfTwo = chunkIndex <= 24 && chunkIndex >= 17;
bool isChildOfSix = chunkIndex <= 56 && chunkIndex >= 49;
ASSERT_TRUE(isInSupportRange || isChildOfTwo || isChildOfSix);
}
scratch.resize((size_t)NvBlastActorGetRequiredScratchForSplit(actor, messageLog));
std::vector<NvBlastActor*> newActors(NvBlastActorGetMaxActorCountForSplit(actor, messageLog));
NvBlastActorSplitEvent result;
result.deletedActor = nullptr;
result.newActors = newActors.data();
size_t newActorsCount = NvBlastActorSplit(&result, actor, static_cast<uint32_t>(newActors.size()), scratch.data(), messageLog, nullptr);
newActors.resize(newActorsCount);
EXPECT_EQ(64 - 8 + 1, newActorsCount);
for (uint32_t i = 0; i < newActorsCount; ++i)
{
const bool actorReleaseResult = NvBlastActorDeactivate(result.newActors[i], messageLog);
EXPECT_TRUE(actorReleaseResult);
}
alignedFree(family);
alignedFree(asset);
}
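// myLog forwards to the base test logger and records that a message was emitted, letting tests assert
// on expected (or absent) warnings via the EXPECT_WARNING / EXPECT_NO_WARNING macros below.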
static bool hasWarned = false;
static void myLog(int type, const char* msg, const char* file, int line)
{
BlastBaseTest<-1, 0>::messageLog(type, msg, file, line);
hasWarned = true;
}
#define EXPECT_WARNING do { EXPECT_TRUE(hasWarned); hasWarned = false; } while (0)
#define EXPECT_NO_WARNING do { EXPECT_FALSE(hasWarned); hasWarned = false; } while (0)
TEST_F(APITest, FractureNoEvents)
{
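// Passing a null events buffer to NvBlastActorApplyFracture must be legal: the fracture is still
// applied (a split becomes required) and no warning is emitted.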
static const uint32_t GUARD = 0xb1a57;
const uint32_t chunksCount = 17;
const NvBlastChunkDesc c_chunks[chunksCount] =
{
// centroid volume parent idx flags ID
{ {0.0f, 0.0f, 0.0f}, 0.0f, UINT32_MAX, NvBlastChunkDesc::NoFlags, 0 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 1 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 2 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 3 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 4 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 1, NvBlastChunkDesc::NoFlags, 5 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 1, NvBlastChunkDesc::NoFlags, 6 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 2, NvBlastChunkDesc::NoFlags, 7 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 2, NvBlastChunkDesc::NoFlags, 8 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 5, NvBlastChunkDesc::NoFlags, 9 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 5, NvBlastChunkDesc::NoFlags, 10 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 6, NvBlastChunkDesc::NoFlags, 11 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 6, NvBlastChunkDesc::NoFlags, 12 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 7, NvBlastChunkDesc::NoFlags, 13 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 7, NvBlastChunkDesc::NoFlags, 14 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 8, NvBlastChunkDesc::NoFlags, 15 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 8, NvBlastChunkDesc::NoFlags, 16 },
};
const NvBlastBondDesc c_bonds[3] =
{
// normal, area, centroid, userdata, chunks
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 1.0f, 0.0f, 0.0f }, 0 }, { 1, 2 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 2.0f, 0.0f, 0.0f }, 0 }, { 2, 3 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 3.0f, 0.0f, 0.0f }, 0 }, { 3, 4 } },
};
NvBlastAssetDesc assetDesc = { chunksCount, c_chunks, 3, c_bonds };
// create asset
std::vector<char> scratch((size_t)NvBlastGetRequiredScratchForCreateAsset(&assetDesc, myLog));
void* amem = alignedZeroedAlloc(NvBlastGetAssetMemorySize(&assetDesc, myLog));
NvBlastAsset* asset = NvBlastCreateAsset(amem, &assetDesc, scratch.data(), myLog);
EXPECT_TRUE(asset != nullptr);
// create actor
NvBlastActorDesc actorDesc;
actorDesc.initialBondHealths = actorDesc.initialSupportChunkHealths = nullptr;
actorDesc.uniformInitialBondHealth = actorDesc.uniformInitialLowerSupportChunkHealth = 1.0f;
void* fmem = alignedZeroedAlloc(NvBlastAssetGetFamilyMemorySize(asset, myLog));
NvBlastFamily* family = NvBlastAssetCreateFamily(fmem, asset, myLog);
scratch.resize((size_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(family, myLog));
NvBlastActor* actor = NvBlastFamilyCreateFirstActor(family, &actorDesc, scratch.data(), myLog);
EXPECT_TRUE(actor != nullptr);
std::vector<NvBlastChunkFractureData> cfData;
cfData.resize(0 + 1);
cfData[cfData.size() - 1].userdata = GUARD;
std::vector<NvBlastBondFractureData> bfData;
NvBlastChunkFractureData command[] =
{
{ 0, 1, 10.0f },
{ 0, 2, 10.0f },
};
NvBlastFractureBuffers commands = { 0, 2, nullptr, command };
NvBlastActorApplyFracture(nullptr, actor, &commands, myLog, nullptr);
EXPECT_TRUE(NvBlastActorIsSplitRequired(actor, messageLog));
EXPECT_NO_WARNING; // events can be null
EXPECT_EQ(GUARD, cfData[cfData.size() - 1].userdata);
scratch.resize((size_t)NvBlastActorGetRequiredScratchForSplit(actor, myLog));
std::vector<NvBlastActor*> newActors(NvBlastActorGetMaxActorCountForSplit(actor, myLog));
NvBlastActorSplitEvent result;
result.deletedActor = nullptr;
result.newActors = newActors.data();
size_t newActorsCount = NvBlastActorSplit(&result, actor, static_cast<uint32_t>(newActors.size()), scratch.data(), myLog, nullptr);
newActors.resize(newActorsCount);
EXPECT_EQ(9, newActorsCount);
for (uint32_t i = 0; i < newActorsCount; ++i)
{
const bool actorReleaseResult = NvBlastActorDeactivate(result.newActors[i], myLog);
EXPECT_TRUE(actorReleaseResult);
}
alignedFree(family);
alignedFree(asset);
EXPECT_NO_WARNING;
}
TEST_F(APITest, FractureBufferLimits)
{
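// The events buffer capacity is swept from 0 to 13, always smaller than the 14 chunk events this
// fracture produces; the library is expected to warn, truncate to the given capacity, and leave the
// trailing GUARD entry untouched. A final run with capacity 14 must succeed without warnings.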
static const uint32_t GUARD = 0xb1a57;
const uint32_t chunksCount = 17;
const NvBlastChunkDesc c_chunks[chunksCount] =
{
// centroid volume parent idx flags ID
{ {0.0f, 0.0f, 0.0f}, 0.0f, UINT32_MAX, NvBlastChunkDesc::NoFlags, 0 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 1 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 2 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 3 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 4 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 1, NvBlastChunkDesc::NoFlags, 5 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 1, NvBlastChunkDesc::NoFlags, 6 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 2, NvBlastChunkDesc::NoFlags, 7 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 2, NvBlastChunkDesc::NoFlags, 8 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 5, NvBlastChunkDesc::NoFlags, 9 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 5, NvBlastChunkDesc::NoFlags, 10 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 6, NvBlastChunkDesc::NoFlags, 11 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 6, NvBlastChunkDesc::NoFlags, 12 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 7, NvBlastChunkDesc::NoFlags, 13 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 7, NvBlastChunkDesc::NoFlags, 14 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 8, NvBlastChunkDesc::NoFlags, 15 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 8, NvBlastChunkDesc::NoFlags, 16 },
};
const NvBlastBondDesc c_bonds[3] =
{
// normal, area, centroid, userdata, chunks
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 1.0f, 0.0f, 0.0f }, 0 }, { 1, 2 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 2.0f, 0.0f, 0.0f }, 0 }, { 2, 3 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 3.0f, 0.0f, 0.0f }, 0 }, { 3, 4 } },
};
NvBlastAssetDesc assetDesc = { chunksCount, c_chunks, 3, c_bonds };
{
// create asset
std::vector<char> scratch((size_t)NvBlastGetRequiredScratchForCreateAsset(&assetDesc, myLog));
void* amem = alignedZeroedAlloc(NvBlastGetAssetMemorySize(&assetDesc, myLog));
NvBlastAsset* asset = NvBlastCreateAsset(amem, &assetDesc, scratch.data(), myLog);
EXPECT_TRUE(asset != nullptr);
for (uint32_t i = 0; i < 14; i++)
{
// create actor
NvBlastActorDesc actorDesc;
actorDesc.initialBondHealths = actorDesc.initialSupportChunkHealths = nullptr;
actorDesc.uniformInitialBondHealth = actorDesc.uniformInitialLowerSupportChunkHealth = 1.0f;
void* fmem = alignedZeroedAlloc(NvBlastAssetGetFamilyMemorySize(asset, myLog));
NvBlastFamily* family = NvBlastAssetCreateFamily(fmem, asset, myLog);
scratch.resize((size_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(family, myLog));
NvBlastActor* actor = NvBlastFamilyCreateFirstActor(family, &actorDesc, scratch.data(), myLog);
EXPECT_TRUE(actor != nullptr);
std::vector<NvBlastChunkFractureData> cfData;
cfData.resize(i + 1);
cfData[cfData.size() - 1].userdata = GUARD;
std::vector<NvBlastBondFractureData> bfData;
NvBlastChunkFractureData command[] =
{
{ 0, 1, 10.0f },
{ 0, 2, 10.0f },
};
NvBlastFractureBuffers commands = { 0, 2, nullptr, command };
NvBlastFractureBuffers events = { static_cast<uint32_t>(bfData.size()), static_cast<uint32_t>(cfData.size()) - 1, bfData.data(), cfData.data() };
NvBlastActorApplyFracture(&events, actor, &commands, myLog, nullptr);
EXPECT_WARNING;
EXPECT_EQ(GUARD, cfData[cfData.size() - 1].userdata);
EXPECT_EQ(i, events.chunkFractureCount);
for (uint32_t i = 0; i < events.chunkFractureCount; i++)
{
EXPECT_EQ(events.chunkFractures[i].chunkIndex, events.chunkFractures[i].userdata);
}
EXPECT_TRUE(NvBlastActorDeactivate(actor, myLog));
alignedFree(family);
}
{
// create actor
NvBlastActorDesc actorDesc;
actorDesc.initialBondHealths = actorDesc.initialSupportChunkHealths = nullptr;
actorDesc.uniformInitialBondHealth = actorDesc.uniformInitialLowerSupportChunkHealth = 1.0f;
void* fmem = alignedZeroedAlloc(NvBlastAssetGetFamilyMemorySize(asset, myLog));
NvBlastFamily* family = NvBlastAssetCreateFamily(fmem, asset, myLog);
scratch.resize((size_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(family, myLog));
NvBlastActor* actor = NvBlastFamilyCreateFirstActor(family, &actorDesc, scratch.data(), myLog);
EXPECT_TRUE(actor != nullptr);
std::vector<NvBlastChunkFractureData> cfData;
cfData.resize(14 + 1);
cfData[cfData.size() - 1].userdata = GUARD;
std::vector<NvBlastBondFractureData> bfData;
NvBlastChunkFractureData command[] =
{
{ 0, 1, 10.0f },
{ 0, 2, 10.0f },
};
NvBlastFractureBuffers commands = { 0, 2, nullptr, command };
NvBlastFractureBuffers events = { static_cast<uint32_t>(bfData.size()), static_cast<uint32_t>(cfData.size()) - 1, bfData.data(), cfData.data() };
NvBlastActorApplyFracture(&events, actor, &commands, myLog, nullptr);
EXPECT_TRUE(NvBlastActorIsSplitRequired(actor, messageLog));
EXPECT_NO_WARNING;
EXPECT_EQ(14, events.chunkFractureCount);
for (uint32_t i = 0; i < events.chunkFractureCount; i++)
{
EXPECT_EQ(events.chunkFractures[i].chunkIndex, events.chunkFractures[i].userdata);
}
ASSERT_EQ(GUARD, cfData[cfData.size() - 1].userdata);
scratch.resize((size_t)NvBlastActorGetRequiredScratchForSplit(actor, myLog));
std::vector<NvBlastActor*> newActors(NvBlastActorGetMaxActorCountForSplit(actor, myLog));
NvBlastActorSplitEvent result;
result.deletedActor = nullptr;
result.newActors = newActors.data();
size_t newActorsCount = NvBlastActorSplit(&result, actor, static_cast<uint32_t>(newActors.size()), scratch.data(), myLog, nullptr);
newActors.resize(newActorsCount);
EXPECT_EQ(9, newActorsCount);
for (uint32_t i = 0; i < newActorsCount; ++i)
{
const bool actorReleaseResult = NvBlastActorDeactivate(result.newActors[i], myLog);
EXPECT_TRUE(actorReleaseResult);
}
alignedFree(family);
}
alignedFree(asset);
}
EXPECT_NO_WARNING;
}
TEST_F(APITest, FractureBufferLimitsInSitu)
{
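// Variant of FractureBufferLimits where the command and event buffers alias the same cfData storage
// ("in situ"); overlapping input/output must still respect the capacity limit and the GUARD entry.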
static const uint32_t GUARD = 0xb1a57;
const uint32_t chunksCount = 17;
const NvBlastChunkDesc c_chunks[chunksCount] =
{
// centroid volume parent idx flags ID
{ {0.0f, 0.0f, 0.0f}, 0.0f, UINT32_MAX, NvBlastChunkDesc::NoFlags, 0 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 1 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 2 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 3 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 4 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 1, NvBlastChunkDesc::NoFlags, 5 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 1, NvBlastChunkDesc::NoFlags, 6 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 2, NvBlastChunkDesc::NoFlags, 7 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 2, NvBlastChunkDesc::NoFlags, 8 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 5, NvBlastChunkDesc::NoFlags, 9 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 5, NvBlastChunkDesc::NoFlags, 10 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 6, NvBlastChunkDesc::NoFlags, 11 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 6, NvBlastChunkDesc::NoFlags, 12 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 7, NvBlastChunkDesc::NoFlags, 13 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 7, NvBlastChunkDesc::NoFlags, 14 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 8, NvBlastChunkDesc::NoFlags, 15 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 8, NvBlastChunkDesc::NoFlags, 16 },
};
const NvBlastBondDesc c_bonds[3] =
{
// normal, area, centroid, userdata, chunks
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 1.0f, 0.0f, 0.0f }, 0 }, { 1, 2 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 2.0f, 0.0f, 0.0f }, 0 }, { 2, 3 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 3.0f, 0.0f, 0.0f }, 0 }, { 3, 4 } },
};
NvBlastAssetDesc assetDesc = { chunksCount, c_chunks, 3, c_bonds };
{
// create asset
std::vector<char> scratch((size_t)NvBlastGetRequiredScratchForCreateAsset(&assetDesc, myLog));
void* amem = alignedZeroedAlloc(NvBlastGetAssetMemorySize(&assetDesc, myLog));
NvBlastAsset* asset = NvBlastCreateAsset(amem, &assetDesc, scratch.data(), myLog);
EXPECT_TRUE(asset != nullptr);
for (uint32_t i = 0; i < 14 - 2; i++)
{
// create actor
NvBlastActorDesc actorDesc;
actorDesc.initialBondHealths = actorDesc.initialSupportChunkHealths = nullptr;
actorDesc.uniformInitialBondHealth = actorDesc.uniformInitialLowerSupportChunkHealth = 1.0f;
void* fmem = alignedZeroedAlloc(NvBlastAssetGetFamilyMemorySize(asset, myLog));
NvBlastFamily* family = NvBlastAssetCreateFamily(fmem, asset, myLog);
scratch.resize((size_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(family, myLog));
NvBlastActor* actor = NvBlastFamilyCreateFirstActor(family, &actorDesc, scratch.data(), myLog);
EXPECT_TRUE(actor != nullptr);
std::vector<NvBlastChunkFractureData> cfData;
cfData.resize(2 + i + 1);
std::vector<NvBlastBondFractureData> bfData;
cfData[0].userdata = 0;
cfData[0].chunkIndex = 1;
cfData[0].health = 10.0f;
cfData[1].userdata = 0;
cfData[1].chunkIndex = 2;
cfData[1].health = 10.0f;
cfData[2 + i].userdata = GUARD;
NvBlastFractureBuffers commands = { 0, 2, nullptr, cfData.data() };
NvBlastFractureBuffers events = { static_cast<uint32_t>(bfData.size()), static_cast<uint32_t>(cfData.size()) - 1, bfData.data(), cfData.data() };
NvBlastActorApplyFracture(&events, actor, &commands, myLog, nullptr);
EXPECT_TRUE(NvBlastActorIsSplitRequired(actor, messageLog));
EXPECT_WARNING;
EXPECT_EQ(GUARD, cfData[cfData.size() - 1].userdata);
EXPECT_EQ(2 + i, events.chunkFractureCount);
for (uint32_t i = 0; i < events.chunkFractureCount; i++)
{
EXPECT_EQ(events.chunkFractures[i].chunkIndex, events.chunkFractures[i].userdata);
}
EXPECT_TRUE(NvBlastActorDeactivate(actor, myLog));
alignedFree(family);
}
{
// create actor
NvBlastActorDesc actorDesc;
actorDesc.initialBondHealths = actorDesc.initialSupportChunkHealths = nullptr;
actorDesc.uniformInitialBondHealth = actorDesc.uniformInitialLowerSupportChunkHealth = 1.0f;
void* fmem = alignedZeroedAlloc(NvBlastAssetGetFamilyMemorySize(asset, myLog));
NvBlastFamily* family = NvBlastAssetCreateFamily(fmem, asset, myLog);
scratch.resize((size_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(family, myLog));
NvBlastActor* actor = NvBlastFamilyCreateFirstActor(family, &actorDesc, scratch.data(), myLog);
EXPECT_TRUE(actor != nullptr);
std::vector<NvBlastChunkFractureData> cfData;
cfData.resize(14 + 1);
cfData[cfData.size() - 1].userdata = GUARD;
std::vector<NvBlastBondFractureData> bfData;
cfData[0].userdata = 0;
cfData[0].chunkIndex = 1;
cfData[0].health = 10.0f;
cfData[1].userdata = 0;
cfData[1].chunkIndex = 2;
cfData[1].health = 10.0f;
cfData[14].userdata = GUARD;
NvBlastFractureBuffers commands = { 0, 2, nullptr, cfData.data() };
NvBlastFractureBuffers events = { static_cast<uint32_t>(bfData.size()), static_cast<uint32_t>(cfData.size()) - 1, bfData.data(), cfData.data() };
NvBlastActorApplyFracture(&events, actor, &commands, myLog, nullptr);
EXPECT_TRUE(NvBlastActorIsSplitRequired(actor, messageLog));
EXPECT_NO_WARNING;
EXPECT_EQ(14, events.chunkFractureCount);
for (uint32_t i = 0; i < events.chunkFractureCount; i++)
{
EXPECT_EQ(events.chunkFractures[i].chunkIndex, events.chunkFractures[i].userdata);
}
ASSERT_EQ(GUARD, cfData[cfData.size() - 1].userdata);
scratch.resize((size_t)NvBlastActorGetRequiredScratchForSplit(actor, myLog));
std::vector<NvBlastActor*> newActors(NvBlastActorGetMaxActorCountForSplit(actor, myLog));
NvBlastActorSplitEvent result;
result.deletedActor = nullptr;
result.newActors = newActors.data();
size_t newActorsCount = NvBlastActorSplit(&result, actor, static_cast<uint32_t>(newActors.size()), scratch.data(), myLog, nullptr);
newActors.resize(newActorsCount);
EXPECT_EQ(9, newActorsCount);
for (uint32_t i = 0; i < newActorsCount; ++i)
{
const bool actorReleaseResult = NvBlastActorDeactivate(result.newActors[i], myLog);
EXPECT_TRUE(actorReleaseResult);
}
alignedFree(family);
}
alignedFree(asset);
}
EXPECT_NO_WARNING;
}
/*
This test checks that bond or chunk fracture commands passed to NvBlastActorApplyFracture which do not
correspond to the actor passed in are ignored, and that a warning message is fired.
*/
TEST_F(APITest, FractureWarnAndFilterOtherActorCommands)
{
const uint32_t chunksCount = 17;
const NvBlastChunkDesc c_chunks[chunksCount] =
{
// centroid volume parent idx flags ID
{ { 0.0f, 0.0f, 0.0f }, 0.0f, UINT32_MAX, NvBlastChunkDesc::NoFlags, 0 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 1 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 2 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 3 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 4 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 1, NvBlastChunkDesc::NoFlags, 5 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 1, NvBlastChunkDesc::NoFlags, 6 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 2, NvBlastChunkDesc::NoFlags, 7 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 2, NvBlastChunkDesc::NoFlags, 8 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 5, NvBlastChunkDesc::NoFlags, 9 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 5, NvBlastChunkDesc::NoFlags, 10 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 6, NvBlastChunkDesc::NoFlags, 11 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 6, NvBlastChunkDesc::NoFlags, 12 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 7, NvBlastChunkDesc::NoFlags, 13 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 7, NvBlastChunkDesc::NoFlags, 14 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 8, NvBlastChunkDesc::NoFlags, 15 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 8, NvBlastChunkDesc::NoFlags, 16 },
};
const NvBlastBondDesc c_bonds[4] =
{
// normal, area, centroid, userdata, chunks
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 1.0f, 0.0f, 0.0f }, 0 }, { 1, 2 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 1.0f, 0.0f, 0.0f }, 0 }, { 2, 3 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 1.0f, 0.0f, 0.0f }, 0 }, { 3, 4 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 1.0f, 0.0f, 0.0f }, 0 }, { 1, 3 } }
};
NvBlastAssetDesc assetDesc = { chunksCount, c_chunks, 4, c_bonds };
// create asset
std::vector<char> scratch((size_t)NvBlastGetRequiredScratchForCreateAsset(&assetDesc, myLog));
void* amem = alignedZeroedAlloc(NvBlastGetAssetMemorySize(&assetDesc, myLog));
NvBlastAsset* asset = NvBlastCreateAsset(amem, &assetDesc, scratch.data(), myLog);
EXPECT_TRUE(asset != nullptr);
// create actor
NvBlastActorDesc actorDesc;
actorDesc.initialBondHealths = actorDesc.initialSupportChunkHealths = nullptr;
actorDesc.uniformInitialBondHealth = actorDesc.uniformInitialLowerSupportChunkHealth = 1.0f;
void* fmem = alignedZeroedAlloc(NvBlastAssetGetFamilyMemorySize(asset, myLog));
NvBlastFamily* family = NvBlastAssetCreateFamily(fmem, asset, myLog);
scratch.resize((size_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(family, myLog));
NvBlastActor* actor = NvBlastFamilyCreateFirstActor(family, &actorDesc, scratch.data(), myLog);
EXPECT_TRUE(actor != nullptr);
// split in 2
std::vector<NvBlastActor*> actors;
{
NvBlastBondFractureData command[] =
{
{ 0, 0, 2, 10.0f },
{ 0, 1, 2, 10.0f }
};
NvBlastFractureBuffers commands = { 2, 0, command, nullptr };
NvBlastActorApplyFracture(nullptr, actor, &commands, myLog, nullptr);
EXPECT_TRUE(NvBlastActorIsSplitRequired(actor, messageLog));
scratch.resize((size_t)NvBlastActorGetRequiredScratchForSplit(actor, myLog));
std::vector<NvBlastActor*> newActors(NvBlastActorGetMaxActorCountForSplit(actor, myLog));
NvBlastActorSplitEvent result;
result.deletedActor = nullptr;
result.newActors = newActors.data();
size_t newActorsCount = NvBlastActorSplit(&result, actor, static_cast<uint32_t>(newActors.size()), scratch.data(), myLog, nullptr);
EXPECT_EQ(2, newActorsCount);
EXPECT_EQ(actor, result.deletedActor);
actors.insert(actors.begin(), result.newActors, result.newActors + newActorsCount);
}
// damage bonds belonging to other actors, nothing expected to be broken
{
for (uint32_t i = 0; i < actors.size(); ++i)
{
NvBlastActor* actor = actors[i];
NvBlastActor* otherActor = actors[(i + 1) % 2];
// get graph nodes check
std::vector<uint32_t> graphNodeIndices;
graphNodeIndices.resize(NvBlastActorGetGraphNodeCount(otherActor, nullptr));
uint32_t graphNodesCount = NvBlastActorGetGraphNodeIndices(graphNodeIndices.data(), (uint32_t)graphNodeIndices.size(), otherActor, nullptr);
EXPECT_EQ(graphNodesCount, 2);
NvBlastBondFractureData command[] =
{
{ 0, graphNodeIndices[0], graphNodeIndices[1], 10.0f }
};
NvBlastFractureBuffers commands = { 1, 0, command, nullptr };
NvBlastActorApplyFracture(nullptr, actor, &commands, myLog, nullptr);
EXPECT_WARNING;
EXPECT_FALSE(NvBlastActorIsSplitRequired(actor, messageLog));
scratch.resize((size_t)NvBlastActorGetRequiredScratchForSplit(actor, myLog));
std::vector<NvBlastActor*> newActors(NvBlastActorGetMaxActorCountForSplit(actor, myLog));
NvBlastActorSplitEvent result;
result.deletedActor = nullptr;
result.newActors = newActors.data();
size_t newActorsCount = NvBlastActorSplit(&result, actor, static_cast<uint32_t>(newActors.size()), scratch.data(), myLog, nullptr);
EXPECT_EQ(0, newActorsCount);
EXPECT_EQ(nullptr, result.deletedActor);
}
}
// damage bonds, split actors in 2 each
std::vector<NvBlastActor*> actors2;
{
for (uint32_t i = 0; i < 2; ++i)
{
NvBlastActor* actor = actors[i];
// get graph nodes check
std::vector<uint32_t> graphNodeIndices;
graphNodeIndices.resize(NvBlastActorGetGraphNodeCount(actor, nullptr));
uint32_t graphNodesCount = NvBlastActorGetGraphNodeIndices(graphNodeIndices.data(), (uint32_t)graphNodeIndices.size(), actor, nullptr);
EXPECT_EQ(graphNodesCount, 2);
NvBlastBondFractureData command[] =
{
{ 0, graphNodeIndices[0], graphNodeIndices[1], 10.0f }
};
NvBlastFractureBuffers commands = { 1, 0, command, nullptr };
NvBlastActorApplyFracture(nullptr, actor, &commands, myLog, nullptr);
EXPECT_TRUE(NvBlastActorIsSplitRequired(actor, messageLog));
scratch.resize((size_t)NvBlastActorGetRequiredScratchForSplit(actor, myLog));
std::vector<NvBlastActor*> newActors(NvBlastActorGetMaxActorCountForSplit(actor, myLog));
NvBlastActorSplitEvent result;
result.deletedActor = nullptr;
result.newActors = newActors.data();
size_t newActorsCount = NvBlastActorSplit(&result, actor, static_cast<uint32_t>(newActors.size()), scratch.data(), myLog, nullptr);
EXPECT_EQ(2, newActorsCount);
EXPECT_EQ(actor, result.deletedActor);
actors2.insert(actors2.begin(), result.newActors, result.newActors + newActorsCount);
}
}
// damage chunk belonging to other actor (expect no split or damage taken)
{
for (uint32_t i = 0; i < actors.size(); ++i)
{
NvBlastActor* actor = actors[i];
NvBlastActor* otherActor = actors[(i + 1) % 2];
uint32_t chunkToDamage;
NvBlastActorGetVisibleChunkIndices(&chunkToDamage, 1, otherActor, myLog);
NvBlastChunkFractureData command[] =
{
{ 0, chunkToDamage, 0.9f },
};
NvBlastFractureBuffers commands = { 0, 1, nullptr, command };
NvBlastActorApplyFracture(nullptr, actor, &commands, myLog, nullptr);
EXPECT_WARNING;
EXPECT_FALSE(NvBlastActorIsSplitRequired(actor, messageLog));
scratch.resize((size_t)NvBlastActorGetRequiredScratchForSplit(actor, myLog));
std::vector<NvBlastActor*> newActors(NvBlastActorGetMaxActorCountForSplit(actor, myLog));
NvBlastActorSplitEvent result;
result.deletedActor = nullptr;
result.newActors = newActors.data();
size_t newActorsCount = NvBlastActorSplit(&result, actor, static_cast<uint32_t>(newActors.size()), scratch.data(), myLog, nullptr);
EXPECT_EQ(0, newActorsCount);
EXPECT_EQ(nullptr, result.deletedActor);
EXPECT_EQ(1, NvBlastActorGetVisibleChunkCount(actor, myLog));
uint32_t chunkIndex;
NvBlastActorGetVisibleChunkIndices(&chunkIndex, 1, actor, myLog);
EXPECT_NE(chunkToDamage, chunkIndex);
}
}
for (NvBlastActor* actor : actors2)
{
NvBlastActorDeactivate(actor, myLog);
}
alignedFree(family);
alignedFree(asset);
EXPECT_NO_WARNING;
}
/**
If duplicate bonds are passed, the asset create routine ignores them (but fires a warning).
We pass duplicated bonds to the world chunk and fully fracture the actor once.
*/
TEST_F(APITest, FractureWithBondDuplicates)
{
const uint32_t chunksCount = 17;
const NvBlastChunkDesc c_chunks[chunksCount] =
{
// centroid volume parent idx flags ID
{ { 0.0f, 0.0f, 0.0f }, 0.0f, UINT32_MAX, NvBlastChunkDesc::NoFlags, 0 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 1 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 2 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 3 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 4 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 5 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 6 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 7 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 8 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 5, NvBlastChunkDesc::NoFlags, 9 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 5, NvBlastChunkDesc::NoFlags, 10 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 6, NvBlastChunkDesc::NoFlags, 11 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 6, NvBlastChunkDesc::NoFlags, 12 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 7, NvBlastChunkDesc::NoFlags, 13 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 7, NvBlastChunkDesc::NoFlags, 14 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 8, NvBlastChunkDesc::NoFlags, 15 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 8, NvBlastChunkDesc::NoFlags, 16 },
};
const uint32_t bondCount = 20;
const uint32_t world = ~(uint32_t)0; // world chunk => invalid index
const NvBlastBondDesc c_bonds[bondCount] =
{
// normal, area, centroid, userdata, chunks
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 1.0f, 0.0f, 0.0f }, 0 },{ 1, world } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 1.0f, 0.0f, 0.0f }, 0 },{ 2, 1 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 1.0f, 0.0f, 0.0f }, 0 },{ 2, world } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 1.0f, 0.0f, 0.0f }, 0 },{ 2, world } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 1.0f, 0.0f, 0.0f }, 0 },{ 3, 1 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 1.0f, 0.0f, 0.0f }, 0 },{ 4, 3 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 1.0f, 0.0f, 0.0f }, 0 },{ 4, 2 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 1.0f, 0.0f, 0.0f }, 0 },{ 4, world } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 1.0f, 0.0f, 0.0f }, 0 },{ 5, 1 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 1.0f, 0.0f, 0.0f }, 0 },{ 5, world } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 1.0f, 0.0f, 0.0f }, 0 },{ 6, 5 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 1.0f, 0.0f, 0.0f }, 0 },{ 6, 2 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 1.0f, 0.0f, 0.0f }, 0 },{ 6, world } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 1.0f, 0.0f, 0.0f }, 0 },{ 6, world } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 1.0f, 0.0f, 0.0f }, 0 },{ 7, 5 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 1.0f, 0.0f, 0.0f }, 0 },{ 7, 3 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 1.0f, 0.0f, 0.0f }, 0 },{ 8, 7 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 1.0f, 0.0f, 0.0f }, 0 },{ 8, 6 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 1.0f, 0.0f, 0.0f }, 0 },{ 8, 4 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 1.0f, 0.0f, 0.0f }, 0 },{ 8, world } }
};
NvBlastAssetDesc assetDesc = { chunksCount, c_chunks, bondCount, c_bonds };
// create asset
std::vector<char> scratch((size_t)NvBlastGetRequiredScratchForCreateAsset(&assetDesc, myLog));
void* amem = alignedZeroedAlloc(NvBlastGetAssetMemorySize(&assetDesc, myLog));
NvBlastAsset* asset = NvBlastCreateAsset(amem, &assetDesc, scratch.data(), myLog);
EXPECT_WARNING;
EXPECT_TRUE(asset != nullptr);
// create actor
NvBlastActorDesc actorDesc;
actorDesc.initialBondHealths = actorDesc.initialSupportChunkHealths = nullptr;
actorDesc.uniformInitialBondHealth = actorDesc.uniformInitialLowerSupportChunkHealth = 1.0f;
void* fmem = alignedZeroedAlloc(NvBlastAssetGetFamilyMemorySize(asset, myLog));
NvBlastFamily* family = NvBlastAssetCreateFamily(fmem, asset, myLog);
scratch.resize((size_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(family, myLog));
NvBlastActor* actor = NvBlastFamilyCreateFirstActor(family, &actorDesc, scratch.data(), myLog);
EXPECT_TRUE(actor != nullptr);
// split in 2
std::vector<NvBlastActor*> actors;
{
NvBlastExtRadialDamageDesc damage = {
10.0f, // compressive
{ 0.0f, 0.0f, 0.0f }, // position
100.0f, // min radius - maximum damage
100.0f // max radius - zero damage
};
NvBlastBondFractureData outBondFracture[bondCount];
NvBlastChunkFractureData outChunkFracture[chunksCount];
NvBlastFractureBuffers events;
events.bondFractureCount = 2;
events.bondFractures = outBondFracture;
events.chunkFractureCount = 2;
events.chunkFractures = outChunkFracture;
NvBlastExtProgramParams programParams = { &damage, nullptr };
NvBlastDamageProgram program = {
NvBlastExtFalloffGraphShader,
NvBlastExtFalloffSubgraphShader
};
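// Generate fracture commands into 'events' using the falloff damage program, then apply them
// to the actor (no event output buffer is requested, hence the nullptr).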
NvBlastActorGenerateFracture(&events, actor, program, &programParams, myLog, nullptr);
NvBlastActorApplyFracture(nullptr, actor, &events, myLog, nullptr);
EXPECT_TRUE(NvBlastActorIsSplitRequired(actor, messageLog));
scratch.resize((size_t)NvBlastActorGetRequiredScratchForSplit(actor, myLog));
std::vector<NvBlastActor*> newActors(NvBlastActorGetMaxActorCountForSplit(actor, myLog));
NvBlastActorSplitEvent result;
result.deletedActor = nullptr;
result.newActors = newActors.data();
size_t newActorsCount = NvBlastActorSplit(&result, actor, static_cast<uint32_t>(newActors.size()), scratch.data(), myLog, nullptr);
EXPECT_EQ(8, newActorsCount);
EXPECT_EQ(actor, result.deletedActor);
actors.insert(actors.begin(), result.newActors, result.newActors + newActorsCount);
}
for (NvBlastActor* actor : actors)
{
NvBlastActorDeactivate(actor, myLog);
}
alignedFree(family);
alignedFree(asset);
EXPECT_NO_WARNING;
}
#if 0
TEST(APITest, UserChunkMap)
{
for (int i = 0; i < 2; ++i)
{
// Choose descriptor list
const NvBlastAssetDesc* descs = nullptr;
size_t size = 0;
switch (i)
{
case 0:
descs = g_assetDescs;
size = sizeof(g_assetDescs) / sizeof(g_assetDescs[0]);
break;
case 1:
descs = g_assetDescsMissingCoverage;
size = sizeof(g_assetDescsMissingCoverage) / sizeof(g_assetDescsMissingCoverage[0]);
break;
default:
continue;
}
// Iterate over list
for (size_t j = 0; j < size; ++j)
{
// Create asset
const NvBlastAssetDesc* desc = descs + j;
std::vector<char> scratch((size_t)NvBlastGetRequiredScratchForCreateAsset(desc));
std::vector<uint32_t> chunkMap(desc->chunkCount);
NvBlastAsset* asset = NvBlastCreateAsset(&chunkMap[0], desc, alignedAlloc<malloc>, scratch.data(), nullptr);
EXPECT_TRUE(asset);
// Test map
Nv::Blast::Asset& a = *static_cast<Nv::Blast::Asset*>(asset);
uint32_t supportChunkCount = 0;
uint32_t subsupportChunkCount = 0;
for (uint32_t i = 0; i < desc->chunkCount; ++i)
{
const uint32_t map = chunkMap[i];
if (Nv::Blast::isInvalidIndex(map))
{
continue;
}
else if (map < a.m_firstSubsupportChunkIndex)
{
EXPECT_LT(map, a.m_graph.m_nodeCount);
++supportChunkCount;
}
else
{
EXPECT_LT(map, a.m_chunkCount);
EXPECT_GE(map, a.m_graph.m_nodeCount);
++subsupportChunkCount;
}
}
EXPECT_EQ(supportChunkCount, a.m_graph.m_nodeCount);
EXPECT_EQ(subsupportChunkCount, a.getLowerSupportChunkCount() - a.m_graph.m_nodeCount);
// Release asset
NvBlastAssetRelease(asset, free, nullptr);
}
}
}
#endif
TEST_F(APITest, NoBondsSausage)
{
// create asset
const NvBlastChunkDesc c_chunks[4] =
{
// centroid volume parent idx flags ID
{ { 0.0f, 0.0f, 0.0f }, 0.0f, UINT32_MAX, NvBlastChunkDesc::NoFlags, 0 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 1 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 1, NvBlastChunkDesc::NoFlags, 2 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 2, NvBlastChunkDesc::NoFlags, 3 }
};
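// A single chain of chunks (0 -> 1 -> 2 -> 3) with no bonds -- the "sausage".
// Chunk 1 is the only support chunk; chunks 2 and 3 are its subsupport descendants.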
NvBlastAssetDesc assetDesc;
assetDesc.chunkCount = 4;
assetDesc.chunkDescs = c_chunks;
assetDesc.bondCount = 0;
assetDesc.bondDescs = nullptr;
std::vector<char> scratch((size_t)NvBlastGetRequiredScratchForCreateAsset(&assetDesc, messageLog));
void* amem = alignedZeroedAlloc(NvBlastGetAssetMemorySize(&assetDesc, messageLog));
NvBlastAsset* asset = NvBlastCreateAsset(amem, &assetDesc, scratch.data(), messageLog);
const NvBlastChunk* chunks = NvBlastAssetGetChunks(asset, messageLog);
EXPECT_TRUE(asset != nullptr);
// create actor
NvBlastActorDesc actorDesc;
actorDesc.initialBondHealths = actorDesc.initialSupportChunkHealths = nullptr;
actorDesc.uniformInitialBondHealth = actorDesc.uniformInitialLowerSupportChunkHealth = 1.0f;
void* fmem = alignedZeroedAlloc(NvBlastAssetGetFamilyMemorySize(asset, messageLog));
NvBlastFamily* family = NvBlastAssetCreateFamily(fmem, asset, messageLog);
scratch.resize((size_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(family, messageLog));
NvBlastActor* actor = NvBlastFamilyCreateFirstActor(family, &actorDesc, scratch.data(), messageLog);
EXPECT_TRUE(actor != nullptr);
// check visible chunk
{
EXPECT_EQ(NvBlastActorGetVisibleChunkCount(actor, messageLog), 1);
uint32_t chunkIndex;
NvBlastActorGetVisibleChunkIndices(&chunkIndex, 1, actor, messageLog);
EXPECT_EQ(chunks[chunkIndex].userData, 0);
}
// damage
NvBlastExtRadialDamageDesc damage = {
10.0f, // compressive
{ 0.0f, 0.0f, 0.0f }, // position
4.0f, // min radius - maximum damage
6.0f // max radius - zero damage
};
NvBlastBondFractureData outBondFracture[2];
NvBlastChunkFractureData outChunkFracture[2];
NvBlastFractureBuffers events;
events.bondFractureCount = 2;
events.bondFractures = outBondFracture;
events.chunkFractureCount = 2;
events.chunkFractures = outChunkFracture;
NvBlastExtProgramParams programParams = { &damage, nullptr };
NvBlastDamageProgram program = {
NvBlastExtFalloffGraphShader,
NvBlastExtFalloffSubgraphShader
};
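// With no bonds the support graph has a single node, so the damage program is expected to
// emit chunk fracture commands only (checked below: 0 bond fractures, 1 chunk fracture).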
NvBlastActorGenerateFracture(&events, actor, program, &programParams, messageLog, nullptr);
NvBlastActorApplyFracture(&events, actor, &events, messageLog, nullptr);
EXPECT_TRUE(NvBlastActorIsSplitRequired(actor, messageLog));
EXPECT_EQ(0, events.bondFractureCount);
EXPECT_EQ(1, events.chunkFractureCount);
// split
NvBlastActor* newActors[8]; // generously sized; this asset has only 3 lower-support chunks
NvBlastActorSplitEvent result;
result.deletedActor = nullptr;
result.newActors = newActors;
scratch.resize((size_t)NvBlastActorGetRequiredScratchForSplit(actor, messageLog));
size_t newActorsCount = NvBlastActorSplit(&result, actor, 8, scratch.data(), messageLog, nullptr);
EXPECT_EQ(1, newActorsCount);
EXPECT_EQ(actor, result.deletedActor);
// check visible chunk
{
EXPECT_EQ(NvBlastActorGetVisibleChunkCount(result.newActors[0], messageLog), 1);
uint32_t chunkIndex;
NvBlastActorGetVisibleChunkIndices(&chunkIndex, 1, result.newActors[0], messageLog);
EXPECT_EQ(chunks[chunkIndex].userData, 3);
}
// release all
for (uint32_t i = 0; i < newActorsCount; ++i)
{
const bool actorReleaseResult = NvBlastActorDeactivate(result.newActors[i], messageLog);
EXPECT_TRUE(actorReleaseResult);
}
alignedFree(family);
alignedFree(asset);
}
TEST_F(APITest, SplitOnlyWhenNecessary)
{
static const uint32_t GUARD = 0xb1a57;
const uint32_t chunksCount = 17;
const NvBlastChunkDesc c_chunks[chunksCount] =
{
// centroid volume parent idx flags ID
{ { 0.0f, 0.0f, 0.0f }, 0.0f, UINT32_MAX, NvBlastChunkDesc::NoFlags, 0 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 1 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 2 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 3 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 4 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 1, NvBlastChunkDesc::NoFlags, 5 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 1, NvBlastChunkDesc::NoFlags, 6 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 2, NvBlastChunkDesc::NoFlags, 7 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 2, NvBlastChunkDesc::NoFlags, 8 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 5, NvBlastChunkDesc::NoFlags, 9 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 5, NvBlastChunkDesc::NoFlags, 10 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 6, NvBlastChunkDesc::NoFlags, 11 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 6, NvBlastChunkDesc::NoFlags, 12 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 7, NvBlastChunkDesc::NoFlags, 13 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 7, NvBlastChunkDesc::NoFlags, 14 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 8, NvBlastChunkDesc::NoFlags, 15 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 8, NvBlastChunkDesc::NoFlags, 16 },
};
const NvBlastBondDesc c_bonds[4] =
{
// normal, area, centroid, userdata, chunks
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 1.0f, 0.0f, 0.0f }, 0 }, { 1, 2 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 1.0f, 0.0f, 0.0f }, 0 }, { 2, 3 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 1.0f, 0.0f, 0.0f }, 0 }, { 3, 4 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 1.0f, 0.0f, 0.0f }, 0 }, { 1, 3 } }
};
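// Support graph: chunks 1-2-3-4 form a chain (bonds 1-2, 2-3, 3-4) plus a 1-3 cross bond.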
NvBlastAssetDesc assetDesc = { chunksCount, c_chunks, 4, c_bonds };
// create asset with chunk map
std::vector<char> scratch((size_t)NvBlastGetRequiredScratchForCreateAsset(&assetDesc, myLog));
void* amem = alignedZeroedAlloc(NvBlastGetAssetMemorySize(&assetDesc, myLog));
NvBlastAsset* asset = NvBlastCreateAsset(amem, &assetDesc, scratch.data(), myLog);
EXPECT_TRUE(asset != nullptr);
// create actor
NvBlastActorDesc actorDesc;
actorDesc.initialBondHealths = actorDesc.initialSupportChunkHealths = nullptr;
actorDesc.uniformInitialBondHealth = actorDesc.uniformInitialLowerSupportChunkHealth = 1.0f;
void* fmem = alignedZeroedAlloc(NvBlastAssetGetFamilyMemorySize(asset, myLog));
NvBlastFamily* family = NvBlastAssetCreateFamily(fmem, asset, myLog);
scratch.resize((size_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(family, myLog));
NvBlastActor* actor = NvBlastFamilyCreateFirstActor(family, &actorDesc, scratch.data(), myLog);
EXPECT_TRUE(actor != nullptr);
// damage health only (expect no split)
{
NvBlastBondFractureData command[] =
{
{ 0, 0, 1, 0.99f },
{ 0, 1, 2, 0.50f },
{ 0, 2, 3, 0.01f }
};
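// Each command removes less than the initial bond health of 1.0, so no bond actually breaks
// and the actor is expected to stay in one piece.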
NvBlastFractureBuffers commands = { 3, 0, command, nullptr };
NvBlastActorApplyFracture(nullptr, actor, &commands, myLog, nullptr);
EXPECT_FALSE(NvBlastActorIsSplitRequired(actor, messageLog));
scratch.resize((size_t)NvBlastActorGetRequiredScratchForSplit(actor, myLog));
std::vector<NvBlastActor*> newActors(NvBlastActorGetMaxActorCountForSplit(actor, myLog));
NvBlastActorSplitEvent result;
result.deletedActor = nullptr;
result.newActors = newActors.data();
size_t newActorsCount = NvBlastActorSplit(&result, actor, static_cast<uint32_t>(newActors.size()), scratch.data(), myLog, nullptr);
EXPECT_EQ(0, newActorsCount);
EXPECT_EQ(nullptr, result.deletedActor);
EXPECT_EQ(1, NvBlastActorGetVisibleChunkCount(actor, myLog));
uint32_t chunkIndex;
NvBlastActorGetVisibleChunkIndices(&chunkIndex, 1, actor, myLog);
EXPECT_EQ(0, chunkIndex);
}
// break 1 bond (expect no split)
{
NvBlastBondFractureData command[] =
{
{ 0, 0, 2, 10.0f },
};
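// Node pair (0, 2) maps to the 1-3 cross bond; breaking it leaves the chain connected,
// so a split pass is required but yields no new actors.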
NvBlastFractureBuffers commands = { 1, 0, command, nullptr };
NvBlastActorApplyFracture(nullptr, actor, &commands, myLog, nullptr);
EXPECT_TRUE(NvBlastActorIsSplitRequired(actor, messageLog));
scratch.resize((size_t)NvBlastActorGetRequiredScratchForSplit(actor, myLog));
std::vector<NvBlastActor*> newActors(NvBlastActorGetMaxActorCountForSplit(actor, myLog));
NvBlastActorSplitEvent result;
result.deletedActor = nullptr;
result.newActors = newActors.data();
size_t newActorsCount = NvBlastActorSplit(&result, actor, static_cast<uint32_t>(newActors.size()), scratch.data(), myLog, nullptr);
EXPECT_EQ(0, newActorsCount);
EXPECT_EQ(nullptr, result.deletedActor);
EXPECT_EQ(1, NvBlastActorGetVisibleChunkCount(actor, myLog));
uint32_t chunkIndex;
NvBlastActorGetVisibleChunkIndices(&chunkIndex, 1, actor, myLog);
EXPECT_EQ(0, chunkIndex);
}
// split in 4
std::vector<NvBlastActor*> actors;
{
NvBlastBondFractureData command[] =
{
{ 0, 0, 1, 10.0f },
{ 0, 1, 2, 10.0f },
{ 0, 2, 3, 10.0f }
};
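// Breaking the three chain bonds (node pairs 0-1, 1-2, 2-3) disconnects all four support
// chunks, since the cross bond was already broken above.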
NvBlastFractureBuffers commands = { 3, 0, command, nullptr };
NvBlastActorApplyFracture(nullptr, actor, &commands, myLog, nullptr);
EXPECT_TRUE(NvBlastActorIsSplitRequired(actor, messageLog));
scratch.resize((size_t)NvBlastActorGetRequiredScratchForSplit(actor, myLog));
std::vector<NvBlastActor*> newActors(NvBlastActorGetMaxActorCountForSplit(actor, myLog));
NvBlastActorSplitEvent result;
result.deletedActor = nullptr;
result.newActors = newActors.data();
size_t newActorsCount = NvBlastActorSplit(&result, actor, static_cast<uint32_t>(newActors.size()), scratch.data(), myLog, nullptr);
EXPECT_EQ(4, newActorsCount);
EXPECT_EQ(actor, result.deletedActor);
actors.insert(actors.begin(), result.newActors, result.newActors + newActorsCount);
}
// damage chunk's health only (expect no split)
{
for (NvBlastActor* actor : actors)
{
uint32_t chunkToDamage;
NvBlastActorGetVisibleChunkIndices(&chunkToDamage, 1, actor, myLog);
NvBlastChunkFractureData command[] =
{
{ 0, chunkToDamage, 0.9f },
};
NvBlastFractureBuffers commands = { 0, 1, nullptr, command };
NvBlastActorApplyFracture(nullptr, actor, &commands, myLog, nullptr);
EXPECT_FALSE(NvBlastActorIsSplitRequired(actor, messageLog));
scratch.resize((size_t)NvBlastActorGetRequiredScratchForSplit(actor, myLog));
std::vector<NvBlastActor*> newActors(NvBlastActorGetMaxActorCountForSplit(actor, myLog));
NvBlastActorSplitEvent result;
result.deletedActor = nullptr;
result.newActors = newActors.data();
size_t newActorsCount = NvBlastActorSplit(&result, actor, static_cast<uint32_t>(newActors.size()), scratch.data(), myLog, nullptr);
EXPECT_EQ(0, newActorsCount);
EXPECT_EQ(nullptr, result.deletedActor);
EXPECT_EQ(1, NvBlastActorGetVisibleChunkCount(actor, myLog));
uint32_t chunkIndex;
NvBlastActorGetVisibleChunkIndices(&chunkIndex, 1, actor, myLog);
EXPECT_EQ(chunkToDamage, chunkIndex);
}
}
for (NvBlastActor* actor : actors)
{
NvBlastActorDeactivate(actor, myLog);
}
alignedFree(family);
alignedFree(asset);
EXPECT_NO_WARNING;
}
#if NV_WINDOWS_FAMILY
#include <windows.h>
TEST_F(APITest,CExportsNoNameMangling)
{
//
// tests the lib-link-free approach using unmangled names (extern "C")
//
const char* dllName = "NvBlast.dll";
HMODULE dllHandle = LoadLibraryA(dllName); // dllName is a narrow string, so use the ANSI variant explicitly
DWORD error = GetLastError();
ASSERT_TRUE(dllHandle != nullptr);
// Asset functions
typedef size_t(*NvBlastGetRequiredScratchForCreateAsset)(const NvBlastAssetDesc* desc);
typedef size_t(*NvBlastGetAssetMemorySize)(const NvBlastAssetDesc* desc);
typedef NvBlastAsset*(*NvBlastCreateAsset)(void* mem, const NvBlastAssetDesc* desc, void* scratch, NvBlastLog logFn);
NvBlastGetRequiredScratchForCreateAsset assetCreateRequiredScratch = (NvBlastGetRequiredScratchForCreateAsset)GetProcAddress(dllHandle, "NvBlastGetRequiredScratchForCreateAsset");
ASSERT_TRUE(assetCreateRequiredScratch != nullptr);
NvBlastGetAssetMemorySize assetGetMemorySize = (NvBlastGetAssetMemorySize)GetProcAddress(dllHandle, "NvBlastGetAssetMemorySize");
ASSERT_TRUE(assetGetMemorySize != nullptr);
NvBlastCreateAsset assetCreate = (NvBlastCreateAsset)GetProcAddress(dllHandle, "NvBlastCreateAsset");
ASSERT_TRUE(assetCreate != nullptr);
// Family functions
typedef NvBlastFamily* (*NvBlastAssetCreateFamily)(void* mem, const NvBlastAsset* asset, NvBlastLog logFn);
typedef size_t(*NVBLASTASSETGETFAMILYMEMORYSIZE)(const NvBlastAsset* asset);
NVBLASTASSETGETFAMILYMEMORYSIZE familyGetMemorySize = (NVBLASTASSETGETFAMILYMEMORYSIZE)GetProcAddress(dllHandle, "NvBlastAssetGetFamilyMemorySize");
ASSERT_TRUE(familyGetMemorySize != nullptr);
NvBlastAssetCreateFamily familyCreate = (NvBlastAssetCreateFamily)GetProcAddress(dllHandle, "NvBlastAssetCreateFamily");
ASSERT_TRUE(familyCreate != nullptr);
// Actor functions
typedef size_t(*NvBlastFamilyGetRequiredScratchForCreateFirstActor)(const NvBlastFamily* family);
typedef NvBlastActor* (*NvBlastFamilyCreateFirstActor)(NvBlastFamily* family, const NvBlastActorDesc* desc, void* scratch, NvBlastLog logFn);
typedef bool(*NVBLASTACTORDEACTIVATE)(NvBlastActor* actor);
NvBlastFamilyGetRequiredScratchForCreateFirstActor actorcreaterequiredscratch = (NvBlastFamilyGetRequiredScratchForCreateFirstActor)GetProcAddress(dllHandle, "NvBlastFamilyGetRequiredScratchForCreateFirstActor");
ASSERT_TRUE(actorcreaterequiredscratch != nullptr);
NvBlastFamilyCreateFirstActor actorCreate = (NvBlastFamilyCreateFirstActor)GetProcAddress(dllHandle, "NvBlastFamilyCreateFirstActor");
ASSERT_TRUE(actorCreate != nullptr);
NVBLASTACTORDEACTIVATE actorRelease = (NVBLASTACTORDEACTIVATE)GetProcAddress(dllHandle, "NvBlastActorDeactivate");
ASSERT_TRUE(actorRelease != nullptr);
const NvBlastChunkDesc c_chunks[] =
{
// centroid volume parent idx flags ID
{ {0.0f, 0.0f, 0.0f}, 0.0f, UINT32_MAX, NvBlastChunkDesc::NoFlags, 0 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 0 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 0 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 0 },
};
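// Minimal asset: one root chunk with three support children and no bonds.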
NvBlastAssetDesc assetDesc;
assetDesc.bondCount = 0;
assetDesc.bondDescs = nullptr;
assetDesc.chunkCount = 4;
assetDesc.chunkDescs = c_chunks;
NvBlastAsset* asset;
{
size_t requiredsize = assetCreateRequiredScratch(&assetDesc);
std::vector<char>scratch(requiredsize);
void* mem = alignedZeroedAlloc(assetGetMemorySize(&assetDesc));
asset = assetCreate(mem, &assetDesc, scratch.data(), myLog);
ASSERT_TRUE(asset != nullptr);
}
void* fmem = alignedZeroedAlloc(familyGetMemorySize(asset));
NvBlastFamily* family = familyCreate(fmem, asset, myLog);
{
NvBlastActorDesc actorD;
actorD.initialBondHealths = actorD.initialSupportChunkHealths = nullptr;
actorD.uniformInitialBondHealth = actorD.uniformInitialLowerSupportChunkHealth = 1.0f;
size_t requiredsize = actorcreaterequiredscratch(family);
std::vector<char>scratch(requiredsize);
NvBlastActor* actor = actorCreate(family, &actorD, scratch.data(), myLog);
ASSERT_TRUE(actor != nullptr);
ASSERT_TRUE(actorRelease(actor));
}
alignedFree(family);
alignedFree(asset);
EXPECT_NO_WARNING;
}
#endif
| 76,389 | C++ | 43.856136 | 222 | 0.629135 |
NVIDIA-Omniverse/PhysX/blast/source/test/src/unit/TkTests.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include "TkBaseTest.h"
#include <map>
#include <random>
#include <algorithm>
#include <functional>
#include "NsMemoryBuffer.h"
#include "NvBlastTime.h"
struct ExpectedVisibleChunks
{
ExpectedVisibleChunks() :numActors(0), numChunks(0) {}
ExpectedVisibleChunks(size_t a, size_t c) :numActors(a), numChunks(c) {}
size_t numActors; size_t numChunks;
};
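// Checks that each family holds the expected number of actors, that every actor in it exposes
// the expected number of visible chunks, and that the total actor count matches.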
void testResults(std::vector<TkFamily*>& families, std::map<TkFamily*, ExpectedVisibleChunks>& expectedVisibleChunks)
{
size_t numActors = 0;
for (TkFamily* fam : families)
{
auto ex = expectedVisibleChunks[fam];
EXPECT_EQ(ex.numActors, fam->getActorCount());
numActors += ex.numActors;
std::vector<TkActor*> actors(fam->getActorCount());
fam->getActors(actors.data(), static_cast<uint32_t>(actors.size()));
for (TkActor* actor : actors)
{
EXPECT_EQ(ex.numChunks, actor->getVisibleChunkCount());
}
}
size_t numActorsExpected = 0;
for (auto expected : expectedVisibleChunks)
{
numActorsExpected += expected.second.numActors;
}
EXPECT_EQ(numActorsExpected, numActors);
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Tests
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
TEST_F(TkTestStrict, CreateFramework)
{
createFramework();
releaseFramework();
}
TEST_F(TkTestStrict, CreateAsset)
{
createFramework();
createTestAssets();
releaseTestAssets();
releaseFramework();
}
TEST_F(TkTestStrict, ActorDamageNoGroup)
{
createFramework();
createTestAssets();
TkFramework* fwk = NvBlastTkFrameworkGet();
TkActorDesc actorDesc;
actorDesc.asset = testAssets[0];
TkActor* actor = fwk->createActor(actorDesc);
const size_t bondFractureCount = 4;
NvBlastFractureBuffers commands;
NvBlastBondFractureData bdata[bondFractureCount];
for (uint32_t i = 0; i < bondFractureCount; i++)
{
bdata[i].nodeIndex0 = 2 * i + 0;
bdata[i].nodeIndex1 = 2 * i + 1;
bdata[i].health = 1.0f;
}
commands.bondFractureCount = bondFractureCount;
commands.bondFractures = bdata;
commands.chunkFractureCount = 0;
commands.chunkFractures = nullptr;
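// Apply the commands directly (no group processing); the same buffer also receives the
// resulting fracture events, as verified by the bondFractureCount check below.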
actor->applyFracture(&commands, &commands);
TkFamily& family = actor->getFamily();
EXPECT_TRUE(commands.bondFractureCount == 4);
EXPECT_TRUE(actor->isPending());
TkGroupDesc gdesc;
gdesc.workerCount = m_taskman->getCpuDispatcher()->getWorkerCount();
TkGroup* group = fwk->createGroup(gdesc);
EXPECT_TRUE(group != nullptr);
m_groupTM->setGroup(group);
group->addActor(*actor);
m_groupTM->process();
m_groupTM->wait();
EXPECT_FALSE(actor->isPending());
EXPECT_EQ(2, family.getActorCount());
releaseFramework();
}
TEST_F(TkTestStrict, ActorDamageGroup)
{
TEST_ZONE_BEGIN("ActorDamageGroup");
createFramework();
createTestAssets();
TkFramework* fwk = NvBlastTkFrameworkGet();
TestFamilyTracker ftrack1, ftrack2;
TkGroupDesc gdesc;
gdesc.workerCount = m_taskman->getCpuDispatcher()->getWorkerCount();
TkGroup* group = fwk->createGroup(gdesc);
EXPECT_TRUE(group != nullptr);
m_groupTM->setGroup(group);
NvBlastExtRadialDamageDesc radialDamage = getRadialDamageDesc(0, 0, 0);
NvBlastExtProgramParams radialDamageParams = { &radialDamage, nullptr };
NvBlastExtShearDamageDesc shearDamage = getShearDamageDesc(0, 0, 0);
NvBlastExtProgramParams shearDamageParams = { &shearDamage, nullptr };
CSParams csDamage0(0, 0.0f);
NvBlastExtProgramParams csDamageParams0 = { &csDamage0, nullptr };
CSParams csDamage1(1, 0.0f);
NvBlastExtProgramParams csDamageParams1 = { &csDamage1, nullptr };
CSParams csDamage2(2, 0.0f);
NvBlastExtProgramParams csDamageParams2 = { &csDamage2, nullptr };
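// CSParams(axis, coordinate): the cube-slicer program presumably halves the cube along the
// given axis at the given coordinate, so slicing along all three axes yields 8 pieces.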
std::vector<TkFamily*> families;
TkFamily* trackedFamily;
std::map<TkFamily*, ExpectedVisibleChunks> expectedVisibleChunks;
{
TkActorDesc adesc(testAssets[0]);
TkActor* actor1 = fwk->createActor(adesc);
EXPECT_TRUE(actor1 != nullptr);
TkActor* actor2 = fwk->createActor(adesc);
EXPECT_TRUE(actor2 != nullptr);
expectedVisibleChunks[&actor1->getFamily()] = ExpectedVisibleChunks(8, 1); // full damage
expectedVisibleChunks[&actor2->getFamily()] = ExpectedVisibleChunks(1, 1); // not split
GeneratorAsset cube;
TkAssetDesc assetDesc;
generateCube(cube, assetDesc, 5, 2);
assetDesc.bondFlags = nullptr;
TkAsset* cubeAsset = fwk->createAsset(assetDesc);
testAssets.push_back(cubeAsset);
TkActorDesc cubeAD(cubeAsset);
TkActor* cubeActor1 = fwk->createActor(cubeAD);
EXPECT_TRUE(cubeActor1 != nullptr);
trackedFamily = &cubeActor1->getFamily();
cubeActor1->getFamily().addListener(ftrack1);
TkActor* cubeActor2 = fwk->createActor(cubeAD);
EXPECT_TRUE(cubeActor2 != nullptr);
expectedVisibleChunks[&cubeActor1->getFamily()] = ExpectedVisibleChunks(2, 4); // split in 2, 4 chunks each
expectedVisibleChunks[&cubeActor2->getFamily()] = ExpectedVisibleChunks(1, 1); // not split
ftrack1.insertActor(cubeActor1);
ftrack2.insertActor(actor1);
actor1->getFamily().addListener(ftrack2);
TEST_ZONE_BEGIN("add to groups");
group->addActor(*cubeActor1);
group->addActor(*cubeActor2);
group->addActor(*actor1);
group->addActor(*actor2);
TEST_ZONE_END("add to groups");
families.push_back(&cubeActor1->getFamily());
families.push_back(&cubeActor2->getFamily());
families.push_back(&actor1->getFamily());
families.push_back(&actor2->getFamily());
cubeActor1->damage(getCubeSlicerProgram(), &csDamageParams0);
actor1->damage(getFalloffProgram(), &radialDamageParams);
}
EXPECT_FALSE(group->endProcess());
m_groupTM->process();
m_groupTM->wait();
testResults(families, expectedVisibleChunks);
{
std::vector<TkActor*> actors(trackedFamily->getActorCount());
trackedFamily->getActors(actors.data(), static_cast<uint32_t>(actors.size()));
for (TkActor* actor : actors)
{
actor->damage(getCubeSlicerProgram(), &csDamageParams1);
}
}
expectedVisibleChunks[trackedFamily] = ExpectedVisibleChunks(4, 2);
m_groupTM->process();
m_groupTM->wait();
testResults(families, expectedVisibleChunks);
{
std::vector<TkActor*> actors(trackedFamily->getActorCount());
trackedFamily->getActors(actors.data(), static_cast<uint32_t>(actors.size()));
for (TkActor* actor : actors)
{
actor->damage(getCubeSlicerProgram(), &csDamageParams2);
}
}
expectedVisibleChunks[trackedFamily] = ExpectedVisibleChunks(8, 1);
m_groupTM->process();
m_groupTM->wait();
testResults(families, expectedVisibleChunks);
{
std::vector<TkActor*> actors(trackedFamily->getActorCount());
trackedFamily->getActors(actors.data(), static_cast<uint32_t>(actors.size()));
TEST_ZONE_BEGIN("damage");
for (TkActor* actor : actors)
{
actor->damage(getFalloffProgram(), &radialDamageParams);
}
TEST_ZONE_END("damage");
}
expectedVisibleChunks[trackedFamily] = ExpectedVisibleChunks(4096, 1);
m_groupTM->process();
m_groupTM->wait();
testResults(families, expectedVisibleChunks);
{
std::vector<TkActor*> actors(trackedFamily->getActorCount());
trackedFamily->getActors(actors.data(), static_cast<uint32_t>(actors.size()));
TEST_ZONE_BEGIN("damage");
for (TkActor* actor : actors)
{
actor->damage(getShearProgram(), &shearDamageParams);
}
TEST_ZONE_END("damage");
}
m_groupTM->process();
m_groupTM->wait();
{
std::vector<TkActor*> actors(trackedFamily->getActorCount());
trackedFamily->getActors(actors.data(), static_cast<uint32_t>(actors.size()));
TEST_ZONE_BEGIN("damage");
for (TkActor* actor : actors)
{
actor->damage(getShearProgram(), &shearDamageParams);
}
TEST_ZONE_END("damage");
}
m_groupTM->process();
m_groupTM->wait();
group->release();
TEST_ZONE_BEGIN("family release");
trackedFamily->release();
TEST_ZONE_END("family release");
releaseTestAssets();
releaseFramework();
TEST_ZONE_END("ActorDamageGroup");
}
TEST_F(TkTestStrict, ActorDamageMultiGroup)
{
createFramework();
createTestAssets();
TkFramework* fwk = NvBlastTkFrameworkGet();
TestFamilyTracker ftrack1, ftrack2;
TkGroupDesc gdesc;
gdesc.workerCount = m_taskman->getCpuDispatcher()->getWorkerCount();
TkGroup* group0 = fwk->createGroup(gdesc);
EXPECT_TRUE(group0 != nullptr);
TkGroup* group1 = fwk->createGroup(gdesc);
EXPECT_TRUE(group1 != nullptr);
TkGroupTaskManager& gtm1 = *TkGroupTaskManager::create(*m_taskman, group1);
TkGroupTaskManager& gtm0 = *TkGroupTaskManager::create(*m_taskman, group0);
std::vector<TkFamily*> families(2);
std::map<TkFamily*, ExpectedVisibleChunks> expectedVisibleChunks;
CSParams csDamage0(0, 0.0f);
NvBlastExtProgramParams csDamageParams0 = { &csDamage0, nullptr };
CSParams csDamage1(1, 0.0f);
NvBlastExtProgramParams csDamageParams1 = { &csDamage1, nullptr };
CSParams csDamage2(2, 0.0f);
NvBlastExtProgramParams csDamageParams2 = { &csDamage2, nullptr };
// prepare 2 equal actors/families and damage
{
GeneratorAsset cube;
TkAssetDesc assetDesc;
generateCube(cube, assetDesc, 6, 2, 5);
assetDesc.bondFlags = nullptr;
TkAsset* cubeAsset = fwk->createAsset(assetDesc);
testAssets.push_back(cubeAsset);
TkActorDesc cubeAD(cubeAsset);
TkActor* cubeActor0 = fwk->createActor(cubeAD);
EXPECT_TRUE(cubeActor0 != nullptr);
cubeActor0->getFamily().addListener(ftrack1);
TkActor* cubeActor1 = fwk->createActor(cubeAD);
EXPECT_TRUE(cubeActor1 != nullptr);
cubeActor1->getFamily().addListener(ftrack2);
ftrack1.insertActor(cubeActor0);
ftrack2.insertActor(cubeActor1);
group0->addActor(*cubeActor0);
group1->addActor(*cubeActor1);
families[0] = (&cubeActor0->getFamily());
families[1] = (&cubeActor1->getFamily());
{
cubeActor0->damage(getCubeSlicerProgram(), &csDamageParams0);
cubeActor0->damage(getCubeSlicerProgram(), &csDamageParams1);
cubeActor1->damage(getCubeSlicerProgram(), &csDamageParams0);
}
expectedVisibleChunks[families[0]] = ExpectedVisibleChunks(4, 2); // split in 4, 2 chunks each
expectedVisibleChunks[families[1]] = ExpectedVisibleChunks(2, 4); // split in 2, 4 chunks each
}
// async process 2 groups
{
EXPECT_GT(gtm0.process(2), (uint32_t)0);
EXPECT_GT(gtm1.process(2), (uint32_t)0);
uint32_t completed = 0;
while (completed < 2)
{
if (gtm0.wait(false))
completed++;
if (gtm1.wait(false))
completed++;
}
}
// checks
testResults(families, expectedVisibleChunks);
EXPECT_EQ(families[0]->getActorCount(), 4);
EXPECT_EQ(group0->getActorCount(), 4);
EXPECT_EQ(families[1]->getActorCount(), 2);
EXPECT_EQ(group1->getActorCount(), 2);
// we have group0 with 4 actors 2 chunks:
// group0: [2]' [2]' [2]' [2]' (family0')
// group1: [4]'' [4]'' (family1'')
// rearrange:
// group0: [2]' [2]' [4]''
// group1: [4]'' [2]' [2]'
{
TkActor* group0Actors[2];
group0->getActors(group0Actors, 2, 1); // start index: 1, because..why not?
TkActor* group1Actors[2];
group1->getActors(group1Actors, 2, 0);
group0Actors[0]->removeFromGroup();
group1->addActor(*group0Actors[0]);
group0Actors[1]->removeFromGroup();
group1->addActor(*group0Actors[1]);
group1Actors[0]->removeFromGroup();
group0->addActor(*group1Actors[0]);
}
// checks
EXPECT_EQ(families[0]->getActorCount(), 4);
EXPECT_EQ(group0->getActorCount(), 3);
EXPECT_EQ(families[1]->getActorCount(), 2);
EXPECT_EQ(group1->getActorCount(), 3);
// damage all
{
TkActor* allActors[6];
families[0]->getActors(allActors, 4, 0);
families[1]->getActors(allActors + 4, 2, 0);
typedef std::pair<TkGroup*, TkFamily*> pair;
std::set<pair> combinations;
for (auto actor : allActors)
{
combinations.emplace(pair(actor->getGroup(), &actor->getFamily()));
if (actor->getVisibleChunkCount() == 4)
{
actor->damage(getCubeSlicerProgram(), &csDamageParams1);
}
actor->damage(getCubeSlicerProgram(), &csDamageParams2);
}
EXPECT_EQ(combinations.size(), 4);
expectedVisibleChunks[families[0]] = ExpectedVisibleChunks(8, 1); // split in 8, 1 chunks each
expectedVisibleChunks[families[1]] = ExpectedVisibleChunks(8, 1); // split in 8, 1 chunks each
}
// async process 2 groups
{
EXPECT_GT(gtm1.process(2), (uint32_t)0);
EXPECT_GT(gtm0.process(2), (uint32_t)0);
uint32_t completed = 0;
while (completed < 2)
{
if (gtm1.wait(false))
completed++;
if (gtm0.wait(false))
completed++;
}
}
// checks
testResults(families, expectedVisibleChunks);
EXPECT_EQ(families[0]->getActorCount(), 8);
EXPECT_EQ(ftrack1.actors.size(), 8);
EXPECT_EQ(group0->getActorCount(), 8);
EXPECT_EQ(families[1]->getActorCount(), 8);
EXPECT_EQ(ftrack2.actors.size(), 8);
EXPECT_EQ(group1->getActorCount(), 8);
// damage till the end, aggressively
std::default_random_engine re;
{
NvBlastExtRadialDamageDesc radialDamage = getRadialDamageDesc(0, 0, 0);
NvBlastExtProgramParams radialDamageParams = { &radialDamage, nullptr };
NvBlastExtShearDamageDesc shearDamage = getShearDamageDesc(0, 0, 0);
NvBlastExtProgramParams shearDamageParams = { &shearDamage, nullptr };
std::vector<TkActor*> actors;
while (1)
{
TEST_ZONE_BEGIN("damage loop");
uint32_t n0 = families[0]->getActorCount();
uint32_t n1 = families[1]->getActorCount();
actors.resize(n0 + n1);
families[0]->getActors(actors.data(), n0, 0);
families[1]->getActors(actors.data() + n0, n1, 0);
bool workTBD = false;
for (TkActor* actor : actors)
{
if (!NvBlastActorCanFracture(actor->getActorLL(), nullptr))
{
continue;
}
workTBD = true;
if (actor->getGraphNodeCount() > 1)
{
actor->damage(getFalloffProgram(), &radialDamageParams);
}
else
{
actor->damage(getShearProgram(), &shearDamageParams);
}
if (re() % 1000 < 500)
{
// switch group
TkGroup* newGroup = actor->getGroup() == group0 ? group1 : group0;
actor->removeFromGroup();
newGroup->addActor(*actor);
}
}
TEST_ZONE_END("damage loop");
if (!workTBD)
break;
// async process 2 groups
{
EXPECT_GT(gtm1.process(2), (uint32_t)0);
EXPECT_GT(gtm0.process(2), (uint32_t)0);
uint32_t completed = 0;
while (completed < 2)
{
if (gtm1.wait(false))
completed++;
if (gtm0.wait(false))
completed++;
}
}
}
}
// checks
EXPECT_EQ(families[0]->getActorCount(), ftrack1.actors.size());
EXPECT_EQ(families[1]->getActorCount(), ftrack2.actors.size());
EXPECT_EQ(65536, families[0]->getActorCount() + families[1]->getActorCount());
EXPECT_EQ(65536, group0->getActorCount() + group1->getActorCount());
gtm0.release();
gtm1.release();
group0->release();
group1->release();
for (auto f : families)
f->release();
releaseTestAssets();
releaseFramework();
}
TEST_F(TkTestStrict, ActorDamageBufferedDamage)
{
createFramework();
TkFramework* fwk = NvBlastTkFrameworkGet();
// group
TkGroupDesc gdesc;
gdesc.workerCount = m_taskman->getCpuDispatcher()->getWorkerCount();
TkGroup* group = fwk->createGroup(gdesc);
EXPECT_TRUE(group != nullptr);
m_groupTM->setGroup(group);
// random engine
std::default_random_engine re;
// cube asset
GeneratorAsset cube;
TkAssetDesc assetDesc;
generateCube(cube, assetDesc, 4, 2, 3);
assetDesc.bondFlags = nullptr;
TkAsset* cubeAsset = fwk->createAsset(assetDesc);
testAssets.push_back(cubeAsset);
// actor desc
TkActorDesc cubeAD(cubeAsset);
// The test is repeated 'trials' times because of the random shuffle inside.
const uint32_t trials = 100;
for (uint32_t i = 0; i < trials; i++)
{
// create actor
TkActor* actor = fwk->createActor(cubeAD);
EXPECT_TRUE(actor != nullptr);
TkFamily* family = (&actor->getFamily());
group->addActor(*actor);
// damage 3 times with CubeSlicer 2 * 2 * 2 = 8 actors
// damage 4 corners with falloff radial 4 * 2 = 8 actors
// total 16 actors
uint32_t expectedActorCount = 16;
// falloff params
const float P = 0.5f;
const float R = 0.35f;
// Two of the damage types go through the user's NvBlastDamageProgram; these descriptors must stay alive until the group sync.
NvBlastExtRadialDamageDesc userR0 = getRadialDamageDesc(P, P, 0, R, R);
NvBlastExtProgramParams userProgramParams0 = { &userR0, nullptr };
NvBlastExtRadialDamageDesc userR1 = getRadialDamageDesc(-P, P, 0, R, R);
NvBlastExtProgramParams userProgramParams1 = { &userR1, nullptr };
CSParams csDamage0(0, 0.0f);
NvBlastExtProgramParams csDamageParams0 = { &csDamage0, nullptr };
CSParams csDamage1(1, 0.0f);
NvBlastExtProgramParams csDamageParams1 = { &csDamage1, nullptr };
CSParams csDamage2(2, 0.0f);
NvBlastExtProgramParams csDamageParams2 = { &csDamage2, nullptr };
NvBlastExtRadialDamageDesc r0 = getRadialDamageDesc(P, -P, 0, R, R);
NvBlastExtProgramParams rDamageParams0 = { &r0, nullptr };
NvBlastExtRadialDamageDesc r1 = getRadialDamageDesc(-P, -P, 0, R, R);
NvBlastExtProgramParams rDamageParams1 = { &r1, nullptr };
// fill damage functions, shuffle and apply
{
const uint32_t damageCount = 7;
std::vector<std::function<void(void)>> damageFns(damageCount);
damageFns[0] = [&]() { actor->damage(getCubeSlicerProgram(), &csDamageParams0); };
damageFns[1] = [&]() { actor->damage(getCubeSlicerProgram(), &csDamageParams1); };
damageFns[2] = [&]() { actor->damage(getCubeSlicerProgram(), &csDamageParams2); };
damageFns[3] = [&]() { actor->damage(getFalloffProgram(), &rDamageParams0); };
damageFns[4] = [&]() { actor->damage(getFalloffProgram(), &rDamageParams1); };
damageFns[5] = [&]() { actor->damage(getFalloffProgram(), &userProgramParams0); };
damageFns[6] = [&]() { actor->damage(getFalloffProgram(), &userProgramParams1); };
// shuffle order!
std::shuffle(std::begin(damageFns), std::end(damageFns), re);
for (uint32_t i = 0; i < damageCount; i++)
{
damageFns[i]();
}
}
// sync
EXPECT_GT(m_groupTM->process(), (uint32_t)0);
m_groupTM->wait();
// check
const auto actorCount = family->getActorCount();
EXPECT_EQ(actorCount, expectedActorCount);
EXPECT_EQ(group->getActorCount(), expectedActorCount);
// release
std::vector<TkActor*> actors(family->getActorCount());
family->getActors(actors.data(), static_cast<uint32_t>(actors.size()));
for (auto a : actors)
a->removeFromGroup();
family->release();
}
group->release();
releaseFramework();
}
TEST_F(TkTestStrict, CreateActor)
{
createFramework();
TkFramework* framework = NvBlastTkFrameworkGet();
const uint32_t assetDescCount = sizeof(g_assetDescs) / sizeof(g_assetDescs[0]);
std::vector<TkAsset*> assets(assetDescCount);
// assets
for (uint32_t i = 0; i < assetDescCount; ++i)
{
TkAssetDesc desc;
reinterpret_cast<NvBlastAssetDesc&>(desc) = g_assetDescs[i];
desc.bondFlags = nullptr;
assets[i] = framework->createAsset(desc);
EXPECT_TRUE(assets[i] != nullptr);
}
// actors
std::vector<TkActor*> actors;
std::vector<TkFamily*> actorFamilies;
for (const TkAsset* asset : assets)
{
for (int i = 0; i < 2; i++)
{
TkActorDesc desc(asset);
TkActor* actor = framework->createActor(desc);
EXPECT_TRUE(actor != nullptr);
EXPECT_TRUE(actor->getActorLL() != nullptr);
//EXPECT_TRUE(&actor->getFamily() != nullptr);
EXPECT_TRUE(actor->getFamily().getActorCount() == 1);
actors.push_back(actor);
EXPECT_TRUE(std::find(actorFamilies.begin(), actorFamilies.end(), &actor->getFamily()) == actorFamilies.end());
actorFamilies.push_back(&actor->getFamily());
}
}
// framework checks
{
std::vector<TkObject*> objects;
// assets
{
const TkType* assetType = framework->getType(TkTypeIndex::Asset);
objects.resize(framework->getObjectCount(*assetType));
EXPECT_TRUE(framework->getObjects(reinterpret_cast<TkIdentifiable**>(objects.data()), static_cast<uint32_t>(objects.size()), *assetType) == static_cast<uint32_t>(objects.size()));
ExpectArrayMatch(objects.data(), objects.size(), (TkObject**)assets.data(), assets.size());
}
// actors
# if(0) // framework does not track actors explicitly anymore
{
const TkType* actorType = framework->getType(TkTypeIndex::Actor);
objects.resize(framework->getObjectCount(*actorType));
EXPECT_TRUE(framework->getObjects(reinterpret_cast<TkIdentifiable**>(objects.data()), objects.size(), *actorType) == objects.size());
ExpectArrayMatch(objects.data(), objects.size(), (TkObject**)actors.data(), actors.size());
}
# endif
// families
{
const TkType* familyType = framework->getType(TkTypeIndex::Family);
objects.resize(framework->getObjectCount(*familyType));
EXPECT_TRUE(framework->getObjects(reinterpret_cast<TkIdentifiable**>(objects.data()), static_cast<uint32_t>(objects.size()), *familyType) == static_cast<uint32_t>(objects.size()));
ExpectArrayMatch(objects.data(), objects.size(), (TkObject**)actorFamilies.data(), actorFamilies.size());
}
}
// release
for (TkActor* actor : actors)
{
actor->release();
}
for (TkAsset* asset : assets)
{
asset->release();
}
releaseFramework();
}
template<int FailMask, int Verbosity>
TkFamily* TkBaseTest<FailMask, Verbosity>::familySerialization(TkFamily* family)
{
#if 0
TkFramework* fw = NvBlastTkFrameworkGet();
const TkType* familyType = fw->getType(TkTypeIndex::Family);
EXPECT_TRUE(familyType != nullptr);
PsMemoryBuffer* membuf = NVBLAST_NEW(PsMemoryBuffer);
EXPECT_TRUE(membuf != nullptr);
if (membuf != nullptr)
{
const bool result = family->serialize(*membuf);
EXPECT_EQ(true, result);
if (!result)
{
return family;
}
const size_t familyActorCount = family->getActorCount();
const TkAsset* familyAsset = family->getAsset();
family->release();
family = reinterpret_cast<TkFamily*>(fw->deserialize(*membuf));
EXPECT_TRUE(family != nullptr);
if (family != nullptr)
{
EXPECT_EQ(familyActorCount, family->getActorCount());
EXPECT_EQ(familyAsset, family->getAsset());
}
membuf->release();
}
return family;
#endif
return nullptr;
}
TEST_F(TkTestAllowWarnings, DISABLED_FamilySerialization)
{
createFramework();
TkFramework* fwk = NvBlastTkFrameworkGet();
// group
TkGroupDesc gdesc;
gdesc.workerCount = m_taskman->getCpuDispatcher()->getWorkerCount();
TkGroup* group = fwk->createGroup(gdesc);
EXPECT_TRUE(group != nullptr);
m_groupTM->setGroup(group);
// random engine
std::default_random_engine re;
// cube asset
TkAsset* cubeAsset = createCubeAsset(4, 2, 3, false);
// actor desc
TkActorDesc cubeAD(cubeAsset);
// create actor
TkActor* actor = fwk->createActor(cubeAD);
EXPECT_TRUE(actor != nullptr);
TkFamily* family = (&actor->getFamily());
// set an ID
NvBlastID id;
memcpy(id.data, "Observer-expectancy effect", sizeof(NvBlastID)); // Stuffing an arbitrary 16 bytes (The prefix of the given string)
cubeAsset->setID(id);
// serialize/deserialize
family = familySerialization(family);
// fill damage functions, apply one by one and serialize family in between
{
// damage 3 times with CubeSlicer 2 * 2 * 2 = 8 actors
// damage 4 corners with falloff radial 4 * 2 = 8 actors
// total 16 actors
uint32_t expectedActorCount = 16;
// cube slicer params
CSParams csDamage0(0, 0.0f);
NvBlastExtProgramParams csDamageParams0 = { &csDamage0, nullptr };
CSParams csDamage1(1, 0.0f);
NvBlastExtProgramParams csDamageParams1 = { &csDamage1, nullptr };
CSParams csDamage2(2, 0.0f);
NvBlastExtProgramParams csDamageParams2 = { &csDamage2, nullptr };
// falloff params
const float P = 0.5f;
const float R = 0.35f;
NvBlastExtRadialDamageDesc r0 = getRadialDamageDesc(P, P, 0, R, R);
NvBlastExtRadialDamageDesc r1 = getRadialDamageDesc(-P, P, 0, R, R);
NvBlastExtRadialDamageDesc r2 = getRadialDamageDesc(P, -P, 0, R, R);
NvBlastExtRadialDamageDesc r3 = getRadialDamageDesc(-P, -P, 0, R, R);
NvBlastExtProgramParams r0p = { &r0, nullptr };
NvBlastExtProgramParams r1p = { &r1, nullptr };
NvBlastExtProgramParams r2p = { &r2, nullptr };
NvBlastExtProgramParams r3p = { &r3, nullptr };
const uint32_t damageCount = 7;
std::vector<std::function<void(TkActor* a)>> damageFns(damageCount);
damageFns[0] = [&](TkActor* a) { a->damage(getCubeSlicerProgram(), &csDamageParams0); };
damageFns[1] = [&](TkActor* a) { a->damage(getCubeSlicerProgram(), &csDamageParams1); };
damageFns[2] = [&](TkActor* a) { a->damage(getCubeSlicerProgram(), &csDamageParams2); };
damageFns[3] = [&](TkActor* a) { a->damage(getFalloffProgram(), &r0p); };
damageFns[4] = [&](TkActor* a) { a->damage(getFalloffProgram(), &r1p); };
damageFns[5] = [&](TkActor* a) { a->damage(getFalloffProgram(), &r2p); };
damageFns[6] = [&](TkActor* a) { a->damage(getFalloffProgram(), &r3p); };
std::vector<TkActor*> actors(64);
for (uint32_t i = 0; i < damageCount; i++)
{
actors.resize(family->getActorCount());
family->getActors(actors.data(), static_cast<uint32_t>(actors.size()));
// damage
for (auto actor : actors)
{
group->addActor(*actor);
damageFns[i](actor);
}
// sync
EXPECT_GT(m_groupTM->process(), (uint32_t)0);
m_groupTM->wait();
family = familySerialization(family);
}
// check
EXPECT_EQ(family->getActorCount(), expectedActorCount);
}
// release
family->release();
group->release();
releaseFramework();
}
TEST_F(TkTestStrict, GroupStats)
{
createFramework();
TkFramework* fwk = NvBlastTkFrameworkGet();
// group
TkGroupDesc gdesc;
gdesc.workerCount = m_taskman->getCpuDispatcher()->getWorkerCount();
TkGroup* group = fwk->createGroup(gdesc);
EXPECT_TRUE(group != nullptr);
m_groupTM->setGroup(group);
TkAsset* cubeAsset = createCubeAsset(4, 2);
TkActorDesc cubeDesc(cubeAsset);
TkActor* cubeActor1 = fwk->createActor(cubeDesc);
TkActor* cubeActor2 = fwk->createActor(cubeDesc);
TkActor* cubeActor3 = fwk->createActor(cubeDesc);
TkActor* cubeActor4 = fwk->createActor(cubeDesc);
group->addActor(*cubeActor1);
group->addActor(*cubeActor2);
group->addActor(*cubeActor3);
group->addActor(*cubeActor4);
NvBlastExtRadialDamageDesc r0 = getRadialDamageDesc(0.0f, 0.0f, 0.0f);
NvBlastExtProgramParams radialDamageParams = { &r0, nullptr };
cubeActor1->damage(getFalloffProgram(), &radialDamageParams);
cubeActor2->damage(getFalloffProgram(), &radialDamageParams);
cubeActor3->damage(getFalloffProgram(), &radialDamageParams);
cubeActor4->damage(getFalloffProgram(), &radialDamageParams);
Nv::Blast::Time time;
m_groupTM->process();
m_groupTM->wait();
int64_t groupTime = time.getElapsedTicks();
TkGroupStats gstats;
group->getStats(gstats);
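// Sum of the low-level timers (fracture, island, material, partition, visibility) accumulated by the group.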
int64_t total = gstats.timers.fracture + gstats.timers.island + gstats.timers.material + gstats.timers.partition + gstats.timers.visibility;
#if NV_PROFILE
EXPECT_GT(total, 0); // some values are reported
EXPECT_LT(groupTime, total); // total LL time is higher than group time
EXPECT_GT((double)gstats.workerTime / groupTime, 2.0); // expect some minimal speedup (including overhead)
EXPECT_EQ(4, gstats.processedActorsCount); // actors processed
#endif
releaseFramework();
}
TEST_F(TkTestStrict, FractureReportSupport)
{
createFramework();
TkFramework* fwk = NvBlastTkFrameworkGet();
NvBlastChunkDesc chunkDescs[] =
{
{ { 0,0,0 }, 2, UINT32_MAX, NvBlastChunkDesc::SupportFlag, 'prnt' },
{ { -1,0,0 }, 1, 0, NvBlastChunkDesc::NoFlags, 'left' },
{ { +1,0,0 }, 1, 0, NvBlastChunkDesc::NoFlags, 'rght' },
};
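// One support chunk ('prnt') with two non-support children; fully fracturing it is expected
// to produce two child actors, one per child chunk (see the listener below).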
TkAssetDesc assetDesc;
assetDesc.chunkCount = sizeof(chunkDescs) / sizeof(NvBlastChunkDesc);
assetDesc.chunkDescs = chunkDescs;
assetDesc.bondCount = 0;
assetDesc.bondDescs = nullptr;
assetDesc.bondFlags = nullptr;
const TkAsset* asset = fwk->createAsset(assetDesc);
TkActorDesc actorDesc;
actorDesc.asset = asset;
TkActor* actor = fwk->createActor(actorDesc);
actor->userData = (void*)'root';
class Listener : public TkEventListener
{
void receive(const TkEvent* events, uint32_t eventCount) override
{
for (uint32_t i = 0; i < eventCount; i++)
{
const TkEvent& event = events[i];
switch (event.type)
{
case TkJointUpdateEvent::EVENT_TYPE:
FAIL() << "not expecting joints here";
break;
case TkFractureCommands::EVENT_TYPE:
{
const TkActorData& actor = event.getPayload<TkFractureCommands>()->tkActorData;
// Group::sync still needed the family for SharedMemory management.
EXPECT_TRUE(nullptr != actor.family);
EXPECT_EQ((void*)'root', actor.userData);
EXPECT_EQ(0, actor.index);
}
break;
case TkFractureEvents::EVENT_TYPE:
{
const TkActorData& actor = event.getPayload<TkFractureEvents>()->tkActorData;
EXPECT_EQ((void*)'root', actor.userData);
EXPECT_EQ(0, actor.index);
}
break;
case TkSplitEvent::EVENT_TYPE:
{
const TkSplitEvent* split = event.getPayload<TkSplitEvent>();
EXPECT_TRUE(nullptr != split->parentData.family);
EXPECT_EQ((void*)'root', split->parentData.userData);
EXPECT_EQ(0, split->parentData.index);
EXPECT_EQ(2, split->numChildren);
EXPECT_EQ(1, split->children[0]->getVisibleChunkCount());
uint32_t visibleChunkIndex;
// child order is not mandatory
{
TkActor* a = split->children[0];
a->getVisibleChunkIndices(&visibleChunkIndex, 1);
uint32_t li = a->getIndex();
EXPECT_EQ(1, li);
EXPECT_EQ(split->parentData.family, &a->getFamily());
EXPECT_EQ('left', a->getAsset()->getChunks()[visibleChunkIndex].userData);
}
{
TkActor* a = split->children[1];
a->getVisibleChunkIndices(&visibleChunkIndex, 1);
uint32_t ri = a->getIndex();
EXPECT_EQ(2, ri);
EXPECT_EQ(split->parentData.family, &a->getFamily());
EXPECT_EQ('rght', a->getAsset()->getChunks()[visibleChunkIndex].userData);
}
}
break;
default:
FAIL() << "should not get here";
}
}
}
} listener;
actor->getFamily().addListener(listener);
// expected state for the original actor, see Listener
EXPECT_EQ((void*)'root', actor->userData);
EXPECT_EQ(0, actor->getIndex());
TkGroupDesc groupDesc = { m_taskman->getCpuDispatcher()->getWorkerCount() };
TkGroup* group = fwk->createGroup(groupDesc);
m_groupTM->setGroup(group);
group->addActor(*actor);
// this will trigger hierarchical chunk fracture
NvBlastExtRadialDamageDesc radialDamage = getRadialDamageDesc(0, 0, 0);
NvBlastExtProgramParams radialDamageParams = { &radialDamage, nullptr };
actor->damage(getFalloffProgram(), &radialDamageParams);
m_groupTM->process();
m_groupTM->wait();
releaseFramework();
}
TEST_F(TkTestStrict, FractureReportGraph)
{
createFramework();
TkFramework* fwk = NvBlastTkFrameworkGet();
NvBlastBond bondToBreak = { { 1, 0, 0 }, 1, { 0, 0, 0 }, 0 };
NvBlastBond bondToKeep = { { 1, 0, 0 }, 1, { 10, 10, 10 }, 0 };
NvBlastBondDesc bondDescs[] =
{
{ bondToKeep, { 1, 2 } },
{ bondToBreak, { 2, 3 } },
};
NvBlastChunkDesc chunkDescs[] =
{
{ { 0, 0, 0 }, 2, UINT32_MAX, NvBlastChunkDesc::NoFlags, 'root' },
{ { -1, 0, 0 }, 1, 0, NvBlastChunkDesc::SupportFlag, 'A' },
{ { +1, 0, 0 }, 1, 0, NvBlastChunkDesc::SupportFlag, 'B' },
{ { +1, 0, 0 }, 1, 0, NvBlastChunkDesc::SupportFlag, 'C' },
};
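// Only bondToBreak has its centroid at the origin, inside the damage radius used below;
// bondToKeep (centroid far away) survives, so chunk 'C' splits off while 'A' and 'B' stay bonded.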
TkAssetDesc assetDesc;
assetDesc.chunkCount = sizeof(chunkDescs) / sizeof(NvBlastChunkDesc);
assetDesc.chunkDescs = chunkDescs;
assetDesc.bondCount = 2;
assetDesc.bondDescs = bondDescs;
assetDesc.bondFlags = nullptr;
const TkAsset* asset = fwk->createAsset(assetDesc);
TkActorDesc actorDesc;
actorDesc.asset = asset;
TkActor* rootActor = fwk->createActor(actorDesc);
rootActor->userData = (void*)'root';
class Listener : public TkEventListener
{
void receive(const TkEvent* events, uint32_t eventCount) override
{
for (uint32_t i = 0; i < eventCount; i++)
{
const TkEvent& event = events[i];
switch (event.type)
{
case TkJointUpdateEvent::EVENT_TYPE:
FAIL() << "not expecting joints here";
break;
case TkFractureCommands::EVENT_TYPE:
{
const TkActorData& actor = event.getPayload<TkFractureCommands>()->tkActorData;
// Group::sync still needed the family for SharedMemory management.
EXPECT_TRUE(nullptr != actor.family);
// original actor state is not preserved; the commented-out check below would fail
EXPECT_EQ((void*)'root', actor.userData);
EXPECT_EQ(0, actor.index);
// this information was invalid anyway
//EXPECT_EQ(1, actor->getVisibleChunkCount()) << "state not preserved";
}
break;
case TkFractureEvents::EVENT_TYPE:
{
const TkActorData& actor = event.getPayload<TkFractureEvents>()->tkActorData;
// Group::sync still needed the family for SharedMemory management.
EXPECT_TRUE(nullptr != actor.family);
// original actor state is not preserved; the commented-out check below would fail
EXPECT_EQ((void*)'root', actor.userData);
EXPECT_EQ(0, actor.index);
// this information was invalid anyway
//EXPECT_EQ(1, actor->getVisibleChunkCount()) << "state not preserved";
}
break;
case TkSplitEvent::EVENT_TYPE:
{
const TkSplitEvent* split = event.getPayload<TkSplitEvent>();
EXPECT_EQ((void*)'root', split->parentData.userData);
EXPECT_EQ(0, split->parentData.index);
EXPECT_EQ(2, split->numChildren);
uint32_t visibleChunkIndex[2];
// child order is not mandatory
{
TkActor* a = split->children[1];
EXPECT_EQ(2, a->getVisibleChunkCount()); // chunks A and B
a->getVisibleChunkIndices(visibleChunkIndex, 2);
uint32_t actorIndex = a->getIndex();
EXPECT_EQ(0, actorIndex); // same index as the original actor
// visible chunk order is not mandatory
EXPECT_EQ('B', a->getAsset()->getChunks()[visibleChunkIndex[0]].userData);
EXPECT_EQ('A', a->getAsset()->getChunks()[visibleChunkIndex[1]].userData);
}
{
TkActor* a = split->children[0];
EXPECT_EQ(1, a->getVisibleChunkCount());
a->getVisibleChunkIndices(visibleChunkIndex, 1);
uint32_t actorIndex = a->getIndex();
EXPECT_EQ(2, actorIndex);
EXPECT_EQ('C', a->getAsset()->getChunks()[visibleChunkIndex[0]].userData);
}
}
break;
default:
FAIL() << "should not get here";
}
}
}
} listener;
rootActor->getFamily().addListener(listener);
// expected state for the original actor, see Listener
EXPECT_EQ((void*)'root', rootActor->userData);
EXPECT_EQ(0, rootActor->getIndex());
EXPECT_EQ(1, rootActor->getVisibleChunkCount());
TkGroupDesc groupDesc = { m_taskman->getCpuDispatcher()->getWorkerCount() };
TkGroup* group = fwk->createGroup(groupDesc);
m_groupTM->setGroup(group);
group->addActor(*rootActor);
// this will trigger one bond to break
NvBlastExtRadialDamageDesc radialDamage = getRadialDamageDesc(0, 0, 0, 0.5f, 0.5f);
NvBlastExtProgramParams radialDamageParams = { &radialDamage, nullptr };
rootActor->damage(getFalloffProgram(), &radialDamageParams);
m_groupTM->process();
m_groupTM->wait();
releaseFramework();
}
TEST_F(TkTestStrict, SplitWarning) // GWD-167
{
createFramework();
TkFramework* fwk = NvBlastTkFrameworkGet();
NvBlastChunkDesc chunkDescs[] =
{
{ { 0,0,0 }, 2, UINT32_MAX, NvBlastChunkDesc::SupportFlag, 'root' },
{ { -1,0,0 }, 1, 0, NvBlastChunkDesc::NoFlags, 'A' },
{ { +1,0,0 }, 1, 0, NvBlastChunkDesc::NoFlags, 'B' },
{ { -1,0,0 }, 1, 0, NvBlastChunkDesc::NoFlags, 'C' },
{ { +1,0,0 }, 1, 0, NvBlastChunkDesc::NoFlags, 'D' },
{ { -1,0,0 }, 1, 1, NvBlastChunkDesc::NoFlags, 'AAAA' },
{ { +1,0,0 }, 1, 2, NvBlastChunkDesc::NoFlags, 'BBBB' },
{ { -1,0,0 }, 1, 3, NvBlastChunkDesc::NoFlags, 'CCCC' },
{ { +1,0,0 }, 1, 4, NvBlastChunkDesc::NoFlags, 'DDDD' },
};
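// One support chunk ('root') with four children, each of which has exactly one child of its own.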
TkAssetDesc assetDesc;
assetDesc.chunkCount = sizeof(chunkDescs) / sizeof(NvBlastChunkDesc);
assetDesc.chunkDescs = chunkDescs;
assetDesc.bondCount = 0;
assetDesc.bondDescs = nullptr;
assetDesc.bondFlags = nullptr;
const TkAsset* asset = fwk->createAsset(assetDesc);
TkActorDesc actorDesc;
actorDesc.asset = asset;
TkActor* actor = fwk->createActor(actorDesc);
TkGroupDesc groupDesc = { m_taskman->getCpuDispatcher()->getWorkerCount() };
TkGroup* group = fwk->createGroup(groupDesc);
m_groupTM->setGroup(group);
group->addActor(*actor);
NvBlastExtRadialDamageDesc radialDamage = getRadialDamageDesc(0, 0, 0);
NvBlastExtProgramParams radialDamageParams = { &radialDamage, nullptr };
actor->damage(getFalloffProgram(), &radialDamageParams);
m_groupTM->process();
m_groupTM->wait();
releaseFramework();
}
TEST_F(TkTestAllowWarnings, ChangeThreadCountToZero)
{
// tests that group still allocates memory for one worker
// by replacing to a 0 threads cpu dispatcher (warns)
// mainly relies on internal asserts
class EventCounter : public TkEventListener
{
public:
EventCounter() :fracCommands(0), fracEvents(0) {}
void receive(const TkEvent* events, uint32_t eventCount)
{
for (uint32_t i = 0; i < eventCount; i++)
{
const TkEvent& event = events[i];
switch (event.type)
{
case TkFractureCommands::EVENT_TYPE:
fracCommands++;
break;
case TkFractureEvents::EVENT_TYPE:
fracEvents++;
break;
default:
FAIL();
// no split due to single chunk
// no joints
}
}
}
uint32_t fracCommands, fracEvents;
} listener;
createFramework();
TkFramework* fwk = NvBlastTkFrameworkGet();
NvBlastChunkDesc chunkDescs[] = {
{ { 0,0,0 }, 2, UINT32_MAX, NvBlastChunkDesc::SupportFlag, 'root' }
};
TkAssetDesc assetDesc;
assetDesc.chunkCount = sizeof(chunkDescs) / sizeof(NvBlastChunkDesc);
assetDesc.chunkDescs = chunkDescs;
assetDesc.bondCount = 0;
assetDesc.bondDescs = nullptr;
assetDesc.bondFlags = nullptr;
const TkAsset* asset = fwk->createAsset(assetDesc);
TkActorDesc actorDesc;
actorDesc.asset = asset;
TkActor* actor1 = fwk->createActor(actorDesc);
TkActor* actor2 = fwk->createActor(actorDesc);
TkActor* actor3 = fwk->createActor(actorDesc);
TkActor* actor4 = fwk->createActor(actorDesc);
actor1->getFamily().addListener(listener);
actor2->getFamily().addListener(listener);
actor3->getFamily().addListener(listener);
actor4->getFamily().addListener(listener);
TestCpuDispatcher* disp0 = new TestCpuDispatcher(0);
TestCpuDispatcher* disp4 = new TestCpuDispatcher(4);
m_taskman->setCpuDispatcher(*disp4);
TkGroupDesc groupDesc = { m_taskman->getCpuDispatcher()->getWorkerCount() };
TkGroup* group = fwk->createGroup(groupDesc);
m_groupTM->setGroup(group);
group->addActor(*actor1);
group->addActor(*actor2);
m_taskman->setCpuDispatcher(*disp0);
//group->setWorkerCount(m_taskman->getCpuDispatcher()->getWorkerCount());
group->addActor(*actor3);
group->addActor(*actor4);
NvBlastExtRadialDamageDesc radialDamage = getRadialDamageDesc(0, 0, 0);
NvBlastExtProgramParams radialDamageParams = { &radialDamage, nullptr };
actor1->damage(getFalloffProgram(), &radialDamageParams);
actor2->damage(getFalloffProgram(), &radialDamageParams);
actor3->damage(getFalloffProgram(), &radialDamageParams);
actor4->damage(getFalloffProgram(), &radialDamageParams);
m_groupTM->process();
m_groupTM->wait();
EXPECT_EQ(4, listener.fracCommands);
EXPECT_EQ(4, listener.fracEvents);
releaseFramework();
disp0->release();
disp4->release();
}
TEST_F(TkTestStrict, ChangeThreadCountUp)
{
// Tests that the group allocates more memory for additional workers
// after switching to a CPU dispatcher with a higher thread count (warns).
// Mainly relies on internal asserts.
class EventCounter : public TkEventListener
{
public:
EventCounter() :fracCommands(0), fracEvents(0) {}
void receive(const TkEvent* events, uint32_t eventCount)
{
for (uint32_t i = 0; i < eventCount; i++)
{
const TkEvent& event = events[i];
switch (event.type)
{
case TkFractureCommands::EVENT_TYPE:
fracCommands++;
break;
case TkFractureEvents::EVENT_TYPE:
fracEvents++;
break;
default:
FAIL();
// no split due to single chunk
// no joints
}
}
}
uint32_t fracCommands, fracEvents;
} listener;
createFramework();
TkFramework* fwk = NvBlastTkFrameworkGet();
NvBlastChunkDesc chunkDescs[] = {
{ { 0,0,0 }, 2, UINT32_MAX, NvBlastChunkDesc::SupportFlag, 'root' }
};
TkAssetDesc assetDesc;
assetDesc.chunkCount = sizeof(chunkDescs) / sizeof(NvBlastChunkDesc);
assetDesc.chunkDescs = chunkDescs;
assetDesc.bondCount = 0;
assetDesc.bondDescs = nullptr;
assetDesc.bondFlags = nullptr;
const TkAsset* asset = fwk->createAsset(assetDesc);
TkActorDesc actorDesc;
actorDesc.asset = asset;
TkActor* actor1 = fwk->createActor(actorDesc);
TkActor* actor2 = fwk->createActor(actorDesc);
TkActor* actor3 = fwk->createActor(actorDesc);
TkActor* actor4 = fwk->createActor(actorDesc);
actor1->getFamily().addListener(listener);
actor2->getFamily().addListener(listener);
actor3->getFamily().addListener(listener);
actor4->getFamily().addListener(listener);
TestCpuDispatcher* disp2 = new TestCpuDispatcher(2);
TestCpuDispatcher* disp4 = new TestCpuDispatcher(4);
m_taskman->setCpuDispatcher(*disp2);
TkGroupDesc groupDesc = { m_taskman->getCpuDispatcher()->getWorkerCount() };
TkGroup* group = fwk->createGroup(groupDesc);
m_groupTM->setGroup(group);
group->addActor(*actor1);
group->addActor(*actor2);
group->addActor(*actor3);
group->addActor(*actor4);
NvBlastExtRadialDamageDesc radialDamage = getRadialDamageDesc(0, 0, 0);
NvBlastExtProgramParams radialDamageParams = { &radialDamage, nullptr };
actor1->damage(getFalloffProgram(), &radialDamageParams);
actor2->damage(getFalloffProgram(), &radialDamageParams);
actor3->damage(getFalloffProgram(), &radialDamageParams);
actor4->damage(getFalloffProgram(), &radialDamageParams);
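// swap in the 4-worker dispatcher for processing; the group was created for only 2 workers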
m_taskman->setCpuDispatcher(*disp4);
//group->setWorkerCount(m_taskman->getCpuDispatcher()->getWorkerCount());
m_groupTM->process();
m_groupTM->wait();
EXPECT_EQ(4, listener.fracCommands);
EXPECT_EQ(4, listener.fracEvents);
releaseFramework();
disp2->release();
disp4->release();
}
TEST_F(TkTestAllowWarnings, GroupNoWorkers)
{
// tests that the group still works when the cpu dispatcher has no worker threads
// a warning is expected
// mainly relies on internal asserts
class EventCounter : public TkEventListener
{
public:
EventCounter() :fracCommands(0), fracEvents(0) {}
void receive(const TkEvent* events, uint32_t eventCount)
{
for (uint32_t i = 0; i < eventCount; i++)
{
const TkEvent& event = events[i];
switch (event.type)
{
case TkFractureCommands::EVENT_TYPE:
fracCommands++;
break;
case TkFractureEvents::EVENT_TYPE:
fracEvents++;
break;
default:
FAIL();
// no split due to single chunk
// no joints
}
}
}
uint32_t fracCommands, fracEvents;
} listener;
createFramework();
TkFramework* fwk = NvBlastTkFrameworkGet();
NvBlastChunkDesc chunkDescs[] = {
{ { 0,0,0 }, 2, UINT32_MAX, NvBlastChunkDesc::SupportFlag, 'root' }
};
TkAssetDesc assetDesc;
assetDesc.chunkCount = sizeof(chunkDescs) / sizeof(NvBlastChunkDesc);
assetDesc.chunkDescs = chunkDescs;
assetDesc.bondCount = 0;
assetDesc.bondDescs = nullptr;
assetDesc.bondFlags = nullptr;
const TkAsset* asset = fwk->createAsset(assetDesc);
TkActorDesc actorDesc;
actorDesc.asset = asset;
TkActor* actor1 = fwk->createActor(actorDesc);
TkActor* actor2 = fwk->createActor(actorDesc);
TkActor* actor3 = fwk->createActor(actorDesc);
TkActor* actor4 = fwk->createActor(actorDesc);
actor1->getFamily().addListener(listener);
actor2->getFamily().addListener(listener);
actor3->getFamily().addListener(listener);
actor4->getFamily().addListener(listener);
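// a dispatcher with zero worker threads; the group must still process all four actors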
TestCpuDispatcher* disp = new TestCpuDispatcher(0);
m_taskman->setCpuDispatcher(*disp);
TkGroupDesc groupDesc = { m_taskman->getCpuDispatcher()->getWorkerCount() };
TkGroup* group = fwk->createGroup(groupDesc);
m_groupTM->setGroup(group);
group->addActor(*actor1);
group->addActor(*actor2);
group->addActor(*actor3);
group->addActor(*actor4);
NvBlastExtRadialDamageDesc radialDamage = getRadialDamageDesc(0, 0, 0);
NvBlastExtProgramParams programParams = {
&radialDamage,
getDefaultMaterial()
};
actor1->damage(getFalloffProgram(), &programParams);
actor2->damage(getFalloffProgram(), &programParams);
actor3->damage(getFalloffProgram(), &programParams);
actor4->damage(getFalloffProgram(), &programParams);
m_groupTM->process();
m_groupTM->wait();
EXPECT_EQ(4, listener.fracCommands);
EXPECT_EQ(4, listener.fracEvents);
disp->release();
releaseFramework();
}
| 53,291 | C++ | 32.922342 | 192 | 0.601621 |
NVIDIA-Omniverse/PhysX/blast/source/test/src/unit/CoreTests.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include <algorithm>
#include "gtest/gtest.h"
//#include "NvBlast.h"
#include "NvBlastActor.h"
#include "NvBlastIndexFns.h"
#include "NvBlastGlobals.h"
#include "TestAssets.h"
#include "NvBlastActor.h"
static void messageLog(int type, const char* msg, const char* file, int line)
{
{
switch (type)
{
case NvBlastMessage::Error: std::cout << "NvBlast Error message in " << file << "(" << line << "): " << msg << "\n"; break;
case NvBlastMessage::Warning: std::cout << "NvBlast Warning message in " << file << "(" << line << "): " << msg << "\n"; break;
case NvBlastMessage::Info: std::cout << "NvBlast Info message in " << file << "(" << line << "): " << msg << "\n"; break;
case NvBlastMessage::Debug: std::cout << "NvBlast Debug message in " << file << "(" << line << "): " << msg << "\n"; break;
}
}
}
TEST(CoreTests, IndexStartLookup)
{
uint32_t lookup[32];
uint32_t indices[] = {1,1,2,2,4,4,4};
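// lookup[v] is the first position in 'indices' whose value is >= v; values past the last entry map to the end (7)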
Nv::Blast::createIndexStartLookup<uint32_t>(lookup, 0, 30, indices, 7, 4);
EXPECT_EQ(lookup[0], 0);
EXPECT_EQ(lookup[1], 0);
EXPECT_EQ(lookup[2], 2);
EXPECT_EQ(lookup[3], 4);
EXPECT_EQ(lookup[4], 4);
EXPECT_EQ(lookup[5], 7);
EXPECT_EQ(lookup[31], 7);
}
#include "NvBlastGeometry.h"
int findClosestNodeByBonds(const float point[4], const NvBlastActor* actor)
{
const Nv::Blast::Actor* a = static_cast<const Nv::Blast::Actor*>(actor);
const NvBlastFamily* family = NvBlastActorGetFamily(actor, messageLog);
const NvBlastAsset* asset = NvBlastFamilyGetAsset(family, messageLog);
const NvBlastSupportGraph graph = NvBlastAssetGetSupportGraph(asset, messageLog);
return Nv::Blast::findClosestNode(
point,
a->getFirstGraphNodeIndex(),
a->getFamilyHeader()->getGraphNodeIndexLinks(),
graph.adjacencyPartition,
graph.adjacentNodeIndices,
graph.adjacentBondIndices,
NvBlastAssetGetBonds(asset, messageLog),
NvBlastActorGetBondHealths(actor, messageLog),
graph.chunkIndices
);
}
int findClosestNodeByChunks(const float point[4], const NvBlastActor* actor)
{
const Nv::Blast::Actor* a = static_cast<const Nv::Blast::Actor*>(actor);
return Nv::Blast::findClosestNode(
point,
a->getFirstGraphNodeIndex(),
a->getFamilyHeader()->getGraphNodeIndexLinks(),
a->getAsset()->m_graph.getAdjacencyPartition(),
a->getAsset()->m_graph.getAdjacentNodeIndices(),
a->getAsset()->m_graph.getAdjacentBondIndices(),
a->getAsset()->getBonds(),
a->getFamilyHeader()->getBondHealths(),
a->getAsset()->getChunks(),
a->getFamilyHeader()->getLowerSupportChunkHealths(),
a->getAsset()->m_graph.getChunkIndices()
);
}
TEST(CoreTests, FindChunkByPosition)
{
std::vector<char> scratch;
const NvBlastAssetDesc& desc = g_assetDescs[0]; // 1-cube
scratch.resize((size_t)NvBlastGetRequiredScratchForCreateAsset(&desc, nullptr));
void* amem = NVBLAST_ALLOC(NvBlastGetAssetMemorySize(&desc, nullptr));
NvBlastAsset* asset = NvBlastCreateAsset(amem, &desc, scratch.data(), nullptr);
ASSERT_TRUE(asset != nullptr);
uint32_t expectedNode[] = { 0, 1, 2, 3, 4, 5, 6, 7 };
const float positions[] = {
-2.0f, -2.0f, -2.0f,
+2.0f, -2.0f, -2.0f,
-2.0f, +2.0f, -2.0f,
+2.0f, +2.0f, -2.0f,
-2.0f, -2.0f, +2.0f,
+2.0f, -2.0f, +2.0f,
-2.0f, +2.0f, +2.0f,
+2.0f, +2.0f, +2.0f,
};
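// one probe point per octant of the cube asset; each should resolve to the matching support node 0-7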
const float* pos = &positions[0];
NvBlastActorDesc actorDesc;
actorDesc.initialBondHealths = actorDesc.initialSupportChunkHealths = nullptr;
actorDesc.uniformInitialBondHealth = actorDesc.uniformInitialLowerSupportChunkHealth = 1.0f;
void* fmem = NVBLAST_ALLOC(NvBlastAssetGetFamilyMemorySize(asset, nullptr));
NvBlastFamily* family = NvBlastAssetCreateFamily(fmem, asset, nullptr);
scratch.resize((size_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(family, nullptr));
NvBlastActor* actor = NvBlastFamilyCreateFirstActor(family, &actorDesc, scratch.data(), nullptr);
ASSERT_TRUE(actor != nullptr);
for (int i = 0; i < 8; ++i, pos += 3)
{
EXPECT_EQ(expectedNode[i], findClosestNodeByBonds(pos, actor));
EXPECT_EQ(expectedNode[i], findClosestNodeByChunks(pos, actor));
}
EXPECT_TRUE(NvBlastActorDeactivate(actor, nullptr));
NVBLAST_FREE(family);
NVBLAST_FREE(asset);
}
TEST(CoreTests, FindChunkByPositionUShape)
{
/*
considering this graph
4->5->6
^
|
1->2->3
and trying to find chunks by some position
*/
const NvBlastChunkDesc uchunks[7] =
{
// centroid volume parent idx flags ID
{ {3.0f, 2.0f, 0.0f}, 0.0f, UINT32_MAX, NvBlastChunkDesc::NoFlags, 0 },
{ {1.0f, 1.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 1 },
{ {3.0f, 1.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 2 },
{ {5.0f, 1.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 3 },
{ {1.0f, 3.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 4 },
{ {3.0f, 3.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 5 },
{ {5.0f, 3.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 6 }
};
const NvBlastBondDesc ubonds[5] =
{
// normal area centroid userData chunks
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 2.0f, 1.0f, 0.0f }, 0 }, { 2, 1 } }, // index swap should not matter
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 4.0f, 1.0f, 0.0f }, 0 }, { 2, 3 } },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f, { 1.0f, 2.0f, 0.0f }, 0 }, { 1, 4 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 2.0f, 3.0f, 0.0f }, 0 }, { 4, 5 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 4.0f, 3.0f, 0.0f }, 0 }, { 5, 6 } },
};
const NvBlastAssetDesc desc = { 7, uchunks, 5, ubonds };
std::vector<char> scratch;
scratch.resize((size_t)NvBlastGetRequiredScratchForCreateAsset(&desc, messageLog));
void* amem = NVBLAST_ALLOC(NvBlastGetAssetMemorySize(&desc, messageLog));
NvBlastAsset* asset = NvBlastCreateAsset(amem, &desc, scratch.data(), messageLog);
ASSERT_TRUE(asset != nullptr);
NvBlastActorDesc actorDesc;
actorDesc.initialBondHealths = actorDesc.initialSupportChunkHealths = nullptr;
actorDesc.uniformInitialBondHealth = actorDesc.uniformInitialLowerSupportChunkHealth = 1.0f;
void* fmem = NVBLAST_ALLOC(NvBlastAssetGetFamilyMemorySize(asset, messageLog));
NvBlastFamily* family = NvBlastAssetCreateFamily(fmem, asset, nullptr);
scratch.resize((size_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(family, messageLog));
NvBlastActor* actor = NvBlastFamilyCreateFirstActor(family, &actorDesc, scratch.data(), nullptr);
ASSERT_TRUE(actor != nullptr);
srand(100);
for (uint32_t i = 0; i < 100000; i++)
{
float rx = 20 * (float)(rand() - 1) / RAND_MAX - 10;
float ry = 20 * (float)(rand() - 1) / RAND_MAX - 10;
float rz = 0.0f;
float rpos[] = { rx, ry, rz };
// open boundaries
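// map the point onto the 3x2 grid of support nodes (chunk centroids at x = 1,3,5 and y = 1,3)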
uint32_t col = std::max(0, std::min(2, int(rx / 2)));
uint32_t row = std::max(0, std::min(1, int(ry / 2)));
uint32_t expectedNode = col + row * 3;
//printf("iteration %i: %.1f %.1f %.1f expected: %d\n", i, rpos[0], rpos[1], rpos[2], expectedNode);
{
uint32_t returnedNode = findClosestNodeByBonds(rpos, actor);
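// on a mismatch, repeat the call so the failure is easy to step into with a debugger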
if (expectedNode != returnedNode)
findClosestNodeByBonds(rpos, actor);
EXPECT_EQ(expectedNode, returnedNode);
}
{
uint32_t returnedNode = findClosestNodeByChunks(rpos, actor);
if (expectedNode != returnedNode)
findClosestNodeByChunks(rpos, actor);
EXPECT_EQ(expectedNode, returnedNode);
}
}
EXPECT_TRUE(NvBlastActorDeactivate(actor, messageLog));
NVBLAST_FREE(family);
NVBLAST_FREE(asset);
}
TEST(CoreTests, FindChunkByPositionLandlocked)
{
// 7 > 8 > 9
// ^ ^ ^
// 4 > 5 > 6
// ^ ^ ^
// 1 > 2 > 3
// chunk 5 (node 4) is broken out (landlocked)
// find closest chunk/node on the two new actors
const NvBlastChunkDesc chunks[10] =
{
// centroid volume parent idx flags ID
{ {0.0f, 0.0f, 0.0f}, 0.0f, UINT32_MAX, NvBlastChunkDesc::NoFlags, 0 },
{ {1.0f, 1.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 1 },
{ {3.0f, 1.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 2 },
{ {5.0f, 1.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 3 },
{ {1.0f, 3.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 4 },
{ {3.0f, 3.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 5 },
{ {5.0f, 3.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 6 },
{ {1.0f, 5.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 7 },
{ {3.0f, 5.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 8 },
{ {5.0f, 5.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 9 },
};
const NvBlastBondDesc bonds[12] =
{
// normal area centroid userData chunks
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 2.0f, 1.0f, 0.0f }, 0 }, { 1, 2 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 4.0f, 1.0f, 0.0f }, 0 }, { 2, 3 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 2.0f, 3.0f, 0.0f }, 0 }, { 4, 5 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 4.0f, 3.0f, 0.0f }, 0 }, { 5, 6 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 2.0f, 5.0f, 0.0f }, 0 }, { 7, 8 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 4.0f, 5.0f, 0.0f }, 0 }, { 8, 9 } },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f, { 1.0f, 2.0f, 0.0f }, 0 }, { 1, 4 } },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f, { 3.0f, 2.0f, 0.0f }, 0 }, { 2, 5 } },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f, { 5.0f, 2.0f, 0.0f }, 0 }, { 3, 6 } },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f, { 1.0f, 4.0f, 0.0f }, 0 }, { 4, 7 } },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f, { 3.0f, 4.0f, 0.0f }, 0 }, { 5, 8 } },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f, { 5.0f, 4.0f, 0.0f }, 0 }, { 6, 9 } },
};
const NvBlastAssetDesc desc = { 10, chunks, 12, bonds };
std::vector<char> scratch;
scratch.resize((size_t)NvBlastGetRequiredScratchForCreateAsset(&desc, messageLog));
void* amem = NVBLAST_ALLOC(NvBlastGetAssetMemorySize(&desc, messageLog));
NvBlastAsset* asset = NvBlastCreateAsset(amem, &desc, scratch.data(), messageLog);
ASSERT_TRUE(asset != nullptr);
NvBlastActorDesc actorDesc;
actorDesc.initialBondHealths = actorDesc.initialSupportChunkHealths = nullptr;
actorDesc.uniformInitialBondHealth = actorDesc.uniformInitialLowerSupportChunkHealth = 1.0f;
void* fmem = NVBLAST_ALLOC(NvBlastAssetGetFamilyMemorySize(asset, nullptr));
NvBlastFamily* family = NvBlastAssetCreateFamily(fmem, asset, nullptr);
scratch.resize((size_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(family, nullptr));
NvBlastActor* actor = NvBlastFamilyCreateFirstActor(family, &actorDesc, scratch.data(), nullptr);
ASSERT_TRUE(actor != nullptr);
float point[4] = { 3.0f, 3.0f, 0.0f };
EXPECT_EQ(4, findClosestNodeByChunks(point, actor));
EXPECT_EQ(4, findClosestNodeByBonds(point, actor));
NvBlastChunkFractureData chunkBuffer[1];
NvBlastFractureBuffers events = { 0, 1, nullptr, chunkBuffer };
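// command: fully fracture chunk 5 (the landlocked center chunk) so the split leaves it as a separate actor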
NvBlastChunkFractureData chunkFracture = { 0, 5, 1.0f };
NvBlastFractureBuffers commands = { 0, 1, nullptr, &chunkFracture };
NvBlastActorApplyFracture(&events, actor, &commands, messageLog, nullptr);
EXPECT_EQ(1, events.chunkFractureCount);
NvBlastActor* newActors[5];
NvBlastActorSplitEvent splitEvent = { nullptr, newActors };
scratch.resize((size_t)NvBlastActorGetRequiredScratchForSplit(actor, messageLog));
size_t newActorsCount = NvBlastActorSplit(&splitEvent, actor, 5, scratch.data(), messageLog, nullptr);
ASSERT_EQ(actor, newActors[1]);
EXPECT_NE(4, findClosestNodeByChunks(point, actor));
EXPECT_NE(4, findClosestNodeByBonds(point, actor));
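// newActors[0] holds only the landlocked chunk, so any query point (even far away) resolves to node 4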
float point2[4] = { 80.0f, 80.0f, 80.0f };
EXPECT_EQ(4, findClosestNodeByChunks(point2, newActors[0]));
EXPECT_EQ(4, findClosestNodeByBonds(point, newActors[0]));
for (uint32_t i = 0; i < newActorsCount; ++i)
{
EXPECT_TRUE(NvBlastActorDeactivate(newActors[i], nullptr));
}
NVBLAST_FREE(family);
NVBLAST_FREE(asset);
}
TEST(CoreTests, FindClosestByChunkAccuracy)
{
// (0,0) +---+-------+
// | | 1 |
// | 2 +---+---+
// | | 5 | |
// +---+---+ 4 |
// | 3 | |
// +-------+---+ (6,6)
// random point lookup over the actor's space
// tests would fail if findClosestNodeByChunks didn't improve accuracy with the help of bonds
const NvBlastChunkDesc chunks[6] =
{
// centroid volume parent idx flags ID
{ { 0.0f, 0.0f, 0.0f }, 0.0f, UINT32_MAX, NvBlastChunkDesc::NoFlags, 0 },
{ { 4.0f, 1.0f, 0.0f }, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 1 },
{ { 1.0f, 2.0f, 0.0f }, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 2 },
{ { 2.0f, 5.0f, 0.0f }, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 3 },
{ { 5.0f, 4.0f, 0.0f }, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 4 },
{ { 3.0f, 3.0f, 0.0f }, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 5 },
};
const NvBlastBondDesc bonds[8] =
{
// normal area centroid userData chunks
{ { { -1.0f, 0.0f, 0.0f }, 1.0f,{ 2.0f, 1.0f, 0.0f }, 0 },{ 1, 2 } },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f,{ 5.0f, 2.0f, 0.0f }, 0 },{ 1, 4 } },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f,{ 3.0f, 2.0f, 0.0f }, 0 },{ 5, 1 } },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f,{ 1.0f, 4.0f, 0.0f }, 0 },{ 2, 3 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 2.0f, 3.0f, 0.0f }, 0 },{ 2, 5 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 4.0f, 5.0f, 0.0f }, 0 },{ 3, 4 } },
{ { { 0.0f, -1.0f, 0.0f }, 1.0f,{ 3.0f, 4.0f, 0.0f }, 0 },{ 3, 5 } },
{ { { -1.0f, 0.0f, 0.0f }, 1.0f,{ 4.0f, 3.0f, 0.0f }, 0 },{ 4, 5 } },
};
const NvBlastAssetDesc desc = { 6, chunks, 8, bonds };
std::vector<char> scratch;
scratch.resize((size_t)NvBlastGetRequiredScratchForCreateAsset(&desc, messageLog));
void* amem = NVBLAST_ALLOC(NvBlastGetAssetMemorySize(&desc, messageLog));
NvBlastAsset* asset = NvBlastCreateAsset(amem, &desc, scratch.data(), messageLog);
ASSERT_TRUE(asset != nullptr);
NvBlastActorDesc actorDesc;
actorDesc.initialBondHealths = actorDesc.initialSupportChunkHealths = nullptr;
actorDesc.uniformInitialBondHealth = actorDesc.uniformInitialLowerSupportChunkHealth = 1.0f;
void* fmem = NVBLAST_ALLOC(NvBlastAssetGetFamilyMemorySize(asset, nullptr));
NvBlastFamily* family = NvBlastAssetCreateFamily(fmem, asset, nullptr);
scratch.resize((size_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(family, nullptr));
NvBlastActor* actor = NvBlastFamilyCreateFirstActor(family, &actorDesc, scratch.data(), nullptr);
ASSERT_TRUE(actor != nullptr);
srand(0xb007);
for (uint32_t i = 0; i < 100000; i++)
{
float rx = 8 * (float)(rand()) / RAND_MAX - 1;
float ry = 8 * (float)(rand()) / RAND_MAX - 1;
float rz = 0.0f;
float rpos[] = { rx, ry, rz };
EXPECT_LE(-1.0f, rx); EXPECT_GE(7.0f, rx);
EXPECT_LE(-1.0f, ry); EXPECT_GE(7.0f, ry);
uint32_t expectedNode = 0xdefec7;
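// expected node follows the layout sketched above; region boundaries at x = 2, 4 and y = 2, 4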
if (rx < 2.0f) {
if (ry < 4.0f) { expectedNode = 1; }
else { expectedNode = 2; }
}
else if (rx < 4.0f) {
if (ry < 2.0f) { expectedNode = 0; }
else if (ry < 4.0f) { expectedNode = 4; }
else { expectedNode = 2; }
}
else {
if (ry < 2.0f) { expectedNode = 0; }
else { expectedNode = 3; }
}
uint32_t nodeByBonds = findClosestNodeByBonds(rpos, actor);
if (nodeByBonds != expectedNode)
{
printf("%.1f %.1f %.1f\n", rx, ry, rz);
}
EXPECT_EQ(expectedNode, nodeByBonds);
uint32_t nodeByChunks = findClosestNodeByChunks(rpos, actor);
if (nodeByChunks != expectedNode)
{
printf("%.1f %.1f %.1f\n", rx, ry, rz);
}
EXPECT_EQ(expectedNode, nodeByChunks);
}
EXPECT_TRUE(NvBlastActorDeactivate(actor, messageLog));
NVBLAST_FREE(family);
NVBLAST_FREE(asset);
}
| 18,363 | C++ | 41.411085 | 138 | 0.593639 |
NVIDIA-Omniverse/PhysX/blast/source/test/src/unit/ActorTests.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include "BlastBaseTest.h"
#include "AssetGenerator.h"
#include <map>
#include <random>
#include <algorithm>
#include "NvBlastActor.h"
#include "NvBlastExtDamageShaders.h"
#include "NvBlastExtLlSerialization.h"
#include "NvBlastExtSerialization.h"
static bool chooseRandomGraphNodes(uint32_t* g, uint32_t count, const Nv::Blast::Actor& actor)
{
const uint32_t graphNodeCount = actor.getGraphNodeCount();
if (graphNodeCount < count)
{
return false;
}
std::vector<uint32_t> graphNodeIndices(graphNodeCount);
uint32_t* index = graphNodeIndices.data();
for (Nv::Blast::Actor::GraphNodeIt i = actor; (bool)i ; ++i)
{
*index++ = (uint32_t)i;
}
struct UserDataSorter
{
UserDataSorter(const Nv::Blast::Actor& actor) : m_asset(*actor.getAsset()) {}
bool operator () (uint32_t i0, uint32_t i1) const
{
const uint32_t c0 = m_asset.m_graph.getChunkIndices()[i0];
const uint32_t c1 = m_asset.m_graph.getChunkIndices()[i1];
if (Nv::Blast::isInvalidIndex(c0) || Nv::Blast::isInvalidIndex(c1))
{
return c0 < c1;
}
return m_asset.getChunks()[c0].userData < m_asset.getChunks()[c1].userData;
}
const Nv::Blast::Asset& m_asset;
} userDataSorter(actor);
std::sort(graphNodeIndices.data(), graphNodeIndices.data() + graphNodeCount, userDataSorter);
#if 0
std::vector<uint32_t> descUserData(graphNodeCount);
for (uint32_t i = 0; i < graphNodeCount; ++i)
{
descUserData[i] = actor.getAsset()->m_chunks[actor.getAsset()->m_graph.m_chunkIndices[graphNodeIndices[i]]].userData;
}
#endif
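// selection sampling: pick 'count' of the sorted node indices uniformly at random, without replacement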
uint32_t t = 0;
uint32_t m = 0;
for (uint32_t i = 0; i < graphNodeCount && m < count; ++i, ++t)
{
NVBLAST_ASSERT(t < graphNodeCount);
if (t >= graphNodeCount)
{
break;
}
const float U = (float)rand()/RAND_MAX; // U is a uniform random number in [0,1]
if ((graphNodeCount - t)*U < count - m)
{
g[m++] = graphNodeIndices[i];
}
}
return m == count;
}
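// damages every actor in the set with a radial falloff program, splits the fractured actors,
// and replaces them in the set with the newly created actors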
static void blast(std::set<NvBlastActor*>& actorsToDamage, GeneratorAsset* testAsset, GeneratorAsset::Vec3 localPos, float minRadius, float maxRadius, float compressiveDamage)
{
std::vector<NvBlastChunkFractureData> chunkEvents; /* num lower-support chunks + bonds */
std::vector<NvBlastBondFractureData> bondEvents; /* num lower-support chunks + bonds */
chunkEvents.resize(testAsset->solverChunks.size());
bondEvents.resize(testAsset->solverBonds.size());
std::vector<char> splitScratch;
std::vector<NvBlastActor*> newActorsBuffer(testAsset->solverChunks.size());
NvBlastExtRadialDamageDesc damage = {
compressiveDamage,
{ localPos.x, localPos.y, localPos.z },
minRadius,
maxRadius
};
NvBlastExtProgramParams programParams =
{
&damage,
nullptr
};
NvBlastDamageProgram program = {
NvBlastExtFalloffGraphShader,
nullptr
};
size_t totalNewActorsCount = 0;
for (std::set<NvBlastActor*>::iterator k = actorsToDamage.begin(); k != actorsToDamage.end();)
{
NvBlastActor* actor = *k;
NvBlastFractureBuffers events = { static_cast<uint32_t>(bondEvents.size()), static_cast<uint32_t>(chunkEvents.size()), bondEvents.data(), chunkEvents.data() };
NvBlastActorGenerateFracture(&events, actor, program, &programParams, nullptr, nullptr);
NvBlastActorApplyFracture(&events, actor, &events, nullptr, nullptr);
const bool isDamaged = NvBlastActorIsSplitRequired(actor, nullptr);
bool removeActor = false;
if (events.bondFractureCount + events.chunkFractureCount > 0)
{
NvBlastActorSplitEvent splitEvent;
splitEvent.newActors = &newActorsBuffer.data()[totalNewActorsCount];
uint32_t newActorSize = (uint32_t)(newActorsBuffer.size() - totalNewActorsCount);
splitScratch.resize((size_t)NvBlastActorGetRequiredScratchForSplit(actor, nullptr));
const size_t newActorsCount = NvBlastActorSplit(&splitEvent, actor, newActorSize, splitScratch.data(), nullptr, nullptr);
EXPECT_TRUE(isDamaged || newActorsCount == 0);
totalNewActorsCount += newActorsCount;
removeActor = splitEvent.deletedActor != NULL;
}
else
{
EXPECT_FALSE(isDamaged);
}
if (removeActor)
{
k = actorsToDamage.erase(k);
}
else
{
++k;
}
}
for (size_t i = 0; i < totalNewActorsCount; ++i)
{
actorsToDamage.insert(newActorsBuffer[i]);
}
}
template<int FailLevel, int Verbosity>
class ActorTest : public BlastBaseTest<FailLevel, Verbosity>
{
public:
ActorTest()
{
}
static void messageLog(int type, const char* msg, const char* file, int line)
{
BlastBaseTest<FailLevel, Verbosity>::messageLog(type, msg, file, line);
}
static void* alloc(size_t size)
{
return BlastBaseTest<FailLevel, Verbosity>::alignedZeroedAlloc(size);
}
static void free(void* mem)
{
BlastBaseTest<FailLevel, Verbosity>::alignedFree(mem);
}
NvBlastAsset* buildAsset(const NvBlastAssetDesc& desc)
{
// fix desc if wrong order or missing coverage first
NvBlastAssetDesc fixedDesc = desc;
std::vector<NvBlastChunkDesc> chunkDescs(desc.chunkDescs, desc.chunkDescs + desc.chunkCount);
std::vector<NvBlastBondDesc> bondDescs(desc.bondDescs, desc.bondDescs + desc.bondCount);
std::vector<uint32_t> chunkReorderMap(desc.chunkCount);
std::vector<char> scratch(desc.chunkCount * sizeof(NvBlastChunkDesc));
NvBlastEnsureAssetExactSupportCoverage(chunkDescs.data(), fixedDesc.chunkCount, scratch.data(), messageLog);
NvBlastReorderAssetDescChunks(chunkDescs.data(), fixedDesc.chunkCount, bondDescs.data(), fixedDesc.bondCount, chunkReorderMap.data(), true, scratch.data(), messageLog);
fixedDesc.chunkDescs = chunkDescs.data();
fixedDesc.bondDescs = bondDescs.empty() ? nullptr : bondDescs.data();
// create asset
m_scratch.resize((size_t)NvBlastGetRequiredScratchForCreateAsset(&fixedDesc, messageLog));
void* mem = alloc(NvBlastGetAssetMemorySize(&fixedDesc, messageLog));
NvBlastAsset* asset = NvBlastCreateAsset(mem, &fixedDesc, m_scratch.data(), messageLog);
EXPECT_TRUE(asset != nullptr);
return asset;
}
void buildAssets()
{
m_assets.resize(getAssetDescCount());
for (uint32_t i = 0; i < m_assets.size(); ++i)
{
m_assets[i] = buildAsset(g_assetDescs[i]);
}
}
NvBlastActor* instanceActor(const NvBlastAsset& asset)
{
NvBlastActorDesc actorDesc;
actorDesc.initialBondHealths = actorDesc.initialSupportChunkHealths = nullptr;
actorDesc.uniformInitialBondHealth = actorDesc.uniformInitialLowerSupportChunkHealth = 1.0f;
void* fmem = alloc(NvBlastAssetGetFamilyMemorySize(&asset, nullptr));
NvBlastFamily* family = NvBlastAssetCreateFamily(fmem, &asset, nullptr);
std::vector<char> scratch((size_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(family, messageLog));
EXPECT_TRUE(scratch.capacity() > 0);
NvBlastActor* actor = NvBlastFamilyCreateFirstActor(family, &actorDesc, scratch.data(), messageLog);
EXPECT_TRUE(actor != nullptr);
return actor;
}
void instanceActors()
{
m_actors.resize(m_assets.size());
for (uint32_t i = 0; i < m_actors.size(); ++i)
{
m_actors[i] = instanceActor(*m_assets[i]);
}
}
void releaseActors()
{
for (uint32_t i = 0; i < m_actors.size(); ++i)
{
NvBlastFamily* family = NvBlastActorGetFamily(m_actors[i], messageLog);
const bool actorReleaseResult = NvBlastActorDeactivate(m_actors[i], messageLog);
EXPECT_TRUE(actorReleaseResult);
free(family);
}
}
void destroyAssets()
{
for (uint32_t i = 0; i < m_assets.size(); ++i)
{
free(m_assets[i]);
}
}
void instanceAndPartitionRecursively
(
const NvBlastAsset& asset,
bool partitionToSubsupport,
void (*preSplitTest)(const Nv::Blast::Actor&, NvBlastLog),
void (*postSplitTest)(const std::vector<Nv::Blast::Actor*>&, uint32_t, uint32_t, bool)
)
{
const Nv::Blast::Asset& solverAsset = *static_cast<const Nv::Blast::Asset*>(&asset);
std::vector<Nv::Blast::Actor*> actors;
std::vector<Nv::Blast::Actor*> buffer(NvBlastAssetGetChunkCount(&asset, messageLog));
// Instance the first actor from the asset
actors.push_back(static_cast<Nv::Blast::Actor*>(instanceActor(asset)));
NvBlastFamily* family = NvBlastActorGetFamily(actors[0], messageLog);
const uint32_t supportChunkCount = NvBlastAssetGetSupportChunkCount(&asset, messageLog);
const uint32_t leafChunkCount = actors[0]->getAsset()->m_leafChunkCount;
// Now randomly partition the actors in the array, and keep going until we're down to single support chunks
bool canFracture = true;
while (canFracture)
{
canFracture = false;
for (uint32_t actorToPartition = 0; actorToPartition < actors.size(); ++actorToPartition)
{
Nv::Blast::Actor* a = (Nv::Blast::Actor*)actors[actorToPartition];
if (a == nullptr)
{
continue;
}
m_scratch.reserve((size_t)NvBlastActorGetRequiredScratchForSplit(a, messageLog));
if (preSplitTest)
{
preSplitTest(*a, nullptr);
}
const bool singleLowerSupportChunk = a->getGraphNodeCount() <= 1;
uint32_t newActorCount = 0;
for (int damageNum = 0; newActorCount < 2 && damageNum < 100; ++damageNum) // Avoid infinite loops
{
if (!singleLowerSupportChunk)
{
uint32_t g[2];
chooseRandomGraphNodes(g, 2, *a);
const uint32_t bondIndex = solverAsset.m_graph.findBond(g[0], g[1]);
if (bondIndex != Nv::Blast::invalidIndex<uint32_t>())
{
a->damageBond(g[0], g[1], bondIndex, 100.0f);
a->findIslands(m_scratch.data());
}
}
else
if (!partitionToSubsupport)
{
continue;
}
// Split actor
newActorCount = a->partition((Nv::Blast::Actor**)&buffer[0], (uint32_t)buffer.size(), messageLog);
if (newActorCount >= 2)
{
actors[actorToPartition] = nullptr;
}
}
if (newActorCount > 1)
{
canFracture = true;
}
for (uint32_t i = 0; i < newActorCount; ++i)
{
actors.push_back(buffer[i]);
buffer[i]->updateVisibleChunksFromGraphNodes();
}
}
}
if (postSplitTest)
{
postSplitTest(actors, leafChunkCount, supportChunkCount, partitionToSubsupport);
}
for (auto actor : actors)
{
if (actor)
actor->release();
}
free(family);
}
static void recursivePartitionPostSplitTestCounts(const std::vector<Nv::Blast::Actor*>& actors, uint32_t leafChunkCount, uint32_t supportChunkCount, bool partitionToSubsupport)
{
// Test to see that all actors are split down to single support chunks
uint32_t remainingActorCount = 0;
for (uint32_t i = 0; i < actors.size(); ++i)
{
Nv::Blast::Actor* a = (Nv::Blast::Actor*)actors[i];
if (a == nullptr)
{
continue;
}
++remainingActorCount;
NVBLAST_ASSERT(1 == a->getVisibleChunkCount() || a->hasExternalBonds());
EXPECT_TRUE(1 == a->getVisibleChunkCount() || a->hasExternalBonds());
if (!partitionToSubsupport)
{
EXPECT_EQ(1, a->getGraphNodeCount());
}
if (0 == a->getVisibleChunkCount())
{
EXPECT_TRUE(a->hasExternalBonds());
EXPECT_EQ(1, a->getGraphNodeCount());
EXPECT_EQ(a->getFamilyHeader()->m_asset->m_graph.m_nodeCount - 1, a->getFirstGraphNodeIndex());
--remainingActorCount; // Do not count this as a remaining actor, to be compared with leaf or support chunk counts later
}
const bool actorReleaseResult = NvBlastActorDeactivate(actors[i], nullptr);
EXPECT_TRUE(actorReleaseResult);
}
if (partitionToSubsupport)
{
EXPECT_EQ(leafChunkCount, remainingActorCount);
}
else
{
EXPECT_EQ(supportChunkCount, remainingActorCount);
}
}
static void testActorVisibleChunks(const Nv::Blast::Actor& actor, NvBlastLog)
{
const Nv::Blast::Asset& asset = *actor.getAsset();
const NvBlastChunk* chunks = asset.getChunks();
if (actor.isSubSupportChunk())
{
EXPECT_EQ(1, actor.getVisibleChunkCount());
const uint32_t firstVisibleChunkIndex = (uint32_t)Nv::Blast::Actor::VisibleChunkIt(actor);
EXPECT_EQ(actor.getIndex() - asset.m_graph.m_nodeCount, firstVisibleChunkIndex - asset.m_firstSubsupportChunkIndex);
// Make sure the visible chunk is subsupport
// Array of support flags
std::vector<bool> isSupport(asset.m_chunkCount, false);
for (uint32_t i = 0; i < asset.m_graph.m_nodeCount; ++i)
{
const uint32_t chunkIndex = asset.m_graph.getChunkIndices()[i];
if (!Nv::Blast::isInvalidIndex(chunkIndex))
{
isSupport[chunkIndex] = true;
}
}
// Climb hierarchy to find support chunk
uint32_t chunkIndex = firstVisibleChunkIndex;
while (chunkIndex != Nv::Blast::invalidIndex<uint32_t>())
{
if (isSupport[chunkIndex])
{
break;
}
chunkIndex = chunks[chunkIndex].parentChunkIndex;
}
EXPECT_FALSE(Nv::Blast::isInvalidIndex(chunkIndex));
}
else
{
// Array of visibility flags
std::vector<bool> isVisible(asset.m_chunkCount, false);
for (Nv::Blast::Actor::VisibleChunkIt i = actor; (bool)i; ++i)
{
isVisible[(uint32_t)i] = true;
}
// Mark visible nodes representing graph chunks
std::vector<bool> visibleChunkFound(asset.m_chunkCount, false);
// Make sure every graph chunk is represented by a visible chunk, or represents the world
for (Nv::Blast::Actor::GraphNodeIt i = actor; (bool)i; ++i)
{
const uint32_t graphNodeIndex = (uint32_t)i;
uint32_t chunkIndex = asset.m_graph.getChunkIndices()[graphNodeIndex];
// Climb hierarchy to find visible chunk
while (chunkIndex != Nv::Blast::invalidIndex<uint32_t>())
{
// Check that chunk owners are accurate
EXPECT_EQ(actor.getIndex(), actor.getFamilyHeader()->getChunkActorIndices()[chunkIndex]);
if (isVisible[chunkIndex])
{
visibleChunkFound[chunkIndex] = true;
break;
}
chunkIndex = chunks[chunkIndex].parentChunkIndex;
}
EXPECT_TRUE(!Nv::Blast::isInvalidIndex(chunkIndex) || (graphNodeIndex == asset.m_graph.m_nodeCount-1 && actor.hasExternalBonds()));
}
// Check that all visible chunks are accounted for
for (uint32_t i = 0; i < asset.m_chunkCount; ++i)
{
EXPECT_EQ(visibleChunkFound[i], isVisible[i]);
}
// Make sure that, if all siblings are intact, they are invisible
for (uint32_t i = 0; i < asset.m_chunkCount; ++i)
{
bool allIntact = true;
bool noneVisible = true;
if (chunks[i].firstChildIndex < asset.getUpperSupportChunkCount()) // Do not check subsupport
{
for (uint32_t j = chunks[i].firstChildIndex; j < chunks[i].childIndexStop; ++j)
{
allIntact = allIntact && actor.getFamilyHeader()->getChunkActorIndices()[j] == actor.getIndex();
noneVisible = noneVisible && !isVisible[j];
}
EXPECT_TRUE(!allIntact || noneVisible);
}
}
}
}
static void recursivePartitionPostSplitTestVisibleChunks(const std::vector<Nv::Blast::Actor*>& actors, uint32_t leafChunkCount, uint32_t supportChunkCount, bool partitionToSubsupport)
{
for (uint32_t i = 0; i < actors.size(); ++i)
{
Nv::Blast::Actor* a = (Nv::Blast::Actor*)actors[i];
if (a == nullptr)
{
continue;
}
testActorVisibleChunks(*a, nullptr);
}
}
void partitionActorsToSupportChunks
(
uint32_t assetDescCount,
const NvBlastAssetDesc* assetDescs,
void(*preSplitTest)(const Nv::Blast::Actor&, NvBlastLog),
void(*postSplitTest)(const std::vector<Nv::Blast::Actor*>&, uint32_t, uint32_t, bool),
bool partitionToSubsupport
)
{
srand(0);
for (uint32_t i = 0; i < assetDescCount; ++i)
{
// Create an asset
NvBlastAsset* asset = buildAsset(assetDescs[i]);
// Perform repeated partitioning
instanceAndPartitionRecursively(*asset, partitionToSubsupport, preSplitTest, postSplitTest);
// Free the asset
free(asset);
}
}
static void compareFamilies(const NvBlastFamily* family1, const NvBlastFamily* family2, NvBlastLog logFn)
{
// first check that the family sizes are the same
// still do the byte comparison even if they aren't equal to make it easier to spot where things went wrong
const uint32_t size1 = NvBlastFamilyGetSize(family1, logFn);
const uint32_t size2 = NvBlastFamilyGetSize(family2, logFn);
const uint32_t size = std::min(size1, size2);
if (size1 != size2)
{
std::ostringstream msg;
msg << "Family deserialization sizes don't match [" << size1 << ", " << size2 << "].";
logFn(NvBlastMessage::Error, msg.str().c_str(), __FILE__, __LINE__);
}
const char* block1 = reinterpret_cast<const char*>(family1);
const char* block2 = reinterpret_cast<const char*>(family2);
#if 0
EXPECT_EQ(0, memcmp(block1, block2, size));
#else
bool diffFound = false;
size_t startDiff = 0;
for (size_t i = 0; i < size; ++i)
{
if (block1[i] != block2[i])
{
diffFound = true;
startDiff = i;
break;
}
}
if (!diffFound)
{
return;
}
size_t endDiff = startDiff;
for (size_t i = size; i--;)
{
if (block1[i] != block2[i])
{
endDiff = i;
break;
}
}
std::ostringstream msg;
msg << "Family deserialization does not match in range [" << startDiff << ", " << endDiff << "].";
logFn(NvBlastMessage::Error, msg.str().c_str(), __FILE__, __LINE__);
#endif
}
static void testActorBlockSerialize(std::vector<NvBlastActor*>& actors, NvBlastLog logFn)
{
if (actors.size())
{
const NvBlastFamily* family = NvBlastActorGetFamily(actors[0], logFn);
const uint32_t size = NvBlastFamilyGetSize(family, logFn);
s_storage.insert(s_storage.end(), (char*)family, (char*)family + size);
}
}
static void testActorCapnSerialize(std::vector<NvBlastActor*>& actors, NvBlastLog logFn)
{
if (actors.size())
{
const NvBlastFamily* family = NvBlastActorGetFamily(actors[0], logFn);
Nv::Blast::ExtSerialization* ser = NvBlastExtSerializationCreate();
EXPECT_TRUE(ser != nullptr);
EXPECT_TRUE(ser->getSerializationEncoding() == Nv::Blast::ExtSerialization::EncodingID::CapnProtoBinary);
void* serializedFamilyBuffer = nullptr;
const uint64_t serialFamilySize =
ser->serializeIntoBuffer(serializedFamilyBuffer, family, Nv::Blast::LlObjectTypeID::Family);
EXPECT_TRUE(serialFamilySize != 0);
s_storage.insert(s_storage.end(), (char*)&serialFamilySize, (char*)&serialFamilySize + sizeof(uint64_t));
s_storage.insert(s_storage.end(), (char*)serializedFamilyBuffer, (char*)serializedFamilyBuffer + serialFamilySize);
}
}
static void testActorDeserializeCommon(const NvBlastFamily* family, std::vector<NvBlastActor*>& actors, uint32_t size, NvBlastLog logFn)
{
EXPECT_LT(s_curr, s_storage.size());
EXPECT_TRUE(size > 0);
EXPECT_LE(s_curr + size, s_storage.size());
s_curr += size;
const NvBlastFamily* actorFamily = NvBlastActorGetFamily(actors[0], logFn);
// Family may contain different asset pointers, so copy into a new family block and set the same asset before comparing
Nv::Blast::Actor& a = *static_cast<Nv::Blast::Actor*>(actors[0]);
const Nv::Blast::Asset* solverAsset = a.getAsset();
const uint32_t familySize = NvBlastFamilyGetSize(family, logFn);
std::vector<char> storageFamilyCopy((char*)family, (char*)family + familySize);
NvBlastFamily* storageFamily = reinterpret_cast<NvBlastFamily*>(storageFamilyCopy.data());
NvBlastFamilySetAsset(storageFamily, solverAsset, logFn);
{
const uint32_t actorCountExpected = NvBlastFamilyGetActorCount(storageFamily, logFn);
std::vector<NvBlastActor*> blockActors(actorCountExpected);
const uint32_t actorCountReturned = NvBlastFamilyGetActors(blockActors.data(), actorCountExpected, storageFamily, logFn);
EXPECT_EQ(actorCountExpected, actorCountReturned);
}
compareFamilies(storageFamily, actorFamily, logFn);
}
static void testActorBlockDeserialize(std::vector<NvBlastActor*>& actors, NvBlastLog logFn)
{
if (actors.size())
{
const NvBlastFamily* family = reinterpret_cast<NvBlastFamily*>(&s_storage[s_curr]);
const uint32_t size = NvBlastFamilyGetSize(family, logFn);
testActorDeserializeCommon(family, actors, size, logFn);
}
}
static void testActorCapnDeserialize(std::vector<NvBlastActor*>& actors, NvBlastLog logFn)
{
if (actors.size())
{
Nv::Blast::ExtSerialization* ser = NvBlastExtSerializationCreate();
EXPECT_TRUE(ser->getSerializationEncoding() == Nv::Blast::ExtSerialization::EncodingID::CapnProtoBinary);
// the serialized size is stored in the stream right before the data itself, pull it out first
uint32_t objTypeId;
const uint64_t& size = *reinterpret_cast<const uint64_t*>(&s_storage[s_curr]);
s_curr += sizeof(uint64_t);
EXPECT_LE(size, UINT32_MAX);
// now read the buffer itself
void* object = ser->deserializeFromBuffer(&s_storage[s_curr], size, &objTypeId);
EXPECT_TRUE(object != nullptr);
EXPECT_TRUE(objTypeId == Nv::Blast::LlObjectTypeID::Family);
// finally compare it with the original family
const NvBlastFamily* family = reinterpret_cast<NvBlastFamily*>(object);
testActorDeserializeCommon(family, actors, (uint32_t)size, logFn);
}
}
// Serialize all actors and then deserialize back into a new family in a random order, and compare with the original family
static void testActorSerializationNewFamily(std::vector<NvBlastActor*>& actors, NvBlastLog logFn)
{
if (actors.size() == 0)
{
return;
}
Nv::Blast::Actor& a = *static_cast<Nv::Blast::Actor*>(actors[0]);
const Nv::Blast::Asset* solverAsset = a.getAsset();
const uint32_t serSizeBound = NvBlastAssetGetActorSerializationSizeUpperBound(solverAsset, logFn);
std::vector< std::vector<char> > streams(actors.size());
for (size_t i = 0; i < actors.size(); ++i)
{
const uint32_t serSize = NvBlastActorGetSerializationSize(actors[i], logFn);
EXPECT_GE(serSizeBound, serSize);
std::vector<char>& stream = streams[i];
stream.resize(serSize);
const uint32_t bytesWritten = NvBlastActorSerialize(stream.data(), serSize, actors[i], logFn);
EXPECT_EQ(serSize, bytesWritten);
}
void* fmem = alloc(NvBlastAssetGetFamilyMemorySize(solverAsset, logFn));
NvBlastFamily* newFamily = NvBlastAssetCreateFamily(fmem, solverAsset, logFn);
std::vector<size_t> order(actors.size());
for (size_t i = 0; i < order.size(); ++i)
{
order[i] = i;
}
std::random_device rd;
std::mt19937 g(rd());
std::shuffle(order.begin(), order.end(), g);
for (size_t i = 0; i < actors.size(); ++i)
{
NvBlastActor* newActor = NvBlastFamilyDeserializeActor(newFamily, streams[order[i]].data(), logFn);
EXPECT_TRUE(newActor != nullptr);
}
const NvBlastFamily* oldFamily = NvBlastActorGetFamily(&a, logFn);
// Allow there to be differences with invalid actors
const Nv::Blast::FamilyHeader* f1 = reinterpret_cast<const Nv::Blast::FamilyHeader*>(oldFamily);
const Nv::Blast::FamilyHeader* f2 = reinterpret_cast<const Nv::Blast::FamilyHeader*>(newFamily);
for (uint32_t actorN = 0; actorN < f1->getActorsArraySize(); ++actorN)
{
const Nv::Blast::Actor* a1 = f1->getActors() + actorN;
Nv::Blast::Actor* a2 = const_cast<Nv::Blast::Actor*>(f2->getActors() + actorN);
EXPECT_EQ(a1->isActive(), a2->isActive());
if (!a1->isActive())
{
*a2 = *a1; // Actual data does not matter, setting equal to pass comparison
}
}
compareFamilies(oldFamily, newFamily, logFn);
free(newFamily);
}
// Copy the family and then serialize some subset of actors, deleting them afterwards.
// Then, deserialize back into the block and compare the original and new families.
static void testActorSerializationPartialBlock(std::vector<NvBlastActor*>& actors, NvBlastLog logFn)
{
if (actors.size() <= 1)
{
return;
}
Nv::Blast::Actor& a = *static_cast<Nv::Blast::Actor*>(actors[0]);
const Nv::Blast::Asset* solverAsset = a.getAsset();
const NvBlastFamily* oldFamily = NvBlastActorGetFamily(&a, logFn);
const uint32_t size = NvBlastFamilyGetSize(oldFamily, logFn);
std::vector<char> buffer((char*)oldFamily, (char*)oldFamily + size);
NvBlastFamily* familyCopy = reinterpret_cast<NvBlastFamily*>(buffer.data());
const uint32_t serCount = 1 + (uint32_t)(rand() % (actors.size() - 1)); // serialize at least one actor, but never all of them
const uint32_t actorCount = NvBlastFamilyGetActorCount(familyCopy, logFn);
std::vector<NvBlastActor*> actorsRemaining(actorCount);
const uint32_t actorsInFamily = NvBlastFamilyGetActors(&actorsRemaining[0], actorCount, familyCopy, logFn);
EXPECT_EQ(actorCount, actorsInFamily);
const uint32_t serSizeBound = NvBlastAssetGetActorSerializationSizeUpperBound(solverAsset, logFn);
std::vector< std::vector<char> > streams(serCount);
for (uint32_t i = 0; i < serCount; ++i)
{
std::vector<char>& stream = streams[i];
const uint32_t indexToStream = rand() % actorsRemaining.size();
NvBlastActor* actorToStream = actorsRemaining[indexToStream];
std::swap(actorsRemaining[indexToStream], actorsRemaining[actorsRemaining.size() - 1]);
actorsRemaining.pop_back();
const uint32_t serSize = NvBlastActorGetSerializationSize(actorToStream, logFn);
EXPECT_GE(serSizeBound, serSize);
stream.resize(serSize);
const uint32_t bytesWritten = NvBlastActorSerialize(&stream[0], serSize, actorToStream, logFn);
EXPECT_EQ(serSize, bytesWritten);
NvBlastActorDeactivate(actorToStream, logFn);
}
for (uint32_t i = 0; i < serCount; ++i)
{
NvBlastActor* newActor = NvBlastFamilyDeserializeActor(familyCopy, streams[i].data(), logFn);
EXPECT_TRUE(newActor != nullptr);
}
compareFamilies(oldFamily, familyCopy, logFn);
}
void damageLeafSupportActors
(
uint32_t assetCount,
uint32_t familyCount,
uint32_t damageCount,
bool simple,
void (*actorTest)(const Nv::Blast::Actor&, NvBlastLog),
void (*postDamageTest)(std::vector<NvBlastActor*>&, NvBlastLog),
CubeAssetGenerator::BondFlags bondFlags = CubeAssetGenerator::BondFlags::ALL_INTERNAL_BONDS
)
{
const float relativeDamageRadius = simple ? 0.75f : 0.2f;
const float compressiveDamage = 1.0f;
const uint32_t minChunkCount = simple ? 9 : 100;
const uint32_t maxChunkCount = simple ? 9 : 10000;
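// 'simple' uses a single fixed 2x2x2 slicing (9 chunk descs); otherwise random slicing is added until the leaf chunk count lands roughly between 100 and 10000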
const bool printActorCount = false;
srand(0);
std::cout << "Asset # (out of " << assetCount << "): ";
for (uint32_t assetNum = 0; assetNum < assetCount; ++assetNum)
{
std::cout << assetNum + 1 << ".. ";
CubeAssetGenerator::Settings settings;
settings.extents = GeneratorAsset::Vec3(1, 1, 1);
settings.bondFlags = bondFlags;
CubeAssetGenerator::DepthInfo depthInfo;
depthInfo.slicesPerAxis = GeneratorAsset::Vec3(1, 1, 1);
depthInfo.flag = NvBlastChunkDesc::Flags::NoFlags;
settings.depths.push_back(depthInfo);
uint32_t chunkCount = 1;
while (chunkCount < minChunkCount)
{
uint32_t chunkMul;
do
{
depthInfo.slicesPerAxis = simple ? GeneratorAsset::Vec3(2, 2, 2) : GeneratorAsset::Vec3((float)(1 + rand() % 4), (float)(1 + rand() % 4), (float)(1 + rand() % 4));
chunkMul = (uint32_t)(depthInfo.slicesPerAxis.x * depthInfo.slicesPerAxis.y * depthInfo.slicesPerAxis.z);
} while (chunkMul == 1);
if (chunkCount*chunkMul > maxChunkCount)
{
break;
}
chunkCount *= chunkMul;
settings.depths.push_back(depthInfo);
settings.extents = settings.extents * depthInfo.slicesPerAxis;
}
settings.depths.back().flag = NvBlastChunkDesc::SupportFlag; // Leaves are support
// Make largest direction unit size
settings.extents = settings.extents * (1.0f / std::max(settings.extents.x, std::max(settings.extents.y, settings.extents.z)));
// Create asset
GeneratorAsset testAsset;
CubeAssetGenerator::generate(testAsset, settings);
NvBlastAssetDesc desc;
desc.chunkDescs = testAsset.solverChunks.data();
desc.chunkCount = (uint32_t)testAsset.solverChunks.size();
desc.bondDescs = testAsset.solverBonds.data();
desc.bondCount = (uint32_t)testAsset.solverBonds.size();
NvBlastAsset* asset = buildAsset(desc);
NvBlastID assetID = NvBlastAssetGetID(asset, messageLog);
// copy asset (for setAsset testing)
const char* data = (const char*)asset;
const uint32_t dataSize = NvBlastAssetGetSize(asset, messageLog);
char* duplicateData = (char*)alloc(dataSize);
memcpy(duplicateData, data, dataSize);
NvBlastAsset* assetDuplicate = (NvBlastAsset*)duplicateData;
// Generate families
for (uint32_t familyNum = 0; familyNum < familyCount; ++familyNum)
{
// family
void* fmem = alloc(NvBlastAssetGetFamilyMemorySize(asset, messageLog));
NvBlastFamily* family = NvBlastAssetCreateFamily(fmem, asset, messageLog); // Using zeroingAlloc in case actorTest compares memory blocks
NvBlastID id = NvBlastFamilyGetAssetID(family, messageLog);
EXPECT_TRUE(!memcmp(&assetID, &id, sizeof(NvBlastID)));
if (rand() % 2 == 0)
{
// replace asset with duplicate in half of cases to test setAsset
NvBlastFamilySetAsset(family, assetDuplicate, messageLog);
NvBlastID id2 = NvBlastFamilyGetAssetID(family, messageLog);
EXPECT_TRUE(!memcmp(&assetID, &id2, sizeof(NvBlastID)));
}
// actor
NvBlastActorDesc actorDesc;
actorDesc.initialBondHealths = actorDesc.initialSupportChunkHealths = nullptr;
actorDesc.uniformInitialBondHealth = actorDesc.uniformInitialLowerSupportChunkHealth = 1.0f;
m_scratch.resize((size_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(family, messageLog));
NvBlastActor* actor = NvBlastFamilyCreateFirstActor(family, &actorDesc, m_scratch.data(), messageLog);
EXPECT_TRUE(actor != nullptr);
// Generate damage
std::set<NvBlastActor*> actors;
actors.insert(actor);
if (printActorCount) std::cout << "Actors: 1.. ";
for (uint32_t damageNum = 0; damageNum < damageCount; ++damageNum)
{
GeneratorAsset::Vec3 localPos = settings.extents*GeneratorAsset::Vec3((float)rand() / RAND_MAX - 0.5f, (float)rand() / RAND_MAX - 0.5f, (float)rand() / RAND_MAX - 0.5f);
blast(actors, &testAsset, localPos, relativeDamageRadius, relativeDamageRadius*1.2f, compressiveDamage);
if (printActorCount) std::cout << actors.size() << ".. ";
if (actors.size() > 0)
{
const NvBlastFamily* family = NvBlastActorGetFamily(*actors.begin(), messageLog);
const uint32_t actorCount = NvBlastFamilyGetActorCount(family, messageLog);
EXPECT_EQ((uint32_t)actors.size(), actorCount);
if ((uint32_t)actors.size() == actorCount)
{
std::vector<NvBlastActor*> buffer1(actorCount);
const uint32_t actorsWritten = NvBlastFamilyGetActors(&buffer1[0], actorCount, family, messageLog);
EXPECT_EQ(actorsWritten, actorCount);
std::vector<NvBlastActor*> buffer2(actors.begin(), actors.end());
EXPECT_EQ(0, memcmp(&buffer1[0], buffer2.data(), actorCount*sizeof(NvBlastActor*)));
}
}
// Test individual actors
if (actorTest != nullptr)
{
for (std::set<NvBlastActor*>::iterator k = actors.begin(); k != actors.end(); ++k)
{
actorTest(*static_cast<Nv::Blast::Actor*>(*k), messageLog);
}
}
}
if (printActorCount) std::cout << "\n";
// Test fractured actor set
if (postDamageTest)
{
std::vector<NvBlastActor*> actorArray(actors.begin(), actors.end());
postDamageTest(actorArray, messageLog);
}
// Release remaining actors
for (std::set<NvBlastActor*>::iterator k = actors.begin(); k != actors.end(); ++k)
{
NvBlastActorDeactivate(*k, messageLog);
}
actors.clear();
free(family);
}
// Release asset data
free(asset);
free(assetDuplicate);
}
std::cout << "done.\n";
}
std::vector<NvBlastAsset*> m_assets;
std::vector<NvBlastActor*> m_actors;
std::vector<char> m_scratch;
static std::vector<char> s_storage;
static size_t s_curr;
};
// Static values
template<int FailLevel, int Verbosity>
std::vector<char> ActorTest<FailLevel, Verbosity>::s_storage;
template<int FailLevel, int Verbosity>
size_t ActorTest<FailLevel, Verbosity>::s_curr;
// Specializations
typedef ActorTest<NvBlastMessage::Error, 1> ActorTestAllowWarnings;
typedef ActorTest<NvBlastMessage::Warning, 1> ActorTestStrict;
// Tests
TEST_F(ActorTestStrict, InstanceActors)
{
// Build assets and instance actors
buildAssets();
instanceActors();
// Release actors and destroy assets
releaseActors();
destroyAssets();
}
TEST_F(ActorTestAllowWarnings, ActorHealthInitialization)
{
// Test all assets
std::vector<NvBlastAssetDesc> assetDescs;
assetDescs.insert(assetDescs.end(), g_assetDescs, g_assetDescs + getAssetDescCount());
assetDescs.insert(assetDescs.end(), g_assetDescsMissingCoverage, g_assetDescsMissingCoverage + getAssetDescMissingCoverageCount());
struct TestMode
{
enum Enum
{
Uniform,
Nonuniform,
Count
};
};
for (auto assetDesc : assetDescs)
{
NvBlastAsset* asset = buildAsset(assetDesc);
EXPECT_TRUE(asset != nullptr);
Nv::Blast::Asset& assetInt = static_cast<Nv::Blast::Asset&>(*asset);
NvBlastSupportGraph graph = NvBlastAssetGetSupportGraph(asset, nullptr);
std::vector<float> supportChunkHealths(graph.nodeCount);
for (size_t i = 0; i < supportChunkHealths.size(); ++i)
{
supportChunkHealths[i] = 1.0f + (float)i;
}
std::vector<float> bondHealths(assetInt.getBondCount());
for (size_t i = 0; i < bondHealths.size(); ++i)
{
bondHealths[i] = 1.5f + (float)i;
}
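// distinct per-chunk and per-bond health values let the test verify that non-uniform initialization is applied exactly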
for (int chunkTestMode = 0; chunkTestMode < TestMode::Count; ++chunkTestMode)
{
for (int bondTestMode = 0; bondTestMode < TestMode::Count; ++bondTestMode)
{
NvBlastActorDesc actorDesc;
switch (chunkTestMode)
{
default:
case TestMode::Uniform:
actorDesc.initialSupportChunkHealths = nullptr;
actorDesc.uniformInitialLowerSupportChunkHealth = 1.0f;
break;
case TestMode::Nonuniform:
actorDesc.initialSupportChunkHealths = supportChunkHealths.data();
break;
}
switch (bondTestMode)
{
default:
case TestMode::Uniform:
actorDesc.initialBondHealths = nullptr;
actorDesc.uniformInitialBondHealth = 2.0f;
break;
case TestMode::Nonuniform:
actorDesc.initialBondHealths = bondHealths.data();
break;
}
void* fmem = alloc(NvBlastAssetGetFamilyMemorySize(asset, messageLog));
NvBlastFamily* family = NvBlastAssetCreateFamily(fmem, asset, nullptr);
std::vector<char> scratch((size_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(family, messageLog));
NvBlastActor* actor = NvBlastFamilyCreateFirstActor(family, &actorDesc, scratch.data(), messageLog);
EXPECT_TRUE(actor != nullptr);
Nv::Blast::Actor& actorInt = static_cast<Nv::Blast::Actor&>(*actor);
Nv::Blast::FamilyHeader* header = actorInt.getFamilyHeader();
for (uint32_t i = 0; i < graph.nodeCount; ++i)
{
const uint32_t supportChunkIndex = graph.chunkIndices[i];
for (Nv::Blast::Asset::DepthFirstIt it(assetInt, supportChunkIndex); (bool)it; ++it)
{
const uint32_t chunkIndex = (uint32_t)it;
const uint32_t lowerSupportIndex = assetInt.getContiguousLowerSupportIndex(chunkIndex);
NVBLAST_ASSERT(lowerSupportIndex < assetInt.getLowerSupportChunkCount());
const float health = header->getLowerSupportChunkHealths()[lowerSupportIndex];
switch (chunkTestMode)
{
default:
case TestMode::Uniform:
EXPECT_EQ(1.0f, health);
break;
case TestMode::Nonuniform:
EXPECT_EQ(supportChunkHealths[i], health);
break;
}
}
}
for (uint32_t i = 0; i < assetInt.getBondCount(); ++i)
{
switch (bondTestMode)
{
default:
case TestMode::Uniform:
EXPECT_EQ(2.0f, header->getBondHealths()[i]);
break;
case TestMode::Nonuniform:
EXPECT_EQ(bondHealths[i], header->getBondHealths()[i]);
break;
}
}
NvBlastActorDeactivate(actor, messageLog);
free(family);
}
}
free(asset);
}
}
TEST_F(ActorTestStrict, PartitionActorsToSupportChunksTestCounts)
{
partitionActorsToSupportChunks(getAssetDescCount(), g_assetDescs, nullptr, recursivePartitionPostSplitTestCounts, false);
}
TEST_F(ActorTestAllowWarnings, PartitionActorsFromBadDescriptorsToSupportChunksTestCounts)
{
partitionActorsToSupportChunks(getAssetDescMissingCoverageCount(), g_assetDescsMissingCoverage, nullptr, recursivePartitionPostSplitTestCounts, false);
}
TEST_F(ActorTestStrict, PartitionActorsToLeafChunksTestCounts)
{
partitionActorsToSupportChunks(getAssetDescCount(), g_assetDescs, nullptr, recursivePartitionPostSplitTestCounts, true);
}
TEST_F(ActorTestAllowWarnings, PartitionActorsFromBadDescriptorsToLeafChunksTestCounts)
{
partitionActorsToSupportChunks(getAssetDescMissingCoverageCount(), g_assetDescsMissingCoverage, nullptr, recursivePartitionPostSplitTestCounts, true);
}
TEST_F(ActorTestStrict, PartitionActorsToSupportChunksTestVisibility)
{
partitionActorsToSupportChunks(getAssetDescCount(), g_assetDescs, testActorVisibleChunks, recursivePartitionPostSplitTestVisibleChunks, false);
}
TEST_F(ActorTestAllowWarnings, PartitionActorsFromBadDescriptorsToSupportChunksTestVisibility)
{
partitionActorsToSupportChunks(getAssetDescMissingCoverageCount(), g_assetDescsMissingCoverage, testActorVisibleChunks, recursivePartitionPostSplitTestVisibleChunks, false);
}
TEST_F(ActorTestStrict, PartitionActorsToLeafChunksTestVisibility)
{
partitionActorsToSupportChunks(getAssetDescCount(), g_assetDescs, testActorVisibleChunks, recursivePartitionPostSplitTestVisibleChunks, true);
}
TEST_F(ActorTestAllowWarnings, PartitionActorsFromBadDescriptorsToLeafChunksTestVisibility)
{
partitionActorsToSupportChunks(getAssetDescMissingCoverageCount(), g_assetDescsMissingCoverage, testActorVisibleChunks, recursivePartitionPostSplitTestVisibleChunks, true);
}
TEST_F(ActorTestStrict, DamageLeafSupportActorsTestVisibility)
{
damageLeafSupportActors(4, 4, 5, false, testActorVisibleChunks, nullptr);
}
TEST_F(ActorTestStrict, DamageLeafSupportActorTestBlockSerialization)
{
typedef CubeAssetGenerator::BondFlags BF;
s_storage.resize(0);
damageLeafSupportActors(4, 4, 5, false, nullptr, testActorBlockSerialize);
s_curr = 0;
damageLeafSupportActors(4, 4, 5, false, nullptr, testActorBlockDeserialize);
s_storage.resize(0);
damageLeafSupportActors(4, 4, 5, false, nullptr, testActorBlockSerialize, BF::ALL_INTERNAL_BONDS | BF::Z_MINUS_WORLD_BONDS);
s_curr = 0;
damageLeafSupportActors(4, 4, 5, false, nullptr, testActorBlockDeserialize, BF::ALL_INTERNAL_BONDS | BF::Z_MINUS_WORLD_BONDS);
s_storage.resize(0);
damageLeafSupportActors(4, 4, 5, false, nullptr, testActorCapnSerialize);
s_curr = 0;
damageLeafSupportActors(4, 4, 5, false, nullptr, testActorCapnDeserialize);
s_storage.resize(0);
damageLeafSupportActors(4, 4, 5, false, nullptr, testActorCapnSerialize, BF::ALL_INTERNAL_BONDS | BF::Z_MINUS_WORLD_BONDS);
s_curr = 0;
damageLeafSupportActors(4, 4, 5, false, nullptr, testActorCapnDeserialize, BF::ALL_INTERNAL_BONDS | BF::Z_MINUS_WORLD_BONDS);
s_storage.resize(0);
}
TEST_F(ActorTestStrict, DISABLED_DamageSimpleLeafSupportActorTestActorSerializationNewFamily)
{
typedef CubeAssetGenerator::BondFlags BF;
damageLeafSupportActors(1, 1, 4, true, nullptr, testActorSerializationNewFamily);
damageLeafSupportActors(1, 1, 4, true, nullptr, testActorSerializationNewFamily, BF::ALL_INTERNAL_BONDS | BF::Z_MINUS_WORLD_BONDS);
}
TEST_F(ActorTestStrict, DamageSimpleLeafSupportActorTestActorSerializationPartialBlock)
{
typedef CubeAssetGenerator::BondFlags BF;
damageLeafSupportActors(1, 1, 4, true, nullptr, testActorSerializationPartialBlock);
damageLeafSupportActors(1, 1, 4, true, nullptr, testActorSerializationPartialBlock, BF::ALL_INTERNAL_BONDS | BF::Z_MINUS_WORLD_BONDS);
}
TEST_F(ActorTestStrict, DISABLED_DamageLeafSupportActorTestActorSerializationNewFamily)
{
typedef CubeAssetGenerator::BondFlags BF;
damageLeafSupportActors(4, 4, 4, false, nullptr, testActorSerializationNewFamily);
damageLeafSupportActors(4, 4, 4, false, nullptr, testActorSerializationNewFamily, BF::ALL_INTERNAL_BONDS | BF::Z_MINUS_WORLD_BONDS);
}
TEST_F(ActorTestStrict, DamageLeafSupportActorTestActorSerializationPartialBlock)
{
typedef CubeAssetGenerator::BondFlags BF;
damageLeafSupportActors(4, 4, 4, false, nullptr, testActorSerializationPartialBlock);
damageLeafSupportActors(4, 4, 4, false, nullptr, testActorSerializationPartialBlock, BF::ALL_INTERNAL_BONDS | BF::Z_MINUS_WORLD_BONDS);
}
TEST_F(ActorTestStrict, DamageMultipleIslandLeafSupportActorsTestVisibility)
{
typedef CubeAssetGenerator::BondFlags BF;
damageLeafSupportActors(4, 4, 5, false, testActorVisibleChunks, nullptr, BF::Y_BONDS | BF::Z_BONDS); // Only connect y-z plane islands
damageLeafSupportActors(4, 4, 5, false, testActorVisibleChunks, nullptr, BF::Z_BONDS); // Only connect z-direction islands
damageLeafSupportActors(4, 4, 5, false, testActorVisibleChunks, nullptr, BF::NO_BONDS); // All support chunks disconnected (single-chunk islands)
}
TEST_F(ActorTestStrict, DamageBoundToWorldLeafSupportActorsTestVisibility)
{
typedef CubeAssetGenerator::BondFlags BF;
damageLeafSupportActors(4, 4, 5, false, testActorVisibleChunks, nullptr, BF::ALL_INTERNAL_BONDS | BF::X_MINUS_WORLD_BONDS);
damageLeafSupportActors(4, 4, 5, false, testActorVisibleChunks, nullptr, BF::ALL_INTERNAL_BONDS | BF::Y_PLUS_WORLD_BONDS);
damageLeafSupportActors(4, 4, 5, false, testActorVisibleChunks, nullptr, BF::ALL_INTERNAL_BONDS | BF::Z_MINUS_WORLD_BONDS);
damageLeafSupportActors(4, 4, 5, false, testActorVisibleChunks, nullptr, BF::ALL_INTERNAL_BONDS | BF::X_PLUS_WORLD_BONDS | BF::Y_MINUS_WORLD_BONDS);
damageLeafSupportActors(4, 4, 5, false, testActorVisibleChunks, nullptr, BF::ALL_INTERNAL_BONDS | BF::X_PLUS_WORLD_BONDS | BF::X_MINUS_WORLD_BONDS
| BF::Y_PLUS_WORLD_BONDS | BF::Y_MINUS_WORLD_BONDS
| BF::Z_PLUS_WORLD_BONDS | BF::Z_MINUS_WORLD_BONDS);
}
| 51,019 | C++ | 40.581092 | 189 | 0.604049 |
NVIDIA-Omniverse/PhysX/blast/source/test/src/unit/AssetTests.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include "NvBlastAsset.h"
#include "NvBlastMath.h"
#include "BlastBaseTest.h"
#include "NvBlastTkFramework.h"
#include <random>
#include <algorithm>
// All supported platforms now provide serialization;
// keep the define for future platforms that might not.
#define ENABLE_SERIALIZATION_TESTS 1
#pragma warning( push )
#pragma warning( disable : 4267 )
// NOTE: Instead of excluding serialization and its tests when building on VC12, the tests should be broken out into a separate C++ file.
#if ENABLE_SERIALIZATION_TESTS
#include "NvBlastExtSerialization.h"
#include "NvBlastExtLlSerialization.h"
#include "NvBlastExtSerializationInternal.h"
#endif
#include "NvBlastExtAssetUtils.h"
#pragma warning( pop )
#include <fstream>
#include <iosfwd>
#ifdef WIN32
#include <windows.h>
#endif
template<int FailLevel, int Verbosity>
class AssetTest : public BlastBaseTest<FailLevel, Verbosity>
{
public:
AssetTest()
{
NvBlastTkFrameworkCreate();
}
~AssetTest()
{
NvBlastTkFrameworkGet()->release();
}
static void messageLog(int type, const char* msg, const char* file, int line)
{
BlastBaseTest<FailLevel, Verbosity>::messageLog(type, msg, file, line);
}
static void* alloc(size_t size)
{
return BlastBaseTest<FailLevel, Verbosity>::alignedZeroedAlloc(size);
}
static void free(void* mem)
{
BlastBaseTest<FailLevel, Verbosity>::alignedFree(mem);
}
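// Checks the cached subtree leaf-chunk counts: each chunk's count must equal the sum of its
// children's counts (or 1 for a leaf), and the root subtrees must sum to the asset's total leaf chunk count.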
void testSubtreeLeafChunkCounts(const Nv::Blast::Asset& a)
{
const NvBlastChunk* chunks = a.getChunks();
const uint32_t* subtreeLeafChunkCounts = a.getSubtreeLeafChunkCounts();
uint32_t totalLeafChunkCount = 0;
for (uint32_t chunkIndex = 0; chunkIndex < a.m_chunkCount; ++chunkIndex)
{
const NvBlastChunk& chunk = chunks[chunkIndex];
if (Nv::Blast::isInvalidIndex(chunk.parentChunkIndex))
{
totalLeafChunkCount += subtreeLeafChunkCounts[chunkIndex];
}
const bool isLeafChunk = chunk.firstChildIndex >= chunk.childIndexStop;
uint32_t subtreeLeafChunkCount = isLeafChunk ? 1 : 0;
for (uint32_t childIndex = chunk.firstChildIndex; childIndex < chunk.childIndexStop; ++childIndex)
{
subtreeLeafChunkCount += subtreeLeafChunkCounts[childIndex];
}
EXPECT_EQ(subtreeLeafChunkCount, subtreeLeafChunkCounts[chunkIndex]);
}
EXPECT_EQ(totalLeafChunkCount, a.m_leafChunkCount);
}
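// Checks that the chunk-to-graph-node map is consistent with the graph's chunk indices:
// a mapped chunk's node must point back to that chunk, and an unmapped chunk must not appear in the graph at all.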
void testChunkToNodeMap(const Nv::Blast::Asset& a)
{
for (uint32_t chunkIndex = 0; chunkIndex < a.m_chunkCount; ++chunkIndex)
{
const uint32_t nodeIndex = a.getChunkToGraphNodeMap()[chunkIndex];
if (!Nv::Blast::isInvalidIndex(nodeIndex))
{
EXPECT_LT(nodeIndex, a.m_graph.m_nodeCount);
EXPECT_EQ(chunkIndex, a.m_graph.getChunkIndices()[nodeIndex]);
}
else
{
const uint32_t* chunkIndexStop = a.m_graph.getChunkIndices() + a.m_graph.m_nodeCount;
const uint32_t* it = std::find<const uint32_t*, uint32_t>(a.m_graph.getChunkIndices(), chunkIndexStop, chunkIndex);
EXPECT_EQ(chunkIndexStop, it);
}
}
}
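// Creates an asset from the descriptor and verifies its chunk, node, bond, leaf, and subsupport counts
// against the expected values, then runs the subtree-count and chunk-to-node consistency checks above.
// Returns nullptr if asset creation fails.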
NvBlastAsset* buildAsset(const ExpectedAssetValues& expected, const NvBlastAssetDesc* desc)
{
std::vector<char> scratch;
scratch.resize((size_t)NvBlastGetRequiredScratchForCreateAsset(desc, messageLog));
void* mem = alloc(NvBlastGetAssetMemorySize(desc, messageLog));
NvBlastAsset* asset = NvBlastCreateAsset(mem, desc, scratch.data(), messageLog);
EXPECT_TRUE(asset != nullptr);
if (asset == nullptr)
{
free(mem);
return nullptr;
}
Nv::Blast::Asset& a = *(Nv::Blast::Asset*)asset;
EXPECT_EQ(expected.totalChunkCount, a.m_chunkCount);
EXPECT_EQ(expected.graphNodeCount, a.m_graph.m_nodeCount);
EXPECT_EQ(expected.bondCount, a.m_graph.getAdjacencyPartition()[a.m_graph.m_nodeCount] / 2);
EXPECT_EQ(expected.leafChunkCount, a.m_leafChunkCount);
EXPECT_EQ(expected.subsupportChunkCount, a.m_chunkCount - a.m_firstSubsupportChunkIndex);
testSubtreeLeafChunkCounts(a);
testChunkToNodeMap(a);
return asset;
}
void checkAssetsExpected(Nv::Blast::Asset& asset, const ExpectedAssetValues& expected)
{
EXPECT_EQ(expected.totalChunkCount, asset.m_chunkCount);
EXPECT_EQ(expected.graphNodeCount, asset.m_graph.m_nodeCount);
EXPECT_EQ(expected.bondCount, asset.m_graph.getAdjacencyPartition()[asset.m_graph.m_nodeCount] / 2);
EXPECT_EQ(expected.leafChunkCount, asset.m_leafChunkCount);
EXPECT_EQ(expected.subsupportChunkCount, asset.m_chunkCount - asset.m_firstSubsupportChunkIndex);
testSubtreeLeafChunkCounts(asset);
testChunkToNodeMap(asset);
}
// expects that the bond normal points from the lower indexed chunk to higher index chunk
// uses chunk.centroid
// convention, requirement from findClosestNode
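// Example: for a bond between chunk 2 at centroid (0,0,0) and chunk 5 at centroid (1,0,0),
// the stored normal must have a positive x component, since dot(normal, centroid5 - centroid2) > 0 is required.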
void checkNormalDir(NvBlastChunkDesc* chunkDescs, size_t chunkDescCount, NvBlastBondDesc* bondDescs, size_t bondDescCount)
{
for (size_t bondIndex = 0; bondIndex < bondDescCount; ++bondIndex)
{
NvBlastBondDesc& bond = bondDescs[bondIndex];
uint32_t chunkIndex0 = bond.chunkIndices[0];
uint32_t chunkIndex1 = bond.chunkIndices[1];
bool swap = chunkIndex0 > chunkIndex1;
uint32_t testIndex0 = swap ? chunkIndex1 : chunkIndex0;
uint32_t testIndex1 = swap ? chunkIndex0 : chunkIndex1;
EXPECT_TRUE(testIndex0 < testIndex1);
// no convention for world chunks
if (!Nv::Blast::isInvalidIndex(testIndex0) && !Nv::Blast::isInvalidIndex(testIndex1))
{
NvBlastChunkDesc& chunk0 = chunkDescs[testIndex0];
NvBlastChunkDesc& chunk1 = chunkDescs[testIndex1];
float dir[3];
Nv::Blast::VecMath::sub(chunk1.centroid, chunk0.centroid, dir);
bool meetsConvention = Nv::Blast::VecMath::dot(bond.bond.normal, dir) > 0;
EXPECT_TRUE(meetsConvention);
if (!meetsConvention)
{
printf("bond %zd chunks(%d,%d): %.2f %.2f %.2f %.2f %.2f %.2f %d\n",
bondIndex, chunkIndex0, chunkIndex1,
bond.bond.normal[0], bond.bond.normal[1], bond.bond.normal[2],
dir[0], dir[1], dir[2],
Nv::Blast::VecMath::dot(bond.bond.normal, dir) > 0);
}
}
}
}
// expects that the bond normal points from the lower indexed node to higher index node
// uses chunk.centroid
// convention, requirement from findClosestNode
void checkNormalDir(const NvBlastSupportGraph graph, const NvBlastChunk* assetChunks, const NvBlastBond* assetBonds)
{
for (uint32_t nodeIndex = 0; nodeIndex < graph.nodeCount; nodeIndex++)
{
uint32_t adjStart = graph.adjacencyPartition[nodeIndex];
uint32_t adjStop = graph.adjacencyPartition[nodeIndex + 1];
for (uint32_t adj = adjStart; adj < adjStop; ++adj)
{
uint32_t adjNodeIndex = graph.adjacentNodeIndices[adj];
bool swap = nodeIndex > adjNodeIndex;
uint32_t testIndex0 = swap ? adjNodeIndex : nodeIndex;
uint32_t testIndex1 = swap ? nodeIndex : adjNodeIndex;
// no convention for world chunks
if (!Nv::Blast::isInvalidIndex(graph.chunkIndices[testIndex0]) && !Nv::Blast::isInvalidIndex(graph.chunkIndices[testIndex1]))
{
const NvBlastChunk& chunk0 = assetChunks[graph.chunkIndices[testIndex0]];
const NvBlastChunk& chunk1 = assetChunks[graph.chunkIndices[testIndex1]];
uint32_t bondIndex = graph.adjacentBondIndices[adj];
const NvBlastBond& bond = assetBonds[bondIndex];
float dir[3];
Nv::Blast::VecMath::sub(chunk1.centroid, chunk0.centroid, dir);
bool meetsConvention = Nv::Blast::VecMath::dot(bond.normal, dir) > 0;
EXPECT_TRUE(meetsConvention);
if (!meetsConvention)
{
printf("bond %d nodes(%d,%d): %.2f %.2f %.2f %.2f %.2f %.2f %d\n",
bondIndex, nodeIndex, adjNodeIndex,
bond.normal[0], bond.normal[1], bond.normal[2],
dir[0], dir[1], dir[2],
Nv::Blast::VecMath::dot(bond.normal, dir) > 0);
}
}
}
}
}
void checkNormalDir(const NvBlastAsset* asset)
{
const NvBlastChunk* assetChunks = NvBlastAssetGetChunks(asset, nullptr);
const NvBlastBond* assetBonds = NvBlastAssetGetBonds(asset, nullptr);
const NvBlastSupportGraph graph = NvBlastAssetGetSupportGraph(asset, nullptr);
checkNormalDir(graph, assetChunks, assetBonds);
}
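// Repeatedly shuffles the descriptor order, repairs it (with or without the Tk framework helper),
// and verifies that the rebuilt asset still matches the expected values and the bond normal convention.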
void buildAssetShufflingDescriptors(const NvBlastAssetDesc* desc, const ExpectedAssetValues& expected, uint32_t shuffleCount, bool useTk)
{
NvBlastAssetDesc shuffledDesc = *desc;
std::vector<NvBlastChunkDesc> chunkDescs(desc->chunkDescs, desc->chunkDescs + desc->chunkCount);
shuffledDesc.chunkDescs = chunkDescs.data();
std::vector<NvBlastBondDesc> bondDescs(desc->bondDescs, desc->bondDescs + desc->bondCount);
shuffledDesc.bondDescs = bondDescs.data();
if (!useTk)
{
std::vector<char> scratch(desc->chunkCount);
NvBlastEnsureAssetExactSupportCoverage(chunkDescs.data(), desc->chunkCount, scratch.data(), messageLog);
}
else
{
NvBlastTkFrameworkGet()->ensureAssetExactSupportCoverage(chunkDescs.data(), desc->chunkCount);
}
for (uint32_t i = 0; i < shuffleCount; ++i)
{
checkNormalDir(chunkDescs.data(), chunkDescs.size(), bondDescs.data(), bondDescs.size());
shuffleAndFixChunkDescs(chunkDescs.data(), desc->chunkCount, bondDescs.data(), desc->bondCount, useTk);
checkNormalDir(chunkDescs.data(), chunkDescs.size(), bondDescs.data(), bondDescs.size());
NvBlastAsset* asset = buildAsset(expected, &shuffledDesc);
EXPECT_TRUE(asset != nullptr);
checkNormalDir(asset);
if (asset)
{
free(asset);
}
}
}
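// Shuffles the chunk descriptors until asset creation fails on the shuffled order (up to a fixed number of trials),
// then restores a valid order via the chunk reorder map functions or TkFramework::reorderAssetDescChunks.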
void shuffleAndFixChunkDescs(NvBlastChunkDesc* chunkDescs, uint32_t chunkDescCount, NvBlastBondDesc* bondDescs, uint32_t bondDescCount, bool useTk)
{
// Create reorder array and fill with identity map
std::vector<uint32_t> shuffledOrder(chunkDescCount);
for (uint32_t i = 0; i < chunkDescCount; ++i)
{
shuffledOrder[i] = i;
}
// An array into which to copy the reordered descs
std::vector<NvBlastChunkDesc> shuffledChunkDescs(chunkDescCount);
std::random_device rd;
std::mt19937 g(rd());
std::vector<char> scratch;
const uint32_t trials = 30;
uint32_t attempt = 0;
while(1)
{
// Shuffle the reorder array
std::shuffle(shuffledOrder.begin(), shuffledOrder.end(), g);
// Save initial bonds
std::vector<NvBlastBondDesc> savedBondDescs(bondDescs, bondDescs + bondDescCount);
// Shuffle chunks and bonds
NvBlastApplyAssetDescChunkReorderMap(shuffledChunkDescs.data(), chunkDescs, chunkDescCount, bondDescs, bondDescCount, shuffledOrder.data(), true, nullptr);
// All normals should still point in the expected direction (the reorder call swaps them where the chunk order changed)
checkNormalDir(shuffledChunkDescs.data(), chunkDescCount, bondDescs, bondDescCount);
checkNormalDir(chunkDescs, chunkDescCount, savedBondDescs.data(), bondDescCount);
// Check the results
for (uint32_t i = 0; i < chunkDescCount; ++i)
{
EXPECT_EQ(chunkDescs[i].userData, shuffledChunkDescs[shuffledOrder[i]].userData);
EXPECT_TRUE(chunkDescs[i].parentChunkDescIndex > chunkDescCount || shuffledChunkDescs[shuffledOrder[i]].parentChunkDescIndex == shuffledOrder[chunkDescs[i].parentChunkDescIndex]);
}
for (uint32_t i = 0; i < bondDescCount; ++i)
{
for (uint32_t k = 0; k < 2; ++k)
{
if (!Nv::Blast::isInvalidIndex(savedBondDescs[i].chunkIndices[k]))
{
EXPECT_EQ(shuffledOrder[savedBondDescs[i].chunkIndices[k]], bondDescs[i].chunkIndices[k]);
}
}
}
// Try creating an asset; with shuffled descriptors it should usually fail (otherwise make another attempt)
NvBlastAssetDesc desc = { chunkDescCount, shuffledChunkDescs.data(), bondDescCount, bondDescs };
scratch.resize((size_t)NvBlastGetRequiredScratchForCreateAsset(&desc, nullptr));
void* mem = alloc(NvBlastGetAssetMemorySize(&desc, nullptr));
NvBlastAsset* asset = NvBlastCreateAsset(mem, &desc, scratch.data(), nullptr);
if (asset == nullptr)
{
free(mem);
break;
}
else
{
free(asset);
memcpy(bondDescs, savedBondDescs.data(), sizeof(NvBlastBondDesc) * bondDescCount);
attempt++;
if (attempt >= trials)
{
GTEST_NONFATAL_FAILURE_("Shuffled chunk descs should fail asset creation (most of the time).");
break;
}
}
}
// Now we want to fix that order
if (!useTk)
{
std::vector<uint32_t> chunkReorderMap(chunkDescCount);
std::vector<char> scratch2(3 * chunkDescCount * sizeof(uint32_t));
const bool isIdentity = NvBlastBuildAssetDescChunkReorderMap(chunkReorderMap.data(), shuffledChunkDescs.data(), chunkDescCount, scratch2.data(), messageLog);
EXPECT_FALSE(isIdentity);
NvBlastApplyAssetDescChunkReorderMap(chunkDescs, shuffledChunkDescs.data(), chunkDescCount, bondDescs, bondDescCount, chunkReorderMap.data(), true, messageLog);
}
else
{
memcpy(chunkDescs, shuffledChunkDescs.data(), chunkDescCount * sizeof(NvBlastChunkDesc));
const bool isIdentity = NvBlastTkFrameworkGet()->reorderAssetDescChunks(chunkDescs, chunkDescCount, bondDescs, bondDescCount, nullptr, true);
EXPECT_FALSE(isIdentity);
}
}
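// Merges two copies of the asset described by 'desc' with four connecting bonds using NvBlastExtAssetUtilsMergeAssets.
// With fail == false the merged descriptor must produce a valid asset; with fail == true, invalid chunkReorderMap
// arguments are passed and asset creation from the merged descriptor is expected to fail.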
void mergeAssetTest(const NvBlastAssetDesc& desc, bool fail)
{
std::vector<char> scratch;
scratch.resize((size_t)NvBlastGetRequiredScratchForCreateAsset(&desc, messageLog));
void* mem = NVBLAST_ALLOC(NvBlastGetAssetMemorySize(&desc, messageLog));
NvBlastAsset* asset = NvBlastCreateAsset(mem, &desc, scratch.data(), messageLog);
EXPECT_TRUE(asset != nullptr);
if (asset == nullptr)
{
free(mem);
return;
}
// Merge two copies of this asset together
const NvBlastAsset* components[2] = { asset, asset };
const NvcVec3 translations[2] = { { 0, 0, 0 },{ 2, 0, 0 } };
const NvBlastBond bond = { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 1.0f, 0.0f, 0.0f }, 0 };
NvBlastExtAssetUtilsBondDesc newBondDescs[4];
for (int i = 0; i < 4; ++i)
{
newBondDescs[i].bond = bond;
newBondDescs[i].chunkIndices[0] = 2 * (i + 1);
newBondDescs[i].chunkIndices[1] = 2 * i + 1;
newBondDescs[i].componentIndices[0] = 0;
newBondDescs[i].componentIndices[1] = 1;
}
// Create a merged descriptor
std::vector<uint32_t> chunkIndexOffsets(2);
std::vector<uint32_t> chunkReorderMap(2 * desc.chunkCount);
NvBlastAssetDesc mergedDesc = NvBlastExtAssetUtilsMergeAssets(components, nullptr, nullptr, translations, 2, newBondDescs, 4, chunkIndexOffsets.data(), chunkReorderMap.data(), 2 * desc.chunkCount);
EXPECT_EQ(2 * desc.bondCount + 4, mergedDesc.bondCount);
EXPECT_EQ(2 * desc.chunkCount, mergedDesc.chunkCount);
for (uint32_t i = 0; i < 2 * desc.chunkCount; ++i)
{
EXPECT_LT(chunkReorderMap[i], 2 * desc.chunkCount);
}
EXPECT_EQ(0, chunkIndexOffsets[0]);
EXPECT_EQ(desc.chunkCount, chunkIndexOffsets[1]);
scratch.resize((size_t)NvBlastGetRequiredScratchForCreateAsset(&mergedDesc, messageLog));
mem = NVBLAST_ALLOC(NvBlastGetAssetMemorySize(&mergedDesc, messageLog));
NvBlastAsset* mergedAsset = NvBlastCreateAsset(mem, &mergedDesc, scratch.data(), messageLog);
EXPECT_TRUE(mergedAsset != nullptr);
if (mergedAsset == nullptr)
{
free(mem);
return;
}
NVBLAST_FREE(const_cast<NvBlastBondDesc*>(mergedDesc.bondDescs));
NVBLAST_FREE(const_cast<NvBlastChunkDesc*>(mergedDesc.chunkDescs));
NVBLAST_FREE(mergedAsset);
if (!fail)
{
mergedDesc = NvBlastExtAssetUtilsMergeAssets(components, nullptr, nullptr, translations, 2, newBondDescs, 4, nullptr, chunkReorderMap.data(), 2 * desc.chunkCount);
EXPECT_EQ(2 * desc.bondCount + 4, mergedDesc.bondCount);
EXPECT_EQ(2 * desc.chunkCount, mergedDesc.chunkCount);
for (uint32_t i = 0; i < 2 * desc.chunkCount; ++i)
{
EXPECT_LT(chunkReorderMap[i], 2 * desc.chunkCount);
}
scratch.resize((size_t)NvBlastGetRequiredScratchForCreateAsset(&mergedDesc, messageLog));
mem = NVBLAST_ALLOC(NvBlastGetAssetMemorySize(&mergedDesc, messageLog));
mergedAsset = NvBlastCreateAsset(mem, &mergedDesc, scratch.data(), messageLog);
EXPECT_TRUE(mergedAsset != nullptr);
free(mem);
NVBLAST_FREE(const_cast<NvBlastBondDesc*>(mergedDesc.bondDescs));
NVBLAST_FREE(const_cast<NvBlastChunkDesc*>(mergedDesc.chunkDescs));
}
else
{
// We don't pass in a valid chunkReorderMap so asset creation should fail
mergedDesc = NvBlastExtAssetUtilsMergeAssets(components, nullptr, nullptr, translations, 2, newBondDescs, 4, chunkIndexOffsets.data(), nullptr, 0);
EXPECT_EQ(2 * desc.bondCount + 4, mergedDesc.bondCount);
EXPECT_EQ(2 * desc.chunkCount, mergedDesc.chunkCount);
EXPECT_EQ(0, chunkIndexOffsets[0]);
EXPECT_EQ(desc.chunkCount, chunkIndexOffsets[1]);
scratch.resize((size_t)NvBlastGetRequiredScratchForCreateAsset(&mergedDesc, messageLog));
mem = NVBLAST_ALLOC(NvBlastGetAssetMemorySize(&mergedDesc, messageLog));
mergedAsset = NvBlastCreateAsset(mem, &mergedDesc, scratch.data(), messageLog);
EXPECT_TRUE(mergedAsset == nullptr);
free(mem);
NVBLAST_FREE(const_cast<NvBlastBondDesc*>(mergedDesc.bondDescs));
NVBLAST_FREE(const_cast<NvBlastChunkDesc*>(mergedDesc.chunkDescs));
mergedDesc = NvBlastExtAssetUtilsMergeAssets(components, nullptr, nullptr, translations, 2, newBondDescs, 4, nullptr, nullptr, 0);
EXPECT_EQ(2 * desc.bondCount + 4, mergedDesc.bondCount);
EXPECT_EQ(2 * desc.chunkCount, mergedDesc.chunkCount);
scratch.resize((size_t)NvBlastGetRequiredScratchForCreateAsset(&mergedDesc, messageLog));
mem = NVBLAST_ALLOC(NvBlastGetAssetMemorySize(&mergedDesc, messageLog));
mergedAsset = NvBlastCreateAsset(mem, &mergedDesc, scratch.data(), messageLog);
EXPECT_TRUE(mergedAsset == nullptr);
free(mem);
NVBLAST_FREE(const_cast<NvBlastBondDesc*>(mergedDesc.bondDescs));
NVBLAST_FREE(const_cast<NvBlastChunkDesc*>(mergedDesc.chunkDescs));
// We lie and say the chunkReorderMap is not large enough. It should be filled with 0xFFFFFFFF up to the size we gave
mergedDesc = NvBlastExtAssetUtilsMergeAssets(components, nullptr, nullptr, translations, 2, newBondDescs, 4, nullptr, chunkReorderMap.data(), desc.chunkCount);
EXPECT_EQ(2 * desc.bondCount + 4, mergedDesc.bondCount);
EXPECT_EQ(2 * desc.chunkCount, mergedDesc.chunkCount);
for (uint32_t i = 0; i < desc.chunkCount; ++i)
{
EXPECT_TRUE(Nv::Blast::isInvalidIndex(chunkReorderMap[i]));
}
scratch.resize((size_t)NvBlastGetRequiredScratchForCreateAsset(&mergedDesc, messageLog));
mem = NVBLAST_ALLOC(NvBlastGetAssetMemorySize(&mergedDesc, messageLog));
mergedAsset = NvBlastCreateAsset(mem, &mergedDesc, scratch.data(), messageLog);
EXPECT_TRUE(mergedAsset == nullptr);
free(mem);
NVBLAST_FREE(const_cast<NvBlastBondDesc*>(mergedDesc.bondDescs));
NVBLAST_FREE(const_cast<NvBlastChunkDesc*>(mergedDesc.chunkDescs));
mergedDesc = NvBlastExtAssetUtilsMergeAssets(components, nullptr, nullptr, translations, 2, newBondDescs, 4, chunkIndexOffsets.data(), chunkReorderMap.data(), desc.chunkCount);
EXPECT_EQ(2 * desc.bondCount + 4, mergedDesc.bondCount);
EXPECT_EQ(2 * desc.chunkCount, mergedDesc.chunkCount);
for (uint32_t i = 0; i < desc.chunkCount; ++i)
{
EXPECT_TRUE(Nv::Blast::isInvalidIndex(chunkReorderMap[i]));
}
EXPECT_EQ(0, chunkIndexOffsets[0]);
EXPECT_EQ(desc.chunkCount, chunkIndexOffsets[1]);
scratch.resize((size_t)NvBlastGetRequiredScratchForCreateAsset(&mergedDesc, messageLog));
mem = NVBLAST_ALLOC(NvBlastGetAssetMemorySize(&mergedDesc, messageLog));
mergedAsset = NvBlastCreateAsset(mem, &mergedDesc, scratch.data(), messageLog);
EXPECT_TRUE(mergedAsset == nullptr);
free(mem);
NVBLAST_FREE(const_cast<NvBlastBondDesc*>(mergedDesc.bondDescs));
NVBLAST_FREE(const_cast<NvBlastChunkDesc*>(mergedDesc.chunkDescs));
}
// Finally free the original asset
NVBLAST_FREE(asset);
}
};
typedef AssetTest<-1, 0> AssetTestAllowErrorsSilently;
typedef AssetTest<NvBlastMessage::Error, 0> AssetTestAllowWarningsSilently;
typedef AssetTest<NvBlastMessage::Error, 1> AssetTestAllowWarnings;
typedef AssetTest<NvBlastMessage::Warning, 1> AssetTestStrict;
TEST_F(AssetTestStrict, BuildAssets)
{
const uint32_t assetDescCount = sizeof(g_assetDescs) / sizeof(g_assetDescs[0]);
std::vector<NvBlastAsset*> assets(assetDescCount);
// Build
for (uint32_t i = 0; i < assetDescCount; ++i)
{
assets[i] = buildAsset(g_assetExpectedValues[i], &g_assetDescs[i]);
}
// Destroy
for (uint32_t i = 0; i < assetDescCount; ++i)
{
if (assets[i])
{
free(assets[i]);
}
}
}
#if ENABLE_SERIALIZATION_TESTS
TEST_F(AssetTestStrict, SerializeAssets)
{
Nv::Blast::ExtSerialization* ser = NvBlastExtSerializationCreate();
EXPECT_TRUE(ser != nullptr);
const uint32_t assetDescCount = sizeof(g_assetDescs) / sizeof(g_assetDescs[0]);
std::vector<Nv::Blast::Asset*> assets(assetDescCount);
// Build
for (uint32_t i = 0; i < assetDescCount; ++i)
{
assets[i] = reinterpret_cast<Nv::Blast::Asset*>(buildAsset(g_assetExpectedValues[i], &g_assetDescs[i]));
}
// Serialize them
for (Nv::Blast::Asset* asset : assets)
{
void* buffer;
const uint64_t size = NvBlastExtSerializationSerializeAssetIntoBuffer(buffer, *ser, asset);
EXPECT_TRUE(size != 0);
uint32_t objectTypeID;
uint32_t encodingID;
uint64_t dataSize = 0;
EXPECT_TRUE(ser->peekHeader(&objectTypeID, &encodingID, &dataSize, buffer, size));
EXPECT_EQ(objectTypeID, Nv::Blast::LlObjectTypeID::Asset);
EXPECT_EQ(encodingID, ser->getSerializationEncoding());
EXPECT_EQ(dataSize + Nv::Blast::ExtSerializationInternal::HeaderSize, size);
}
// Destroy
for (uint32_t i = 0; i < assetDescCount; ++i)
{
if (assets[i])
{
free(assets[i]);
}
}
ser->release();
}
TEST_F(AssetTestStrict, SerializeAssetsRoundTrip)
{
Nv::Blast::ExtSerialization* ser = NvBlastExtSerializationCreate();
EXPECT_TRUE(ser != nullptr);
const uint32_t assetDescCount = sizeof(g_assetDescs) / sizeof(g_assetDescs[0]);
std::vector<Nv::Blast::Asset*> assets(assetDescCount);
// Build
for (uint32_t i = 0; i < assetDescCount; ++i)
{
assets[i] = reinterpret_cast<Nv::Blast::Asset*>(buildAsset(g_assetExpectedValues[i], &g_assetDescs[i]));
}
const uint32_t encodings[] =
{
Nv::Blast::ExtSerialization::EncodingID::CapnProtoBinary,
Nv::Blast::ExtSerialization::EncodingID::RawBinary
};
for (auto encoding : encodings)
{
ser->setSerializationEncoding(encoding);
// Serialize them
for (uint32_t i = 0; i < assetDescCount; ++i)
{
Nv::Blast::Asset* asset = assets[i];
void* buffer;
const uint64_t size = NvBlastExtSerializationSerializeAssetIntoBuffer(buffer, *ser, asset);
EXPECT_TRUE(size != 0);
Nv::Blast::Asset* rtAsset = reinterpret_cast<Nv::Blast::Asset*>(ser->deserializeFromBuffer(buffer, size));
//TODO: Compare assets
checkAssetsExpected(*rtAsset, g_assetExpectedValues[i]);
free(static_cast<void*>(rtAsset));
}
}
// Destroy
for (uint32_t i = 0; i < assetDescCount; ++i)
{
if (assets[i])
{
free(assets[i]);
}
}
ser->release();
}
TEST_F(AssetTestStrict, SerializeAssetsRoundTripWithSkipping)
{
Nv::Blast::ExtSerialization* ser = NvBlastExtSerializationCreate();
EXPECT_TRUE(ser != nullptr);
std::vector<char> stream;
class StreamBufferProvider : public Nv::Blast::ExtSerialization::BufferProvider
{
public:
StreamBufferProvider(std::vector<char>& stream) : m_stream(stream), m_cursor(0) {}
virtual void* requestBuffer(size_t size) override
{
m_stream.resize(m_cursor + size);
void* data = m_stream.data() + m_cursor;
m_cursor += size;
return data;
}
private:
std::vector<char>& m_stream;
size_t m_cursor;
} myStreamProvider(stream);
ser->setBufferProvider(&myStreamProvider);
const uint32_t assetDescCount = sizeof(g_assetDescs) / sizeof(g_assetDescs[0]);
std::vector<Nv::Blast::Asset*> assets(assetDescCount);
// Build
for (uint32_t i = 0; i < assetDescCount; ++i)
{
assets[i] = reinterpret_cast<Nv::Blast::Asset*>(buildAsset(g_assetExpectedValues[i], &g_assetDescs[i]));
}
const uint32_t encodings[] =
{
Nv::Blast::ExtSerialization::EncodingID::CapnProtoBinary,
Nv::Blast::ExtSerialization::EncodingID::RawBinary
};
for (auto encoding : encodings)
{
ser->setSerializationEncoding(encoding);
// Serialize them
for (uint32_t i = 0; i < assetDescCount; ++i)
{
void* buffer;
const uint64_t size = NvBlastExtSerializationSerializeAssetIntoBuffer(buffer, *ser, assets[i]);
EXPECT_TRUE(size != 0);
}
}
// Deserialize from stream
const void* buffer = stream.data();
uint64_t bufferSize = stream.size();
for (uint32_t assetCount = 0; bufferSize; ++assetCount)
{
uint32_t objectTypeID;
uint32_t encodingID;
const bool peekSuccess = ser->peekHeader(&objectTypeID, &encodingID, nullptr, buffer, bufferSize);
EXPECT_TRUE(peekSuccess);
if (!peekSuccess)
{
break;
}
EXPECT_EQ(Nv::Blast::LlObjectTypeID::Asset, objectTypeID);
if (assetCount < assetDescCount)
{
EXPECT_EQ(Nv::Blast::ExtSerialization::EncodingID::CapnProtoBinary, encodingID);
}
else
{
EXPECT_EQ(Nv::Blast::ExtSerialization::EncodingID::RawBinary, encodingID);
}
const bool skip = (assetCount & 1) != 0;
if (!skip)
{
const uint32_t assetnum = assetCount % assetDescCount;
Nv::Blast::Asset* rtAsset = reinterpret_cast<Nv::Blast::Asset*>(ser->deserializeFromBuffer(buffer, bufferSize));
EXPECT_TRUE(rtAsset != nullptr);
if (rtAsset == nullptr)
{
break;
}
//TODO: Compare assets
checkAssetsExpected(*rtAsset, g_assetExpectedValues[assetnum]);
free(static_cast<void*>(rtAsset));
}
buffer = ser->skipObject(bufferSize, buffer);
}
// Destroy
for (uint32_t i = 0; i < assetDescCount; ++i)
{
if (assets[i])
{
free(assets[i]);
}
}
ser->release();
}
#endif // ENABLE_SERIALIZATION_TESTS
TEST_F(AssetTestAllowWarnings, BuildAssetsMissingCoverage)
{
const uint32_t assetDescCount = sizeof(g_assetDescsMissingCoverage) / sizeof(g_assetDescsMissingCoverage[0]);
std::vector<NvBlastAsset*> assets(assetDescCount);
// Build
for (uint32_t i = 0; i < assetDescCount; ++i)
{
const NvBlastAssetDesc* desc = &g_assetDescsMissingCoverage[i];
NvBlastAssetDesc fixedDesc = *desc;
std::vector<NvBlastChunkDesc> chunkDescs(desc->chunkDescs, desc->chunkDescs + desc->chunkCount);
std::vector<NvBlastBondDesc> bondDescs(desc->bondDescs, desc->bondDescs + desc->bondCount);
std::vector<uint32_t> chunkReorderMap(desc->chunkCount);
std::vector<char> scratch(desc->chunkCount * sizeof(NvBlastChunkDesc));
const bool changedCoverage = !NvBlastEnsureAssetExactSupportCoverage(chunkDescs.data(), fixedDesc.chunkCount, scratch.data(), messageLog);
EXPECT_TRUE(changedCoverage);
NvBlastReorderAssetDescChunks(chunkDescs.data(), fixedDesc.chunkCount, bondDescs.data(), fixedDesc.bondCount, chunkReorderMap.data(), true, scratch.data(), messageLog);
fixedDesc.chunkDescs = chunkDescs.data();
fixedDesc.bondDescs = bondDescs.data();
assets[i] = buildAsset(g_assetsFromMissingCoverageExpectedValues[i], &fixedDesc);
}
// Destroy
for (uint32_t i = 0; i < assetDescCount; ++i)
{
if (assets[i])
{
free(assets[i]);
}
}
}
TEST_F(AssetTestAllowWarningsSilently, BuildAssetsShufflingChunkDescriptors)
{
for (uint32_t i = 0; i < sizeof(g_assetDescs) / sizeof(g_assetDescs[0]); ++i)
{
buildAssetShufflingDescriptors(&g_assetDescs[i], g_assetExpectedValues[i], 10, false);
}
for (uint32_t i = 0; i < sizeof(g_assetDescsMissingCoverage) / sizeof(g_assetDescsMissingCoverage[0]); ++i)
{
buildAssetShufflingDescriptors(&g_assetDescsMissingCoverage[i], g_assetsFromMissingCoverageExpectedValues[i], 10, false);
}
}
TEST_F(AssetTestAllowWarningsSilently, BuildAssetsShufflingChunkDescriptorsUsingTk)
{
for (uint32_t i = 0; i < sizeof(g_assetDescs) / sizeof(g_assetDescs[0]); ++i)
{
buildAssetShufflingDescriptors(&g_assetDescs[i], g_assetExpectedValues[i], 10, true);
}
for (uint32_t i = 0; i < sizeof(g_assetDescsMissingCoverage) / sizeof(g_assetDescsMissingCoverage[0]); ++i)
{
buildAssetShufflingDescriptors(&g_assetDescsMissingCoverage[i], g_assetsFromMissingCoverageExpectedValues[i], 10, true);
}
}
TEST_F(AssetTestStrict, MergeAssetsUpperSupportOnly)
{
mergeAssetTest(g_assetDescs[0], false);
}
TEST_F(AssetTestStrict, MergeAssetsWithSubsupport)
{
mergeAssetTest(g_assetDescs[1], false);
}
TEST_F(AssetTestStrict, MergeAssetsWithWorldBondsUpperSupportOnly)
{
mergeAssetTest(g_assetDescs[3], false);
}
TEST_F(AssetTestStrict, MergeAssetsWithWorldBondsWithSubsupport)
{
mergeAssetTest(g_assetDescs[4], false);
}
TEST_F(AssetTestAllowErrorsSilently, MergeAssetsUpperSupportOnlyExpectFail)
{
mergeAssetTest(g_assetDescs[0], true);
}
TEST_F(AssetTestAllowErrorsSilently, MergeAssetsWithSubsupportExpectFail)
{
mergeAssetTest(g_assetDescs[1], true);
}
TEST_F(AssetTestAllowErrorsSilently, MergeAssetsWithWorldBondsUpperSupportOnlyExpectFail)
{
mergeAssetTest(g_assetDescs[3], true);
}
TEST_F(AssetTestAllowErrorsSilently, MergeAssetsWithWorldBondsWithSubsupportExpectFail)
{
mergeAssetTest(g_assetDescs[4], true);
}
| 34,666 | C++ | 39.216937 | 205 | 0.633849 |
NVIDIA-Omniverse/PhysX/blast/source/test/src/unit/MultithreadingTests.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include "BlastBaseTest.h"
#include "AssetGenerator.h"
#include <iostream>
#include <memory>
#include "TaskDispatcher.h"
#include "NvBlastActor.h"
#include "NvBlastExtDamageShaders.h"
typedef std::function<void(const Nv::Blast::Actor&, NvBlastLog)> ActorTestFunction;
typedef std::function<void(std::vector<NvBlastActor*>&, NvBlastLog)> PostDamageTestFunction;
static void blast(std::set<NvBlastActor*>& actorsToDamage, GeneratorAsset* testAsset, GeneratorAsset::Vec3 localPos, float minRadius, float maxRadius, float compressiveDamage)
{
std::vector<NvBlastChunkFractureData> chunkEvents; /* num lower-support chunks + bonds */
std::vector<NvBlastBondFractureData> bondEvents; /* num lower-support chunks + bonds */
chunkEvents.resize(testAsset->solverChunks.size());
bondEvents.resize(testAsset->solverBonds.size());
NvBlastFractureBuffers events = { static_cast<uint32_t>(bondEvents.size()), static_cast<uint32_t>(chunkEvents.size()), bondEvents.data(), chunkEvents.data() };
std::vector<float> scratch(chunkEvents.size() + bondEvents.size(), 0.0f);
std::vector<char> splitScratch;
std::vector<NvBlastActor*> newActorsBuffer(testAsset->solverChunks.size());
NvBlastExtRadialDamageDesc damage = {
compressiveDamage,
{ localPos.x, localPos.y, localPos.z },
minRadius,
maxRadius
};
NvBlastExtProgramParams programParams =
{
&damage,
nullptr
};
NvBlastDamageProgram program = {
NvBlastExtFalloffGraphShader,
nullptr
};
size_t totalNewActorsCount = 0;
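// For each actor: generate and apply the radial fracture, then split if any fracture events were produced.
// Actors consumed by the split are removed from the set; all newly created actors are added afterwards.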
for (std::set<NvBlastActor*>::iterator k = actorsToDamage.begin(); k != actorsToDamage.end();)
{
NvBlastActor* actor = *k;
NvBlastActorGenerateFracture(&events, actor, program, &programParams, nullptr, nullptr);
NvBlastActorApplyFracture(&events, actor, &events, nullptr, nullptr);
bool removeActor = false;
if (events.bondFractureCount + events.chunkFractureCount > 0)
{
NvBlastActorSplitEvent splitEvent;
splitEvent.newActors = &newActorsBuffer.data()[totalNewActorsCount];
uint32_t newActorSize = (uint32_t)(newActorsBuffer.size() - totalNewActorsCount);
splitScratch.resize((size_t)NvBlastActorGetRequiredScratchForSplit(actor, nullptr));
const size_t newActorsCount = NvBlastActorSplit(&splitEvent, actor, newActorSize, splitScratch.data(), nullptr, nullptr);
totalNewActorsCount += newActorsCount;
removeActor = splitEvent.deletedActor != NULL;
}
if (removeActor)
{
k = actorsToDamage.erase(k);
}
else
{
++k;
}
}
for (size_t i = 0; i < totalNewActorsCount; ++i)
{
actorsToDamage.insert(newActorsBuffer[i]);
}
}
template<int FailLevel, int Verbosity>
class MultithreadingTest : public BlastBaseTest<FailLevel, Verbosity>
{
public:
MultithreadingTest()
{
}
static void messageLog(int type, const char* msg, const char* file, int line)
{
BlastBaseTest<FailLevel, Verbosity>::messageLog(type, msg, file, line);
}
static void* alloc(size_t size)
{
return BlastBaseTest<FailLevel, Verbosity>::alignedZeroedAlloc(size);
}
static void free(void* mem)
{
BlastBaseTest<FailLevel, Verbosity>::alignedFree(mem);
}
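// Verifies chunk visibility invariants: a subsupport actor exposes exactly one chunk (which must descend from
// a support chunk), while for a graph actor every owned graph chunk must climb to a visible chunk of that actor,
// all visible chunks must be accounted for, and fully intact sibling groups must remain invisible.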
static void testActorVisibleChunks(const Nv::Blast::Actor& actor, NvBlastLog)
{
const Nv::Blast::Asset& asset = *actor.getAsset();
const NvBlastChunk* chunks = asset.getChunks();
if (actor.isSubSupportChunk())
{
EXPECT_EQ(1, actor.getVisibleChunkCount());
const uint32_t firstVisibleChunkIndex = (uint32_t)Nv::Blast::Actor::VisibleChunkIt(actor);
EXPECT_EQ(actor.getIndex() - asset.m_graph.m_nodeCount, firstVisibleChunkIndex - asset.m_firstSubsupportChunkIndex);
// Make sure the visible chunk is subsupport
// Array of support flags
std::vector<bool> isSupport(asset.m_chunkCount, false);
for (uint32_t i = 0; i < asset.m_graph.m_nodeCount; ++i)
{
isSupport[asset.m_graph.getChunkIndices()[i]] = true;
}
// Climb hierarchy to find support chunk
uint32_t chunkIndex = firstVisibleChunkIndex;
while (chunkIndex != Nv::Blast::invalidIndex<uint32_t>())
{
if (isSupport[chunkIndex])
{
break;
}
chunkIndex = chunks[chunkIndex].parentChunkIndex;
}
EXPECT_FALSE(Nv::Blast::isInvalidIndex(chunkIndex));
}
else
{
// Array of visibility flags
std::vector<bool> isVisible(asset.m_chunkCount, false);
for (Nv::Blast::Actor::VisibleChunkIt i = actor; (bool)i; ++i)
{
isVisible[(uint32_t)i] = true;
}
// Mark visible nodes representing graph chunks
std::vector<bool> visibleChunkFound(asset.m_chunkCount, false);
// Make sure every graph chunk is represented by a visible chunk
for (Nv::Blast::Actor::GraphNodeIt i = actor; (bool)i; ++i)
{
const uint32_t graphNodeIndex = (uint32_t)i;
uint32_t chunkIndex = asset.m_graph.getChunkIndices()[graphNodeIndex];
// Climb hierarchy to find visible chunk
while (chunkIndex != Nv::Blast::invalidIndex<uint32_t>())
{
// Check that chunk owners are accurate
EXPECT_EQ(actor.getIndex(), actor.getFamilyHeader()->getChunkActorIndices()[chunkIndex]);
if (isVisible[chunkIndex])
{
visibleChunkFound[chunkIndex] = true;
break;
}
chunkIndex = chunks[chunkIndex].parentChunkIndex;
}
EXPECT_FALSE(Nv::Blast::isInvalidIndex(chunkIndex));
}
// Check that all visible chunks are accounted for
for (uint32_t i = 0; i < asset.m_chunkCount; ++i)
{
EXPECT_EQ(visibleChunkFound[i], isVisible[i]);
}
// Make sure that, if all siblings are intact, they are invisible
for (uint32_t i = 0; i < asset.m_chunkCount; ++i)
{
bool allIntact = true;
bool noneVisible = true;
if (chunks[i].firstChildIndex < asset.getUpperSupportChunkCount()) // Do not check subsupport
{
for (uint32_t j = chunks[i].firstChildIndex; j < chunks[i].childIndexStop; ++j)
{
allIntact = allIntact && actor.getFamilyHeader()->getChunkActorIndices()[j] == actor.getIndex();
noneVisible = noneVisible && !isVisible[j];
}
EXPECT_TRUE(!allIntact || noneVisible);
}
}
}
}
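// Dispatcher task that damages its actor set on a worker thread (via blast()) and optionally runs a per-actor test;
// the resulting actors are read back through getResult() by the main-thread callback.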
class DamageActorTask : public TaskDispatcher::Task
{
public:
DamageActorTask(NvBlastActor* actor, GeneratorAsset* asset, GeneratorAsset::Vec3 localPos, float minRadius, float maxRadius, float compressiveDamage, ActorTestFunction testFunction)
: m_asset(asset)
, m_localPos(localPos)
, m_minRadius(minRadius)
, m_maxRadius(maxRadius)
, m_compressiveDamage(compressiveDamage)
, m_testFunction(testFunction)
{
m_actors.insert(actor);
}
virtual void process()
{
blast(m_actors, m_asset, m_localPos, m_minRadius, m_maxRadius, m_compressiveDamage);
// Test individual actors
if (m_testFunction != nullptr)
{
for (std::set<NvBlastActor*>::iterator k = m_actors.begin(); k != m_actors.end(); ++k)
{
m_testFunction(*static_cast<Nv::Blast::Actor*>(*k), messageLog);
}
}
}
const std::set<NvBlastActor*>& getResult() const { return m_actors; }
private:
std::set<NvBlastActor*> m_actors;
GeneratorAsset* m_asset;
GeneratorAsset::Vec3 m_localPos;
float m_minRadius;
float m_maxRadius;
float m_compressiveDamage;
ActorTestFunction m_testFunction;
std::vector<NvBlastActor*> m_resultActors;
};
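// Generates a random cube asset with at least minChunkCount leaf (support) chunks, then applies up to damageCount
// radial damage passes in parallel across threadCount workers, testing each resulting actor and, optionally,
// the final actor set.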
void damageLeafSupportActorsParallelized
(
uint32_t assetCount,
uint32_t minChunkCount,
uint32_t damageCount,
uint32_t threadCount,
ActorTestFunction actorTestFunction,
PostDamageTestFunction postDamageTestFunction
)
{
const float relativeDamageRadius = 0.05f;
const float compressiveDamage = 1.0f;
srand(0);
std::cout << "Asset # (out of " << assetCount << "): ";
for (uint32_t assetNum = 0; assetNum < assetCount; ++assetNum)
{
std::cout << assetNum + 1 << ".. ";
CubeAssetGenerator::Settings settings;
settings.extents = GeneratorAsset::Vec3(1, 1, 1);
CubeAssetGenerator::DepthInfo depthInfo;
depthInfo.slicesPerAxis = GeneratorAsset::Vec3(1, 1, 1);
depthInfo.flag = NvBlastChunkDesc::Flags::NoFlags;
settings.depths.push_back(depthInfo);
uint32_t chunkCount = 1;
while (chunkCount < minChunkCount)
{
uint32_t chunkMul;
do
{
depthInfo.slicesPerAxis = GeneratorAsset::Vec3((float)(1 + rand() % 4), (float)(1 + rand() % 4), (float)(1 + rand() % 4));
chunkMul = (uint32_t)(depthInfo.slicesPerAxis.x * depthInfo.slicesPerAxis.y * depthInfo.slicesPerAxis.z);
} while (chunkMul == 1);
chunkCount *= chunkMul;
settings.depths.push_back(depthInfo);
settings.extents = settings.extents * depthInfo.slicesPerAxis;
}
settings.depths.back().flag = NvBlastChunkDesc::SupportFlag; // Leaves are support
// Make largest direction unit size
settings.extents = settings.extents * (1.0f / std::max(settings.extents.x, std::max(settings.extents.y, settings.extents.z)));
// Create asset
GeneratorAsset testAsset;
CubeAssetGenerator::generate(testAsset, settings);
NvBlastAssetDesc desc;
desc.chunkDescs = &testAsset.solverChunks[0];
desc.chunkCount = (uint32_t)testAsset.solverChunks.size();
desc.bondDescs = testAsset.solverBonds.data();
desc.bondCount = (uint32_t)testAsset.solverBonds.size();
std::vector<char> scratch;
scratch.resize((size_t)NvBlastGetRequiredScratchForCreateAsset(&desc, messageLog));
void* mem = alloc(NvBlastGetAssetMemorySize(&desc, messageLog));
NvBlastAsset* asset = NvBlastCreateAsset(mem, &desc, scratch.data(), messageLog);
EXPECT_TRUE(asset != nullptr);
NvBlastActorDesc actorDesc;
actorDesc.initialBondHealths = actorDesc.initialSupportChunkHealths = nullptr;
actorDesc.uniformInitialBondHealth = actorDesc.uniformInitialLowerSupportChunkHealth = 1.0f;
void* fmem = alloc(NvBlastAssetGetFamilyMemorySize(asset, messageLog));
NvBlastFamily* family = NvBlastAssetCreateFamily(fmem, asset, nullptr); // Using zeroingAlloc in case actorTest compares memory blocks
scratch.resize((size_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(family, messageLog));
NvBlastActor* actor = NvBlastFamilyCreateFirstActor(family, &actorDesc, scratch.data(), messageLog);
EXPECT_TRUE(actor != nullptr);
// Run parallelized damage through TaskDispatcher
std::set<NvBlastActor*> resultActors;
{
uint32_t damageNum = 0;
// Helper function: create a DamageActorTask for the given actor and add it to the dispatcher
auto addDamageTaskFunction = [&](TaskDispatcher& dispatcher, NvBlastActor* actor)
{
GeneratorAsset::Vec3 localPos = settings.extents*GeneratorAsset::Vec3((float)rand() / RAND_MAX - 0.5f, (float)rand() / RAND_MAX - 0.5f, (float)rand() / RAND_MAX - 0.5f);
auto newTask = std::unique_ptr<DamageActorTask>(new DamageActorTask(actor, &testAsset, localPos, relativeDamageRadius, relativeDamageRadius*1.2f, compressiveDamage, actorTestFunction));
dispatcher.addTask(std::move(newTask));
};
// Task-finished callback for the dispatcher (runs on the main thread): re-queues damage tasks for the resulting actors until damageCount is reached, then collects the remainder into resultActors
TaskDispatcher::OnTaskFinishedFunction onTaskFinishedFunction = [&](TaskDispatcher& dispatcher, std::unique_ptr<TaskDispatcher::Task> task) {
const DamageActorTask* damageTask = static_cast<const DamageActorTask*>(task.get());
const std::set<NvBlastActor*>& actors = damageTask->getResult();
for (NvBlastActor* actor : actors)
{
if (damageNum >= damageCount)
{
resultActors.insert(actor);
}
else
{
damageNum++;
addDamageTaskFunction(dispatcher, actor);
}
}
};
// create dispatcher, add first task and run
TaskDispatcher dispatcher(threadCount, onTaskFinishedFunction);
addDamageTaskFunction(dispatcher, actor);
dispatcher.process();
}
// Test fractured actor set
if (postDamageTestFunction)
{
std::vector<NvBlastActor*> actorArray(resultActors.begin(), resultActors.end());
postDamageTestFunction(actorArray, messageLog);
}
// Release remaining actors
for (std::set<NvBlastActor*>::iterator k = resultActors.begin(); k != resultActors.end(); ++k)
{
NvBlastActorDeactivate(*k, messageLog);
}
resultActors.clear();
const uint32_t actorCount = NvBlastFamilyGetActorCount(family, messageLog);
EXPECT_TRUE(actorCount == 0);
free(family);
// Release asset data
free(asset);
}
std::cout << "done.\n";
}
};
// Specializations
typedef MultithreadingTest<NvBlastMessage::Error, 1> MultithreadingTestAllowWarnings;
typedef MultithreadingTest<NvBlastMessage::Error, 1> MultithreadingTestStrict;
TEST_F(MultithreadingTestStrict, MultithreadingTestDamageLeafSupportActorsTestVisibility)
{
damageLeafSupportActorsParallelized(1, 1000, 50, 4, testActorVisibleChunks, nullptr);
}
TEST_F(MultithreadingTestStrict, MultithreadingTestDamageLeafSupportActors)
{
damageLeafSupportActorsParallelized(1, 3000, 1000, 4, nullptr, nullptr);
}
| 17,045 | C++ | 39.975961 | 205 | 0.604752 |
NVIDIA-Omniverse/PhysX/blast/source/test/src/unit/TkCompositeTests.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include "TkBaseTest.h"
#include <map>
#include <random>
#include <algorithm>
#include "NsMemoryBuffer.h"
#include "NvBlastTime.h"
/*
Composite and joint tests:
0) Test serialization of composites and assemblies
1) Create an assembly; actors and joints should be created automatically
2) Create an actor with internal joints. Splitting the actor should cause joint create events to be dispatched
3) Joint update events should be fired when attached actors change
4) Joint delete events should be fired when at least one attached actor is deleted
5) Creating a composite from assets with internal joints should have expected behaviors (1-4) above
*/
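// Aggregate describing a composite: per-actor descriptors, their relative transforms, and the joint descriptors connecting them.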
struct Composite
{
std::vector<TkActorDesc> m_actorDescs;
std::vector<nvidia::NvTransform> m_relTMs;
std::vector<TkJointDesc> m_jointDescs;
};
template<int FailLevel, int Verbosity>
class TkCompositeTest : public TkBaseTest<FailLevel, Verbosity>
{
public:
// Composite/joint tests
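// Creates four actors (two instances of each of the first two test assets) and connects them with joints.
// When createNRFJoints is true, several joint descriptors leave one side's family null with an invalid chunk index,
// exercising joints that are not attached to a family on that side.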
void createAssembly(std::vector<TkActor*>& actors, std::vector<TkJoint*>& joints, bool createNRFJoints)
{
TkFramework* fw = NvBlastTkFrameworkGet();
actors.resize(4, nullptr);
actors[0] = fw->createActor(TkActorDesc(testAssets[0]));
actors[1] = fw->createActor(TkActorDesc(testAssets[0]));
actors[2] = fw->createActor(TkActorDesc(testAssets[1]));
actors[3] = fw->createActor(TkActorDesc(testAssets[1]));
std::vector<TkFamily*> families(4);
families[0] = &actors[0]->getFamily();
families[1] = &actors[1]->getFamily();
families[2] = &actors[2]->getFamily();
families[3] = &actors[3]->getFamily();
EXPECT_FALSE(actors[0] == nullptr);
EXPECT_FALSE(actors[1] == nullptr);
EXPECT_FALSE(actors[2] == nullptr);
EXPECT_FALSE(actors[3] == nullptr);
const TkJointDesc jointDescsNoNRF[8] =
{
// Actor indices, chunk indices, attach position in the composite frame
{ { families[0], families[1] }, { 6, 5 }, { NvVec3(0.0f, -1.5f, 0.5f), NvVec3(0.0f, -1.5f, 0.5f) } },
{ { families[0], families[1] }, { 4, 3 }, { NvVec3(0.0f, -0.5f, -0.5f), NvVec3(0.0f, -0.5f, -0.5f) } },
{ { families[0], families[2] }, { 8, 6 }, { NvVec3(-0.5f, 0.0f, 0.5f), NvVec3(-0.5f, 0.0f, 0.5f) } },
{ { families[0], families[2] }, { 3, 1 }, { NvVec3(-1.5f, 0.0f, -0.5f), NvVec3(-1.5f, 0.0f, -0.5f) } },
{ { families[1], families[3] }, { 7, 5 }, { NvVec3(0.5f, 0.0f, 0.5f), NvVec3(0.5f, 0.0f, 0.5f) } },
{ { families[1], families[3] }, { 4, 2 }, { NvVec3(1.0f, 0.0f, -0.5f), NvVec3(1.0f, 0.0f, -0.5f) } },
{ { families[2], families[3] }, { 8, 7 }, { NvVec3(0.0f, 1.5f, 0.5f), NvVec3(0.0f, 1.5f, 0.5f) } },
{ { families[2], families[3] }, { 2, 1 }, { NvVec3(0.0f, 0.5f, -0.5f), NvVec3(0.0f, 0.5f, -0.5f) } }
};
const TkJointDesc jointDescsWithNRF[12] =
{
// Actor indices, chunk indices, attach position in the composite frame
{ { families[0], families[1] }, { 6, 5 }, { NvVec3(0.0f, -1.5f, 0.5f), NvVec3(0.0f, -1.5f, 0.5f) } },
{ { families[0], families[1] }, { 4, 3 }, { NvVec3(0.0f, -0.5f, -0.5f), NvVec3(0.0f, -0.5f, -0.5f) } },
{ { families[0], nullptr }, { 8, 0xFFFFFFFF }, { NvVec3(-0.5f, 0.0f, 0.5f), NvVec3(-0.5f, 0.0f, 0.5f) } },
{ { families[0], nullptr }, { 3, 0xFFFFFFFF }, { NvVec3(-1.5f, 0.0f, -0.5f), NvVec3(-1.5f, 0.0f, -0.5f) } },
{ { nullptr, families[2] }, { 0xFFFFFFFF, 6 }, { NvVec3(-0.5f, 0.0f, 0.5f), NvVec3(-0.5f, 0.0f, 0.5f) } },
{ { nullptr, families[2] }, { 0xFFFFFFFF, 1 }, { NvVec3(-1.5f, 0.0f, -0.5f), NvVec3(-1.5f, 0.0f, -0.5f) } },
{ { families[1], nullptr }, { 7, 0xFFFFFFFF }, { NvVec3(0.5f, 0.0f, 0.5f), NvVec3(0.5f, 0.0f, 0.5f) } },
{ { families[1], nullptr }, { 4, 0xFFFFFFFF }, { NvVec3(1.0f, 0.0f, -0.5f), NvVec3(1.0f, 0.0f, -0.5f) } },
{ { nullptr, families[3] }, { 0xFFFFFFFF, 5 }, { NvVec3(0.5f, 0.0f, 0.5f), NvVec3(0.5f, 0.0f, 0.5f) } },
{ { nullptr, families[3] }, { 0xFFFFFFFF, 2 }, { NvVec3(1.0f, 0.0f, -0.5f), NvVec3(1.0f, 0.0f, -0.5f) } },
{ { families[2], families[3] }, { 8, 7 }, { NvVec3(0.0f, 1.5f, 0.5f), NvVec3(0.0f, 1.5f, 0.5f) } },
{ { families[2], families[3] }, { 2, 1 }, { NvVec3(0.0f, 0.5f, -0.5f), NvVec3(0.0f, 0.5f, -0.5f), } }
};
const TkJointDesc* jointDescs = createNRFJoints ? jointDescsWithNRF : jointDescsNoNRF;
const int jointCount = createNRFJoints ? 12 : 8;
joints.resize(jointCount, nullptr);
for (int i = 0; i < jointCount; ++i)
{
joints[i] = fw->createJoint(jointDescs[i]);
EXPECT_FALSE(joints[i] == nullptr);
}
}
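// Family serialization round-trip helper; currently a no-op, with the disabled branch documenting the intended
// round-trip once family serialization is moved into extensions.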
void familySerialization(std::vector<TkFamily*>& families, TestFamilyTracker& tracker)
{
#if 1
NV_UNUSED(families);
NV_UNUSED(tracker);
#else
TkFramework* fw = NvBlastTkFrameworkGet();
PsMemoryBuffer* membuf = NVBLAST_NEW(PsMemoryBuffer);
EXPECT_TRUE(membuf != nullptr);
if (membuf == nullptr)
{
return;
}
std::vector<TkFamily*> oldFamilies = families;
for (size_t familyNum = 0; familyNum < families.size(); ++familyNum)
{
GTEST_FATAL_FAILURE_("Serialization of families needs to be put into extensions.");
// families[familyNum]->serialize(*membuf);
}
for (size_t familyNum = 0; familyNum < families.size(); ++familyNum)
{
TkFamily* f = families[familyNum];
std::vector<TkActor*> actors(f->getActorCount());
f->getActors(actors.data(), static_cast<uint32_t>(actors.size()));
for (auto a : actors)
{
tracker.eraseActor(a);
}
f->release();
families[familyNum] = nullptr;
}
for (size_t familyNum = 0; familyNum < families.size(); ++familyNum)
{
GTEST_FATAL_FAILURE_("Deserialization of families needs to be put into extensions.");
// TkFamily* f = reinterpret_cast<TkFamily*>(fw->deserialize(*membuf));
// f->addListener(tracker);
// families[familyNum] = f;
}
for (size_t familyNum = 0; familyNum < families.size(); ++familyNum)
{
TkFamily* f = families[familyNum];
std::vector<TkActor*> actors(f->getActorCount());
f->getActors(actors.data(), static_cast<uint32_t>(actors.size()));
for (auto a : actors)
{
tracker.insertActor(a);
std::vector<TkJoint*> joints(a->getJointCount());
a->getJoints(joints.data(), (uint32_t)joints.size());
for (auto j : joints)
{
const TkJointData jd = j->getData();
if (jd.actors[0] != jd.actors[1])
{
tracker.joints.insert(j);
}
}
}
}
membuf->release();
#endif
}
void recollectActors(std::vector<TkFamily*>& families, std::vector<TkActor*>& actors)
{
uint32_t totalActorCount = 0;
for (auto family : families)
{
EXPECT_LE(family->getActorCount() + totalActorCount, actors.size());
totalActorCount += family->getActors(actors.data() + totalActorCount, static_cast<uint32_t>(actors.size()) - totalActorCount);
}
}
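// Builds two assemblies, listens to their families with a tracker, optionally round-trips them through
// familySerialization, and verifies that the tracked joint count drops to zero as each assembly's actors are released.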
void assemblyCreateAndRelease(bool createNRFJoints, bool serializationTest)
{
createFramework();
createTestAssets();
TkFramework* fw = NvBlastTkFrameworkGet();
const TkType* familyType = fw->getType(TkTypeIndex::Family);
EXPECT_TRUE(familyType != nullptr);
TestFamilyTracker tracker;
std::vector<TkFamily*> families1;
std::vector<TkFamily*> families2;
// Create one assembly
std::vector<TkActor*> actors1;
std::vector<TkJoint*> joints1;
createAssembly(actors1, joints1, createNRFJoints);
tracker.joints.insert(joints1.begin(), joints1.end());
// Create another assembly
std::vector<TkActor*> actors2;
std::vector<TkJoint*> joints2;
createAssembly(actors2, joints2, createNRFJoints);
tracker.joints.insert(joints2.begin(), joints2.end());
// Store families and fill group
for (size_t actorNum = 0; actorNum < actors1.size(); ++actorNum)
{
TkFamily& family = actors1[actorNum]->getFamily();
families1.push_back(&family);
family.addListener(tracker);
}
for (size_t actorNum = 0; actorNum < actors2.size(); ++actorNum)
{
TkFamily& family = actors2[actorNum]->getFamily();
families2.push_back(&family);
family.addListener(tracker);
}
if (serializationTest)
{
familySerialization(families1, tracker);
recollectActors(families1, actors1);
familySerialization(families2, tracker);
recollectActors(families2, actors2);
}
EXPECT_EQ(joints1.size() + joints2.size(), tracker.joints.size());
// Release 1st assembly's actors
for (size_t actorNum = 0; actorNum < actors1.size(); ++actorNum)
{
actors1[actorNum]->release();
}
if (serializationTest)
{
familySerialization(families2, tracker);
recollectActors(families2, actors2);
}
EXPECT_EQ(joints2.size(), tracker.joints.size());
// Release 2nd assembly's actors
for (size_t actorNum = 0; actorNum < actors1.size(); ++actorNum)
{
actors2[actorNum]->release();
}
EXPECT_EQ(0, tracker.joints.size());
releaseTestAssets();
releaseFramework();
}
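// Splits an actor whose asset contains internal joints: slicing should dispatch joint-create events (four internal
// joints), further damage should keep them attached to valid actors, and releasing all actors should dispatch
// joint-delete events.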
void assemblyInternalJoints(bool testAssemblySerialization)
{
createFramework();
createTestAssets(true); // Create assets with internal joints
TkFramework* fw = NvBlastTkFrameworkGet();
TestFamilyTracker tracker;
TkGroupDesc gdesc;
gdesc.workerCount = m_taskman->getCpuDispatcher()->getWorkerCount();
TkGroup* group = fw->createGroup(gdesc);
EXPECT_TRUE(group != nullptr);
m_groupTM->setGroup(group);
TkActorDesc adesc(testAssets[0]);
TkActor* actor1 = fw->createActor(adesc);
EXPECT_TRUE(actor1 != nullptr);
tracker.insertActor(actor1);
actor1->getFamily().addListener(tracker);
TkFamily* family = &actor1->getFamily();
group->addActor(*actor1);
CSParams cs2(2, 0.0f);
NvBlastExtProgramParams csParams2 = { &cs2, nullptr };
actor1->damage(getCubeSlicerProgram(), &csParams2);
EXPECT_EQ((size_t)0, tracker.joints.size());
m_groupTM->process();
m_groupTM->wait();
if (testAssemblySerialization)
{
std::vector<TkFamily*> families;
families.push_back(family);
familySerialization(families, tracker);
family = families[0];
std::vector<TkActor*> actors(family->getActorCount());
family->getActors(actors.data(), static_cast<uint32_t>(actors.size()));
for (TkActor* actor : actors)
{
group->addActor(*actor);
}
}
EXPECT_EQ((size_t)2, family->getActorCount());
EXPECT_EQ((size_t)4, tracker.joints.size()); // 2) Create an actor with internal joints. Splitting the actor should cause joint create events to be dispatched
std::vector<TkActor*> actors(family->getActorCount());
family->getActors(actors.data(), static_cast<uint32_t>(actors.size()));
for (TkJoint* joint : tracker.joints)
{
TkJointData jd = joint->getData();
EXPECT_FALSE(actors.end() == std::find(actors.begin(), actors.end(), jd.actors[0]));
EXPECT_FALSE(actors.end() == std::find(actors.begin(), actors.end(), jd.actors[1]));
}
NvBlastExtRadialDamageDesc radialDamage = getRadialDamageDesc(0, 0, 0);
NvBlastExtProgramParams radialParams = { &radialDamage, nullptr };
for (TkActor* actor : actors)
{
actor->damage(getFalloffProgram(), &radialParams);
}
m_groupTM->process();
m_groupTM->wait();
if (testAssemblySerialization)
{
std::vector<TkFamily*> families;
families.push_back(family);
familySerialization(families, tracker);
family = families[0];
}
EXPECT_EQ((size_t)8, family->getActorCount());
EXPECT_EQ((size_t)4, tracker.joints.size());
// 3) Joint update events should be fired when attached actors change
actors.resize(family->getActorCount());
family->getActors(actors.data(), static_cast<uint32_t>(actors.size()));
for (TkJoint* joint : tracker.joints)
{
TkJointData jd = joint->getData();
EXPECT_FALSE(actors.end() == std::find(actors.begin(), actors.end(), jd.actors[0]));
EXPECT_FALSE(actors.end() == std::find(actors.begin(), actors.end(), jd.actors[1]));
}
for (TkActor* actor : actors)
{
actor->release();
}
EXPECT_EQ((size_t)0, tracker.joints.size()); // 4) Joint delete events should be fired when at least one attached actor is deleted
group->release();
releaseTestAssets();
releaseFramework();
}
void assemblyCompositeWithInternalJoints(bool createNRFJoints, bool serializationTest)
{
createFramework();
createTestAssets(true); // Create assets with internal joints
TkFramework* fw = NvBlastTkFrameworkGet();
const TkType* familyType = fw->getType(TkTypeIndex::Family);
EXPECT_TRUE(familyType != nullptr);
if (familyType == nullptr)
{
return;
}
TestFamilyTracker tracker;
std::vector<TkFamily*> families;
// Create assembly
std::vector<TkActor*> actors;
std::vector<TkJoint*> joints;
createAssembly(actors, joints, createNRFJoints);
tracker.joints.insert(joints.begin(), joints.end());
TkGroupDesc gdesc;
gdesc.workerCount = m_taskman->getCpuDispatcher()->getWorkerCount();
TkGroup* group = fw->createGroup(gdesc);
EXPECT_TRUE(group != nullptr);
m_groupTM->setGroup(group);
for (size_t i = 0; i < actors.size(); ++i)
{
TkFamily& family = actors[i]->getFamily();
families.push_back(&family);
family.addListener(tracker);
tracker.insertActor(actors[i]);
group->addActor(*actors[i]);
}
if (serializationTest)
{
familySerialization(families, tracker);
recollectActors(families, actors);
for (auto actor : actors)
{
group->addActor(*actor);
}
}
EXPECT_EQ((size_t)4, actors.size());
const size_t compJointCount = createNRFJoints ? (size_t)12 : (size_t)8;
EXPECT_EQ(compJointCount, tracker.joints.size());
CSParams cs2(2, 0.0f);
NvBlastExtProgramParams csParams2 = { &cs2, nullptr };
size_t totalActorCount = 0;
for (uint32_t i = 0; i < 4; ++i)
{
actors[i]->damage(getCubeSlicerProgram(), &csParams2);
m_groupTM->process();
m_groupTM->wait();
if (serializationTest)
{
familySerialization(families, tracker);
for (size_t j = 0; j < families.size(); ++j)
{
TkFamily* family = families[j];
std::vector<TkActor*> a(family->getActorCount());
family->getActors(a.data(), static_cast<uint32_t>(a.size()));
for (auto actor : a)
{
group->addActor(*actor);
}
EXPECT_TRUE(j <= i || a.size() == 1);
if (j > i && a.size() == 1)
{
actors[j] = a[0];
}
}
}
EXPECT_EQ((size_t)2, families[i]->getActorCount());
EXPECT_EQ((size_t)(compJointCount + 4 * (i + 1)), tracker.joints.size()); // Four joints created per actor
totalActorCount += families[i]->getActorCount();
}
actors.resize(totalActorCount);
totalActorCount = 0;
for (int i = 0; i < 4; ++i)
{
families[i]->getActors(actors.data() + totalActorCount, families[i]->getActorCount());
totalActorCount += families[i]->getActorCount();
}
for (TkJoint* joint : tracker.joints)
{
TkJointData jd = joint->getData();
EXPECT_TRUE(jd.actors[0] == nullptr || actors.end() != std::find(actors.begin(), actors.end(), jd.actors[0]));
EXPECT_TRUE(jd.actors[1] == nullptr || actors.end() != std::find(actors.begin(), actors.end(), jd.actors[1]));
}
NvBlastExtRadialDamageDesc radialDamage = getRadialDamageDesc(0, 0, 0);
NvBlastExtProgramParams radialParams = { &radialDamage, nullptr };
for (TkActor* actor : actors)
{
actor->damage(getFalloffProgram(), &radialParams);
}
m_groupTM->process();
m_groupTM->wait();
totalActorCount = 0;
for (int i = 0; i < 4; ++i)
{
totalActorCount += families[i]->getActorCount();
}
if (serializationTest)
{
familySerialization(families, tracker);
}
EXPECT_EQ((size_t)32, totalActorCount);
EXPECT_EQ(compJointCount + (size_t)16, tracker.joints.size());
actors.resize(totalActorCount);
totalActorCount = 0;
for (int i = 0; i < 4; ++i)
{
families[i]->getActors(actors.data() + totalActorCount, families[i]->getActorCount());
totalActorCount += families[i]->getActorCount();
}
// 3) Joint update events should be fired when attached actors change
for (TkActor* actor : actors)
{
actor->release();
}
EXPECT_EQ((size_t)0, tracker.joints.size()); // 4) Joint delete events should be fired when at least one attached actor is deleted
group->release();
releaseTestAssets();
releaseFramework();
}
void assemblyExternalJoints_MultiFamilyDamage(bool explicitJointRelease = true)
{
createFramework();
const NvBlastChunkDesc chunkDescs[3] =
{
// centroid volume parent idx flags ID
{ { 0.0f, 0.0f, 0.0f }, 4.0f, UINT32_MAX, NvBlastChunkDesc::NoFlags, 0 },
{ { 0.0f,-1.0f, 0.0f }, 2.0f, 0, NvBlastChunkDesc::SupportFlag, 1 },
{ { 0.0f, 1.0f, 0.0f }, 2.0f, 0, NvBlastChunkDesc::SupportFlag, 2 }
};
const NvBlastBondDesc bondDesc =
// normal area centroid userData chunks
{ { { 0.0f, 1.0f, 0.0f }, 1.0f, { 0.0f, 0.0f, 0.0f }, 0 }, { 1, 2 } };
TkFramework* framework = NvBlastTkFrameworkGet();
TestFamilyTracker tracker;
TkAssetDesc desc;
desc.chunkCount = 3;
desc.chunkDescs = chunkDescs;
desc.bondCount = 1;
desc.bondDescs = &bondDesc;
desc.bondFlags = nullptr;
TkAsset* asset = framework->createAsset(desc);
EXPECT_TRUE(asset != nullptr);
TkGroupDesc gdesc;
gdesc.workerCount = m_taskman->getCpuDispatcher()->getWorkerCount();
TkGroup* group = framework->createGroup(gdesc);
EXPECT_TRUE(group != nullptr);
m_groupTM->setGroup(group);
TkActorDesc adesc(asset);
TkActor* actor1 = framework->createActor(adesc);
EXPECT_TRUE(actor1 != nullptr);
TkActor* actor2 = framework->createActor(adesc);
EXPECT_TRUE(actor2 != nullptr);
group->addActor(*actor1);
group->addActor(*actor2);
TkFamily* family1 = &actor1->getFamily();
TkFamily* family2 = &actor2->getFamily();
family1->addListener(tracker);
family2->addListener(tracker);
tracker.insertActor(actor1);
tracker.insertActor(actor2);
TkJointDesc jdesc;
jdesc.families[0] = family1;
jdesc.families[1] = family2;
jdesc.chunkIndices[0] = 2;
jdesc.chunkIndices[1] = 1;
jdesc.attachPositions[0] = NvVec3(0.0f, 1.0f, 0.0f);
jdesc.attachPositions[1] = NvVec3(0.0f, -1.0f, 0.0f);
TkJoint* joint = framework->createJoint(jdesc);
EXPECT_TRUE(joint != nullptr);
tracker.joints.insert(joint);
NvBlastExtRadialDamageDesc radialDamage1 = getRadialDamageDesc(0, 1, 0, 2, 2);
NvBlastExtProgramParams radialParams1 = { &radialDamage1, nullptr };
actor1->damage(getFalloffProgram(), &radialParams1);
NvBlastExtRadialDamageDesc radialDamage2 = getRadialDamageDesc(0, -1, 0, 2, 2);
NvBlastExtProgramParams radialParams2 = { &radialDamage2, nullptr };
actor2->damage(getFalloffProgram(), &radialParams2);
m_groupTM->process();
m_groupTM->wait();
TkActor* actors1[2];
TkActor* actors2[2];
EXPECT_EQ(2, family1->getActors(actors1, 2));
EXPECT_EQ(2, family2->getActors(actors2, 2));
const TkJointData jdata = joint->getData();
EXPECT_TRUE(jdata.actors[0] != nullptr);
EXPECT_TRUE(jdata.actors[1] != nullptr);
EXPECT_TRUE(&jdata.actors[0]->getFamily() == family1);
EXPECT_TRUE(&jdata.actors[1]->getFamily() == family2);
// Clean up
if (explicitJointRelease)
{
joint->release();
family2->release();
family1->release();
asset->release();
releaseFramework();
}
else
{
EXPECT_EQ(1, tracker.joints.size());
releaseFramework();
// Commenting these out - but shouldn't we be sending delete events when we release the framework?
// EXPECT_EQ(0, tracker.joints.size());
// EXPECT_EQ(0, tracker.actors.size());
}
}
protected:
// http://clang.llvm.org/compatibility.html#dep_lookup_bases
// http://stackoverflow.com/questions/6592512/templates-parent-class-member-variables-not-visible-in-inherited-class
using TkBaseTest<FailLevel, Verbosity>::testAssets;
using TkBaseTest<FailLevel, Verbosity>::m_taskman;
using TkBaseTest<FailLevel, Verbosity>::m_groupTM;
using TkBaseTest<FailLevel, Verbosity>::createFramework;
using TkBaseTest<FailLevel, Verbosity>::releaseFramework;
using TkBaseTest<FailLevel, Verbosity>::createTestAssets;
using TkBaseTest<FailLevel, Verbosity>::releaseTestAssets;
using TkBaseTest<FailLevel, Verbosity>::getCubeSlicerProgram;
using TkBaseTest<FailLevel, Verbosity>::getDefaultMaterial;
using TkBaseTest<FailLevel, Verbosity>::getRadialDamageDesc;
using TkBaseTest<FailLevel, Verbosity>::getFalloffProgram;
};
typedef TkCompositeTest<NvBlastMessage::Error, 1> TkCompositeTestAllowWarnings;
typedef TkCompositeTest<NvBlastMessage::Warning, 1> TkCompositeTestStrict;
/*
1) Create assembly, actors and joints should be created automatically
*/
TEST_F(TkCompositeTestStrict, AssemblyCreateAndRelease_NoNRFJoints_NoSerialization)
{
assemblyCreateAndRelease(false, false);
}
TEST_F(TkCompositeTestStrict, DISABLED_AssemblyCreateAndRelease_NoNRFJoints_AssemblySerialization)
{
assemblyCreateAndRelease(false, true);
}
TEST_F(TkCompositeTestStrict, AssemblyCreateAndRelease_WithNRFJoints_NoSerialization)
{
assemblyCreateAndRelease(true, false);
}
TEST_F(TkCompositeTestStrict, DISABLED_AssemblyCreateAndRelease_WithNRFJoints_AssemblySerialization)
{
assemblyCreateAndRelease(true, true);
}
/**
2) Create an actor with internal joints. Splitting the actor should cause joint create events to be dispatched
3) Joint update events should be fired when attached actors change
4) Joint delete events should be fired when at least one attached actor is deleted
*/
TEST_F(TkCompositeTestStrict, AssemblyInternalJoints_NoSerialization)
{
assemblyInternalJoints(false);
}
TEST_F(TkCompositeTestStrict, DISABLED_AssemblyInternalJoints_AssemblySerialization)
{
assemblyInternalJoints(true);
}
/**
5) Creating a composite from assets with internal joints should have expected behaviors (1-4) above
*/
TEST_F(TkCompositeTestStrict, AssemblyCompositeWithInternalJoints_NoNRFJoints_NoSerialization)
{
assemblyCompositeWithInternalJoints(false, false);
}
TEST_F(TkCompositeTestStrict, DISABLED_AssemblyCompositeWithInternalJoints_NoNRFJoints_AssemblySerialization)
{
assemblyCompositeWithInternalJoints(false, true);
}
TEST_F(TkCompositeTestStrict, AssemblyCompositeWithInternalJoints_WithNRFJoints_NoSerialization)
{
assemblyCompositeWithInternalJoints(true, false);
}
TEST_F(TkCompositeTestStrict, DISABLED_AssemblyCompositeWithInternalJoints_WithNRFJoints_AssemblySerialization)
{
assemblyCompositeWithInternalJoints(true, true);
}
/*
More tests
*/
TEST_F(TkCompositeTestStrict, AssemblyExternalJoints_MultiFamilyDamage)
{
assemblyExternalJoints_MultiFamilyDamage(true);
}
TEST_F(TkCompositeTestStrict, AssemblyExternalJoints_MultiFamilyDamage_AutoJointRelease)
{
assemblyExternalJoints_MultiFamilyDamage(false);
}
| 27,666 | C++ | 34.699355 | 170 | 0.596689 |
NVIDIA-Omniverse/PhysX/blast/source/test/src/unit/FamilyGraphTests.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include "BlastBaseTest.h"
#include "NvBlastSupportGraph.h"
#include "NvBlastFamilyGraph.h"
#include "NvBlastAssert.h"
#include "NvBlastIndexFns.h"
#include <stdlib.h>
#include <ostream>
#include <stdint.h>
#include <map>
#include <algorithm>
// ====================================================================================================================
// HELPERS
// ====================================================================================================================
::testing::AssertionResult VectorMatch(const std::vector<uint32_t>& actual, const uint32_t* expected, uint32_t size)
{
for (size_t i(0); i < size; ++i)
{
if (expected[i] != actual[i])
{
testing::Message msg;
msg << "array[" << i
<< "] (" << actual[i] << ") != expected[" << i
<< "] (" << expected[i] << ")";
return ::testing::AssertionFailure(msg);
}
}
return ::testing::AssertionSuccess();
}
#define VECTOR_MATCH(actual, ...) \
{ \
const uint32_t arr[] = { __VA_ARGS__ }; \
const uint32_t size = (sizeof(arr) / sizeof(arr[0])); \
EXPECT_EQ(size, actual.size()); \
EXPECT_TRUE(VectorMatch(actual, arr, size)); \
}
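// Usage example (mirrors the island checks in the tests below):
// std::vector<uint32_t> nodes = { 0, 1, 4, 5 };
// VECTOR_MATCH(nodes, 0, 1, 4, 5); // passes: the size check and the element-wise comparison both succeed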
// ====================================================================================================================
// TEST CLASS
// ====================================================================================================================
using namespace Nv::Blast;
template<int FailLevel, int Verbosity>
class FamilyGraphTest : public BlastBaseTest < FailLevel, Verbosity >
{
public:
FamilyGraphTest()
{
}
protected:
FamilyGraph* buildFamilyGraph(uint32_t chunkCount, const uint32_t* adjacentChunkPartition, const uint32_t* adjacentChunkIndices)
{
NVBLAST_ASSERT(m_memoryBlock.size() == 0); // can't build twice per test
// Fill SupportGraph with data:
NvBlastCreateOffsetStart(sizeof(SupportGraph));
const size_t NvBlastCreateOffsetAlign16(chunkIndicesOffset, chunkCount*sizeof(uint32_t));
const size_t NvBlastCreateOffsetAlign16(adjacencyPartitionOffset, (chunkCount + 1)*sizeof(uint32_t));
const size_t NvBlastCreateOffsetAlign16(adjacentNodeIndicesOffset, adjacentChunkPartition[chunkCount] * sizeof(uint32_t));
const size_t NvBlastCreateOffsetAlign16(adjacentBondIndicesOffset, adjacentChunkPartition[chunkCount] * sizeof(uint32_t));
const size_t graphDataSize = NvBlastCreateOffsetEndAlign16();
m_graphMemory.resize(graphDataSize);
m_graph = reinterpret_cast<SupportGraph*>(m_graphMemory.data());
m_graph->m_nodeCount = chunkCount;
m_graph->m_chunkIndicesOffset = static_cast<uint32_t>(chunkIndicesOffset);
m_graph->m_adjacencyPartitionOffset = static_cast<uint32_t>(adjacencyPartitionOffset);
m_graph->m_adjacentNodeIndicesOffset = static_cast<uint32_t>(adjacentNodeIndicesOffset);
m_graph->m_adjacentBondIndicesOffset = static_cast<uint32_t>(adjacentBondIndicesOffset);
memcpy(m_graph->getAdjacencyPartition(), adjacentChunkPartition, (chunkCount + 1) * sizeof(uint32_t));
memcpy(m_graph->getAdjacentNodeIndices(), adjacentChunkIndices, adjacentChunkPartition[chunkCount] * sizeof(uint32_t));
// fill bondIndices by incrementing bondIndex and putting same bondIndex in mirror bond index for (n0, n1) == (n1, n0)
memset(m_graph->getAdjacentBondIndices(), (uint32_t)-1, adjacentChunkPartition[chunkCount] * sizeof(uint32_t)); // memset only uses the low byte (0xFF), so every uint32_t entry becomes the invalid index
uint32_t bondIndex = 0;
for (uint32_t chunk0 = 0; chunk0 < m_graph->m_nodeCount; chunk0++)
{
for (uint32_t i = m_graph->getAdjacencyPartition()[chunk0]; i < m_graph->getAdjacencyPartition()[chunk0 + 1]; i++)
{
if (m_graph->getAdjacentBondIndices()[i] == (uint32_t)-1)
{
m_graph->getAdjacentBondIndices()[i] = bondIndex;
uint32_t chunk1 = m_graph->getAdjacentNodeIndices()[i];
for (uint32_t j = m_graph->getAdjacencyPartition()[chunk1]; j < m_graph->getAdjacencyPartition()[chunk1 + 1]; j++)
{
if (m_graph->getAdjacentNodeIndices()[j] == chunk0)
{
m_graph->getAdjacentBondIndices()[j] = bondIndex;
}
}
bondIndex++;
}
}
}
// reserve memory for family graph and asset pointer
uint32_t familyGraphMemorySize = (uint32_t)FamilyGraph::requiredMemorySize(m_graph->m_nodeCount, bondIndex);
m_memoryBlock.resize(familyGraphMemorySize);
// placement new family graph
const uint32_t bondCount = m_graph->getAdjacencyPartition()[m_graph->m_nodeCount] / 2;
FamilyGraph* familyGraph = new(m_memoryBlock.data()) FamilyGraph(m_graph->m_nodeCount, bondCount);
return familyGraph;
}
struct IslandInfo
{
std::vector<NodeIndex> nodes;
};
/**
Gathers island info for tests and debugging purposes.
Returned islands are sorted by node count (ascending); each island's node list is sorted by NodeIndex.
*/
void getIslandsInfo(const FamilyGraph& graph, std::vector<IslandInfo>& info)
{
IslandId* islandIds = graph.getIslandIds();
std::map<IslandId, IslandInfo> islandMap;
for (NodeIndex n = 0; n < m_graph->m_nodeCount; n++)
{
EXPECT_TRUE(islandIds[n] != invalidIndex<uint32_t>());
IslandId islandId = islandIds[n];
if (islandMap.find(islandId) == islandMap.end())
{
IslandInfo islandInfo;
islandInfo.nodes.push_back(n);
islandMap[islandId] = islandInfo;
}
else
{
islandMap[islandId].nodes.push_back(n);
}
}
for (auto it = islandMap.begin(); it != islandMap.end(); ++it)
{
std::sort(it->second.nodes.begin(), it->second.nodes.end());
info.push_back(it->second);
}
// sort islands by size ascending
std::sort(info.begin(), info.end(), [](const IslandInfo& i0, const IslandInfo& i1) -> bool
{
size_t s0 = i0.nodes.size();
size_t s1 = i1.nodes.size();
if (s0 == s1 && s0 > 0)
{
s0 = i0.nodes[0];
s1 = i1.nodes[0];
}
return s0 < s1;
});
}
static const uint32_t DEFAULT_ACTOR_INDEX = 0;
SupportGraph* m_graph;
std::vector<char> m_graphMemory;
std::vector<char> m_memoryBlock;
};
typedef FamilyGraphTest<NvBlastMessage::Error, 1> FamilyGraphTestAllowWarnings;
typedef FamilyGraphTest<NvBlastMessage::Warning, 1> FamilyGraphTestStrict;
// ====================================================================================================================
// GRAPH DATA
// ====================================================================================================================
// Graph 0:
//
// 0 -- 1 -- 2 -- 3
// | | | |
// | | | |
// 4 -- 5 6 -- 7
//
const uint32_t chunkCount0 = 8;
const uint32_t adjacentChunkPartition0[] = { 0, 2, 5, 8, 10, 12, 14, 16, 18 };
const uint32_t adjacentChunkIndices0[] = { /*0*/ 1, 4, /*1*/ 0, 2, 5, /*2*/ 1, 3, 6, /*3*/ 2, 7, /*4*/ 0, 5, /*5*/ 1, 4, /*6*/ 2, 7, /*7*/ 3, 6 };
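// Reading the CSR-style adjacency above: node n's neighbors are
// adjacentChunkIndices0[adjacentChunkPartition0[n]] .. adjacentChunkIndices0[adjacentChunkPartition0[n + 1] - 1].
// For example, node 1 spans entries 2..4, giving neighbors { 0, 2, 5 }, matching the diagram.
// Graph 1 below uses the same layout.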
// Graph 1:
//
// 0 -- 1 -- 2 -- 3
// | | | |
// 4 -- 5 -- 6 -- 7
// | | | |
// 8 -- 9 -- 10-- 11
//
const uint32_t chunkCount1 = 12;
const uint32_t adjacentChunkPartition1[] = { 0, 2, 5, 8, 10, 13, 17, 21, 24, 26, 29, 32, 34 };
const uint32_t adjacentChunkIndices1[] = { /*0*/ 1, 4, /*1*/ 0, 2, 5, /*2*/ 1, 3, 6, /*3*/ 2, 7, /*4*/ 0, 5, 8, /*5*/ 1, 4, 6, 9, /*6*/ 2, 5, 7, 10,
/*7*/ 3, 6, 11, /*8*/ 4, 9, /*9*/ 5, 8, 10, /*10*/ 6, 9, 11, /*11*/ 7, 10 };
// ====================================================================================================================
// TESTS
// ====================================================================================================================
TEST_F(FamilyGraphTestStrict, Graph0FindIslands0)
{
FamilyGraph* graph = buildFamilyGraph(chunkCount0, adjacentChunkPartition0, adjacentChunkIndices0);
graph->initialize(DEFAULT_ACTOR_INDEX, m_graph);
std::vector<char> scratch;
scratch.resize((size_t)FamilyGraph::findIslandsRequiredScratch(chunkCount0));
EXPECT_EQ(9, graph->getEdgesCount(m_graph));
graph->notifyEdgeRemoved(DEFAULT_ACTOR_INDEX, 0, 4, m_graph);
EXPECT_EQ(8, graph->getEdgesCount(m_graph));
EXPECT_EQ(1, graph->findIslands(DEFAULT_ACTOR_INDEX, scratch.data(), m_graph));
graph->notifyEdgeRemoved(DEFAULT_ACTOR_INDEX, 1, 2, m_graph);
EXPECT_EQ(1, graph->findIslands(DEFAULT_ACTOR_INDEX, scratch.data(), m_graph));
std::vector<IslandInfo> info;
getIslandsInfo(*graph, info);
EXPECT_EQ(2, info.size());
VECTOR_MATCH(info[0].nodes, 0, 1, 4, 5);
VECTOR_MATCH(info[1].nodes, 2, 3, 6, 7);
}
TEST_F(FamilyGraphTestStrict, Graph0FindIslands1)
{
FamilyGraph* graph = buildFamilyGraph(chunkCount0, adjacentChunkPartition0, adjacentChunkIndices0);
graph->initialize(DEFAULT_ACTOR_INDEX, m_graph);
std::vector<char> scratch;
scratch.resize((size_t)FamilyGraph::findIslandsRequiredScratch(chunkCount0));
graph->notifyEdgeRemoved(DEFAULT_ACTOR_INDEX, 0, 4, m_graph);
graph->notifyEdgeRemoved(DEFAULT_ACTOR_INDEX, 4, 5, m_graph);
graph->notifyEdgeRemoved(DEFAULT_ACTOR_INDEX, 1, 2, m_graph);
EXPECT_EQ(6, graph->getEdgesCount(m_graph));
EXPECT_EQ(3, graph->findIslands(DEFAULT_ACTOR_INDEX, scratch.data(), m_graph));
std::vector<IslandInfo> info;
getIslandsInfo(*graph, info);
EXPECT_EQ(3, info.size());
VECTOR_MATCH(info[0].nodes, 4);
VECTOR_MATCH(info[1].nodes, 0, 1, 5);
VECTOR_MATCH(info[2].nodes, 2, 3, 6, 7);
}
TEST_F(FamilyGraphTestStrict, Graph0FindIslandsDifferentActors)
{
const uint32_t ACTOR_0_INDEX = 5;
const uint32_t ACTOR_1_INDEX = 2;
FamilyGraph* graph = buildFamilyGraph(chunkCount0, adjacentChunkPartition0, adjacentChunkIndices0);
graph->initialize(ACTOR_0_INDEX, m_graph);
std::vector<char> scratch;
scratch.resize((size_t)FamilyGraph::findIslandsRequiredScratch(chunkCount0));
EXPECT_EQ(0, graph->findIslands(ACTOR_1_INDEX, scratch.data(), m_graph));
EXPECT_EQ(1, graph->findIslands(ACTOR_0_INDEX, scratch.data(), m_graph));
graph->notifyEdgeRemoved(ACTOR_0_INDEX, 2, 1, m_graph);
EXPECT_EQ(8, graph->getEdgesCount(m_graph));
EXPECT_EQ(1, graph->findIslands(ACTOR_0_INDEX, scratch.data(), m_graph));
graph->notifyEdgeRemoved(ACTOR_1_INDEX, 2, 6, m_graph);
graph->notifyEdgeRemoved(ACTOR_1_INDEX, 7, 3, m_graph);
EXPECT_EQ(1, graph->findIslands(ACTOR_1_INDEX, scratch.data(), m_graph));
graph->notifyEdgeRemoved(ACTOR_0_INDEX, 0, 1, m_graph);
graph->notifyEdgeRemoved(ACTOR_0_INDEX, 4, 5, m_graph);
EXPECT_EQ(1, graph->findIslands(ACTOR_0_INDEX, scratch.data(), m_graph));
std::vector<IslandInfo> info;
getIslandsInfo(*graph, info);
EXPECT_EQ(4, info.size());
VECTOR_MATCH(info[0].nodes, 0, 4);
VECTOR_MATCH(info[1].nodes, 1, 5);
VECTOR_MATCH(info[2].nodes, 2, 3);
VECTOR_MATCH(info[3].nodes, 6, 7);
}
TEST_F(FamilyGraphTestStrict, Graph1FindIslands0)
{
FamilyGraph* graph = buildFamilyGraph(chunkCount1, adjacentChunkPartition1, adjacentChunkIndices1);
graph->initialize(DEFAULT_ACTOR_INDEX, m_graph);
std::vector<char> scratch;
scratch.resize((size_t)FamilyGraph::findIslandsRequiredScratch(chunkCount1));
graph->notifyEdgeRemoved(DEFAULT_ACTOR_INDEX, 0, 4, m_graph);
graph->notifyEdgeRemoved(DEFAULT_ACTOR_INDEX, 1, 5, m_graph);
graph->notifyEdgeRemoved(DEFAULT_ACTOR_INDEX, 2, 6, m_graph);
graph->notifyEdgeRemoved(DEFAULT_ACTOR_INDEX, 3, 7, m_graph);
graph->notifyEdgeRemoved(DEFAULT_ACTOR_INDEX, 5, 6, m_graph);
graph->notifyEdgeRemoved(DEFAULT_ACTOR_INDEX, 9, 10, m_graph);
EXPECT_EQ(11, graph->getEdgesCount(m_graph));
EXPECT_EQ(3, graph->findIslands(DEFAULT_ACTOR_INDEX, scratch.data(), m_graph));
std::vector<IslandInfo> info;
getIslandsInfo(*graph, info);
EXPECT_EQ(3, info.size());
VECTOR_MATCH(info[0].nodes, 0, 1, 2, 3);
VECTOR_MATCH(info[1].nodes, 4, 5, 8, 9);
VECTOR_MATCH(info[2].nodes, 6, 7, 10, 11);
}
TEST_F(FamilyGraphTestStrict, Graph1FindIslands1)
{
FamilyGraph* graph = buildFamilyGraph(chunkCount1, adjacentChunkPartition1, adjacentChunkIndices1);
graph->initialize(DEFAULT_ACTOR_INDEX, m_graph);
std::vector<char> scratch;
scratch.resize((size_t)FamilyGraph::findIslandsRequiredScratch(chunkCount1));
graph->notifyEdgeRemoved(DEFAULT_ACTOR_INDEX, 0, 4, m_graph);
EXPECT_EQ(1, graph->findIslands(DEFAULT_ACTOR_INDEX, scratch.data(), m_graph));
graph->notifyEdgeRemoved(DEFAULT_ACTOR_INDEX, 1, 5, m_graph);
EXPECT_EQ(0, graph->findIslands(DEFAULT_ACTOR_INDEX, scratch.data(), m_graph));
graph->notifyEdgeRemoved(DEFAULT_ACTOR_INDEX, 2, 6, m_graph);
EXPECT_EQ(0, graph->findIslands(DEFAULT_ACTOR_INDEX, scratch.data(), m_graph));
graph->notifyEdgeRemoved(DEFAULT_ACTOR_INDEX, 3, 7, m_graph);
EXPECT_EQ(1, graph->findIslands(DEFAULT_ACTOR_INDEX, scratch.data(), m_graph));
graph->notifyEdgeRemoved(DEFAULT_ACTOR_INDEX, 5, 6, m_graph);
EXPECT_EQ(0, graph->findIslands(DEFAULT_ACTOR_INDEX, scratch.data(), m_graph));
graph->notifyEdgeRemoved(DEFAULT_ACTOR_INDEX, 9, 10, m_graph);
EXPECT_EQ(1, graph->findIslands(DEFAULT_ACTOR_INDEX, scratch.data(), m_graph));
std::vector<IslandInfo> info;
getIslandsInfo(*graph, info);
EXPECT_EQ(3, info.size());
VECTOR_MATCH(info[0].nodes, 0, 1, 2, 3);
VECTOR_MATCH(info[1].nodes, 4, 5, 8, 9);
VECTOR_MATCH(info[2].nodes, 6, 7, 10, 11);
}
TEST_F(FamilyGraphTestStrict, Graph1FindIslandsRemoveAllEdges)
{
FamilyGraph* graph = buildFamilyGraph(chunkCount1, adjacentChunkPartition1, adjacentChunkIndices1);
graph->initialize(DEFAULT_ACTOR_INDEX, m_graph);
std::vector<char> scratch;
scratch.resize((size_t)FamilyGraph::findIslandsRequiredScratch(chunkCount1));
uint32_t edges = graph->getEdgesCount(m_graph);
for (uint32_t node0 = 0; node0 < chunkCount1; node0++)
{
for (uint32_t i = adjacentChunkPartition1[node0]; i < adjacentChunkPartition1[node0 + 1]; i++)
{
if (graph->notifyEdgeRemoved(DEFAULT_ACTOR_INDEX, node0, adjacentChunkIndices1[i], m_graph))
{
edges--;
EXPECT_EQ(edges, graph->getEdgesCount(m_graph));
}
}
}
EXPECT_EQ(0, graph->getEdgesCount(m_graph));
EXPECT_EQ(12, graph->findIslands(DEFAULT_ACTOR_INDEX, scratch.data(), m_graph));
for (uint32_t node0 = 0; node0 < chunkCount1; node0++)
{
EXPECT_EQ(node0, graph->getIslandIds()[node0]);
}
}
| 16,954 | C++ | 40.761084 | 148 | 0.600979 |
NVIDIA-Omniverse/PhysX/blast/source/test/src/utils/TestAssets.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef TESTASSETS_H
#define TESTASSETS_H
#include "NvBlast.h"
#include "AssetGenerator.h"
struct ExpectedAssetValues
{
uint32_t totalChunkCount;
uint32_t graphNodeCount;
uint32_t leafChunkCount;
uint32_t bondCount;
uint32_t subsupportChunkCount;
};
// Indexable asset descriptors and expected values
extern const NvBlastAssetDesc g_assetDescs[6];
extern const ExpectedAssetValues g_assetExpectedValues[6];
// Indexable asset descriptors for assets missing coverage and expected values
extern const NvBlastAssetDesc g_assetDescsMissingCoverage[6];
extern const ExpectedAssetValues g_assetsFromMissingCoverageExpectedValues[6];
inline uint32_t getAssetDescCount()
{
return sizeof(g_assetDescs) / sizeof(g_assetDescs[0]);
}
inline uint32_t getAssetDescMissingCoverageCount()
{
return sizeof(g_assetDescsMissingCoverage) / sizeof(g_assetDescsMissingCoverage[0]);
}
void generateCube(GeneratorAsset& cubeAsset, NvBlastAssetDesc& assetDesc, size_t maxDepth, size_t width,
int32_t supportDepth = -1, CubeAssetGenerator::BondFlags bondFlags = CubeAssetGenerator::ALL_INTERNAL_BONDS);
void generateRandomCube(GeneratorAsset& cubeAsset, NvBlastAssetDesc& assetDesc, uint32_t minChunkCount, uint32_t maxChunkCount);
#endif // #ifndef TESTASSETS_H
| 2,860 | C | 39.871428 | 128 | 0.776923 |
NVIDIA-Omniverse/PhysX/blast/source/test/src/utils/TestAssets.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include "TestAssets.h"
#include "AssetGenerator.h"
#include <algorithm>
const NvBlastChunkDesc g_cube1ChunkDescs[9] =
{
// centroid volume parent idx flags ID
{ { 0.0f, 0.0f, 0.0f }, 8.0f, UINT32_MAX, NvBlastChunkDesc::NoFlags, 0 },
{ {-0.5f,-0.5f,-0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 1 },
{ { 0.5f,-0.5f,-0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 2 },
{ {-0.5f, 0.5f,-0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 3 },
{ { 0.5f, 0.5f,-0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 4 },
{ {-0.5f,-0.5f, 0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 5 },
{ { 0.5f,-0.5f, 0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 6 },
{ {-0.5f, 0.5f, 0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 7 },
{ { 0.5f, 0.5f, 0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 8 },
};
const NvBlastBondDesc g_cube1BondDescs[12] =
{
// normal area centroid userData chunks
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 0.0f,-0.5f,-0.5f }, 0 }, { 1, 2 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 0.0f, 0.5f,-0.5f }, 0 }, { 3, 4 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 0.0f,-0.5f, 0.5f }, 0 }, { 5, 6 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 0.0f, 0.5f, 0.5f }, 0 }, { 7, 8 } },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f, {-0.5f, 0.0f,-0.5f }, 0 }, { 1, 3 } },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f, { 0.5f, 0.0f,-0.5f }, 0 }, { 2, 4 } },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f, {-0.5f, 0.0f, 0.5f }, 0 }, { 5, 7 } },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f, { 0.5f, 0.0f, 0.5f }, 0 }, { 6, 8 } },
{ { { 0.0f, 0.0f, 1.0f }, 1.0f, {-0.5f,-0.5f, 0.0f }, 0 }, { 1, 5 } },
{ { { 0.0f, 0.0f, 1.0f }, 1.0f, { 0.5f,-0.5f, 0.0f }, 0 }, { 2, 6 } },
{ { { 0.0f, 0.0f, 1.0f }, 1.0f, {-0.5f, 0.5f, 0.0f }, 0 }, { 3, 7 } },
{ { { 0.0f, 0.0f, 1.0f }, 1.0f, { 0.5f, 0.5f, 0.0f }, 0 }, { 4, 8 } },
};
const NvBlastBondDesc g_cube1BondDescs_wb[16] =
{
// normal area centroid userData chunks
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 0.0f,-0.5f,-0.5f }, 0 }, { 1, 2 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 0.0f, 0.5f,-0.5f }, 0 }, { 3, 4 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 0.0f,-0.5f, 0.5f }, 0 }, { 5, 6 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 0.0f, 0.5f, 0.5f }, 0 }, { 7, 8 } },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f, {-0.5f, 0.0f,-0.5f }, 0 }, { 1, 3 } },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f, { 0.5f, 0.0f,-0.5f }, 0 }, { 2, 4 } },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f, {-0.5f, 0.0f, 0.5f }, 0 }, { 5, 7 } },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f, { 0.5f, 0.0f, 0.5f }, 0 }, { 6, 8 } },
{ { { 0.0f, 0.0f, 1.0f }, 1.0f, {-0.5f,-0.5f, 0.0f }, 0 }, { 1, 5 } },
{ { { 0.0f, 0.0f, 1.0f }, 1.0f, { 0.5f,-0.5f, 0.0f }, 0 }, { 2, 6 } },
{ { { 0.0f, 0.0f, 1.0f }, 1.0f, {-0.5f, 0.5f, 0.0f }, 0 }, { 3, 7 } },
{ { { 0.0f, 0.0f, 1.0f }, 1.0f, { 0.5f, 0.5f, 0.0f }, 0 }, { 4, 8 } },
{ { { 0.0f, 0.0f,-1.0f }, 1.0f, {-0.5f,-0.5f,-1.0f }, 0 }, { 1, UINT32_MAX } },
{ { { 0.0f, 0.0f,-1.0f }, 1.0f, { 0.5f,-0.5f,-1.0f }, 0 }, { 2, UINT32_MAX } },
{ { { 0.0f, 0.0f,-1.0f }, 1.0f, {-0.5f, 0.5f,-1.0f }, 0 }, { 3, UINT32_MAX } },
{ { { 0.0f, 0.0f,-1.0f }, 1.0f, { 0.5f, 0.5f,-1.0f }, 0 }, { 4, UINT32_MAX } },
};
const NvBlastChunkDesc g_cube2ChunkDescs[73] =
{
// centroid volume parent idx flags ID
{ { 0.0f, 0.0f, 0.0f }, 8.0f, UINT32_MAX, NvBlastChunkDesc::NoFlags, 0 },
{ {-0.5f, -0.5f, -0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 1 },
{ { 0.5f, -0.5f, -0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 2 },
{ {-0.5f, 0.5f, -0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 3 },
{ { 0.5f, 0.5f, -0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 4 },
{ {-0.5f, -0.5f, 0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 5 },
{ { 0.5f, -0.5f, 0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 6 },
{ {-0.5f, 0.5f, 0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 7 },
{ { 0.5f, 0.5f, 0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 8 },
{ {-0.25f-0.5f,-0.25f-0.5f,-0.25f-0.5f }, 0.125f, 1, NvBlastChunkDesc::NoFlags, 9 },
{ { 0.25f-0.5f,-0.25f-0.5f,-0.25f-0.5f }, 0.125f, 1, NvBlastChunkDesc::NoFlags, 10 },
{ {-0.25f-0.5f, 0.25f-0.5f,-0.25f-0.5f }, 0.125f, 1, NvBlastChunkDesc::NoFlags, 11 },
{ { 0.25f-0.5f, 0.25f-0.5f,-0.25f-0.5f }, 0.125f, 1, NvBlastChunkDesc::NoFlags, 12 },
{ {-0.25f-0.5f,-0.25f-0.5f, 0.25f-0.5f }, 0.125f, 1, NvBlastChunkDesc::NoFlags, 13 },
{ { 0.25f-0.5f,-0.25f-0.5f, 0.25f-0.5f }, 0.125f, 1, NvBlastChunkDesc::NoFlags, 14 },
{ {-0.25f-0.5f, 0.25f-0.5f, 0.25f-0.5f }, 0.125f, 1, NvBlastChunkDesc::NoFlags, 15 },
{ { 0.25f-0.5f, 0.25f-0.5f, 0.25f-0.5f }, 0.125f, 1, NvBlastChunkDesc::NoFlags, 16 },
{ {-0.25f+0.5f,-0.25f-0.5f,-0.25f-0.5f }, 0.125f, 2, NvBlastChunkDesc::NoFlags, 17 },
{ { 0.25f+0.5f,-0.25f-0.5f,-0.25f-0.5f }, 0.125f, 2, NvBlastChunkDesc::NoFlags, 18 },
{ {-0.25f+0.5f, 0.25f-0.5f,-0.25f-0.5f }, 0.125f, 2, NvBlastChunkDesc::NoFlags, 19 },
{ { 0.25f+0.5f, 0.25f-0.5f,-0.25f-0.5f }, 0.125f, 2, NvBlastChunkDesc::NoFlags, 20 },
{ {-0.25f+0.5f,-0.25f-0.5f, 0.25f-0.5f }, 0.125f, 2, NvBlastChunkDesc::NoFlags, 21 },
{ { 0.25f+0.5f,-0.25f-0.5f, 0.25f-0.5f }, 0.125f, 2, NvBlastChunkDesc::NoFlags, 22 },
{ {-0.25f+0.5f, 0.25f-0.5f, 0.25f-0.5f }, 0.125f, 2, NvBlastChunkDesc::NoFlags, 23 },
{ { 0.25f+0.5f, 0.25f-0.5f, 0.25f-0.5f }, 0.125f, 2, NvBlastChunkDesc::NoFlags, 24 },
{ {-0.25f-0.5f,-0.25f+0.5f,-0.25f-0.5f }, 0.125f, 3, NvBlastChunkDesc::NoFlags, 25 },
{ { 0.25f-0.5f,-0.25f+0.5f,-0.25f-0.5f }, 0.125f, 3, NvBlastChunkDesc::NoFlags, 26 },
{ {-0.25f-0.5f, 0.25f+0.5f,-0.25f-0.5f }, 0.125f, 3, NvBlastChunkDesc::NoFlags, 27 },
{ { 0.25f-0.5f, 0.25f+0.5f,-0.25f-0.5f }, 0.125f, 3, NvBlastChunkDesc::NoFlags, 28 },
{ {-0.25f-0.5f,-0.25f+0.5f, 0.25f-0.5f }, 0.125f, 3, NvBlastChunkDesc::NoFlags, 29 },
{ { 0.25f-0.5f,-0.25f+0.5f, 0.25f-0.5f }, 0.125f, 3, NvBlastChunkDesc::NoFlags, 30 },
{ {-0.25f-0.5f, 0.25f+0.5f, 0.25f-0.5f }, 0.125f, 3, NvBlastChunkDesc::NoFlags, 31 },
{ { 0.25f-0.5f, 0.25f+0.5f, 0.25f-0.5f }, 0.125f, 3, NvBlastChunkDesc::NoFlags, 32 },
{ {-0.25f+0.5f,-0.25f+0.5f,-0.25f-0.5f }, 0.125f, 4, NvBlastChunkDesc::NoFlags, 33 },
{ { 0.25f+0.5f,-0.25f+0.5f,-0.25f-0.5f }, 0.125f, 4, NvBlastChunkDesc::NoFlags, 34 },
{ {-0.25f+0.5f, 0.25f+0.5f,-0.25f-0.5f }, 0.125f, 4, NvBlastChunkDesc::NoFlags, 35 },
{ { 0.25f+0.5f, 0.25f+0.5f,-0.25f-0.5f }, 0.125f, 4, NvBlastChunkDesc::NoFlags, 36 },
{ {-0.25f+0.5f,-0.25f+0.5f, 0.25f-0.5f }, 0.125f, 4, NvBlastChunkDesc::NoFlags, 37 },
{ { 0.25f+0.5f,-0.25f+0.5f, 0.25f-0.5f }, 0.125f, 4, NvBlastChunkDesc::NoFlags, 38 },
{ {-0.25f+0.5f, 0.25f+0.5f, 0.25f-0.5f }, 0.125f, 4, NvBlastChunkDesc::NoFlags, 39 },
{ { 0.25f+0.5f, 0.25f+0.5f, 0.25f-0.5f }, 0.125f, 4, NvBlastChunkDesc::NoFlags, 40 },
{ {-0.25f-0.5f,-0.25f-0.5f,-0.25f+0.5f }, 0.125f, 5, NvBlastChunkDesc::NoFlags, 41 },
{ { 0.25f-0.5f,-0.25f-0.5f,-0.25f+0.5f }, 0.125f, 5, NvBlastChunkDesc::NoFlags, 42 },
{ {-0.25f-0.5f, 0.25f-0.5f,-0.25f+0.5f }, 0.125f, 5, NvBlastChunkDesc::NoFlags, 43 },
{ { 0.25f-0.5f, 0.25f-0.5f,-0.25f+0.5f }, 0.125f, 5, NvBlastChunkDesc::NoFlags, 44 },
{ {-0.25f-0.5f,-0.25f-0.5f, 0.25f+0.5f }, 0.125f, 5, NvBlastChunkDesc::NoFlags, 45 },
{ { 0.25f-0.5f,-0.25f-0.5f, 0.25f+0.5f }, 0.125f, 5, NvBlastChunkDesc::NoFlags, 46 },
{ {-0.25f-0.5f, 0.25f-0.5f, 0.25f+0.5f }, 0.125f, 5, NvBlastChunkDesc::NoFlags, 47 },
{ { 0.25f-0.5f, 0.25f-0.5f, 0.25f+0.5f }, 0.125f, 5, NvBlastChunkDesc::NoFlags, 48 },
{ {-0.25f+0.5f,-0.25f-0.5f,-0.25f+0.5f }, 0.125f, 6, NvBlastChunkDesc::NoFlags, 49 },
{ { 0.25f+0.5f,-0.25f-0.5f,-0.25f+0.5f }, 0.125f, 6, NvBlastChunkDesc::NoFlags, 50 },
{ {-0.25f+0.5f, 0.25f-0.5f,-0.25f+0.5f }, 0.125f, 6, NvBlastChunkDesc::NoFlags, 51 },
{ { 0.25f+0.5f, 0.25f-0.5f,-0.25f+0.5f }, 0.125f, 6, NvBlastChunkDesc::NoFlags, 52 },
{ {-0.25f+0.5f,-0.25f-0.5f, 0.25f+0.5f }, 0.125f, 6, NvBlastChunkDesc::NoFlags, 53 },
{ { 0.25f+0.5f,-0.25f-0.5f, 0.25f+0.5f }, 0.125f, 6, NvBlastChunkDesc::NoFlags, 54 },
{ {-0.25f+0.5f, 0.25f-0.5f, 0.25f+0.5f }, 0.125f, 6, NvBlastChunkDesc::NoFlags, 55 },
{ { 0.25f+0.5f, 0.25f-0.5f, 0.25f+0.5f }, 0.125f, 6, NvBlastChunkDesc::NoFlags, 56 },
{ {-0.25f-0.5f,-0.25f+0.5f,-0.25f+0.5f }, 0.125f, 7, NvBlastChunkDesc::NoFlags, 57 },
{ { 0.25f-0.5f,-0.25f+0.5f,-0.25f+0.5f }, 0.125f, 7, NvBlastChunkDesc::NoFlags, 58 },
{ {-0.25f-0.5f, 0.25f+0.5f,-0.25f+0.5f }, 0.125f, 7, NvBlastChunkDesc::NoFlags, 59 },
{ { 0.25f-0.5f, 0.25f+0.5f,-0.25f+0.5f }, 0.125f, 7, NvBlastChunkDesc::NoFlags, 60 },
{ {-0.25f-0.5f,-0.25f+0.5f, 0.25f+0.5f }, 0.125f, 7, NvBlastChunkDesc::NoFlags, 61 },
{ { 0.25f-0.5f,-0.25f+0.5f, 0.25f+0.5f }, 0.125f, 7, NvBlastChunkDesc::NoFlags, 62 },
{ {-0.25f-0.5f, 0.25f+0.5f, 0.25f+0.5f }, 0.125f, 7, NvBlastChunkDesc::NoFlags, 63 },
{ { 0.25f-0.5f, 0.25f+0.5f, 0.25f+0.5f }, 0.125f, 7, NvBlastChunkDesc::NoFlags, 64 },
{ {-0.25f+0.5f,-0.25f+0.5f,-0.25f+0.5f }, 0.125f, 8, NvBlastChunkDesc::NoFlags, 65 },
{ { 0.25f+0.5f,-0.25f+0.5f,-0.25f+0.5f }, 0.125f, 8, NvBlastChunkDesc::NoFlags, 66 },
{ {-0.25f+0.5f, 0.25f+0.5f,-0.25f+0.5f }, 0.125f, 8, NvBlastChunkDesc::NoFlags, 67 },
{ { 0.25f+0.5f, 0.25f+0.5f,-0.25f+0.5f }, 0.125f, 8, NvBlastChunkDesc::NoFlags, 68 },
{ {-0.25f+0.5f,-0.25f+0.5f, 0.25f+0.5f }, 0.125f, 8, NvBlastChunkDesc::NoFlags, 69 },
{ { 0.25f+0.5f,-0.25f+0.5f, 0.25f+0.5f }, 0.125f, 8, NvBlastChunkDesc::NoFlags, 70 },
{ {-0.25f+0.5f, 0.25f+0.5f, 0.25f+0.5f }, 0.125f, 8, NvBlastChunkDesc::NoFlags, 71 },
{ { 0.25f+0.5f, 0.25f+0.5f, 0.25f+0.5f }, 0.125f, 8, NvBlastChunkDesc::NoFlags, 72 },
};
const NvBlastChunkDesc g_cube3ChunkDescs[11] =
{
// centroid volume parent idx flags ID
{ { 0.0f, 0.0f, 0.0f }, 4.0f, UINT32_MAX, NvBlastChunkDesc::NoFlags, 0 },
{ { 0.0f, 0.0f, 0.0f }, 3.0f, UINT32_MAX, NvBlastChunkDesc::NoFlags, 1 },
{ { 0.0f, 0.0f, 0.0f }, 1.0f, UINT32_MAX, NvBlastChunkDesc::NoFlags, 2 },
{ {-0.5f,-0.5f,-0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 3 },
{ { 0.5f,-0.5f,-0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 4 },
{ {-0.5f, 0.5f,-0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 5 },
{ { 0.5f, 0.5f,-0.5f }, 1.0f, 1, NvBlastChunkDesc::SupportFlag, 6 },
{ {-0.5f,-0.5f, 0.5f }, 1.0f, 1, NvBlastChunkDesc::SupportFlag, 7 },
{ { 0.5f,-0.5f, 0.5f }, 1.0f, 1, NvBlastChunkDesc::SupportFlag, 8 },
{ {-0.5f, 0.5f, 0.5f }, 1.0f, 2, NvBlastChunkDesc::SupportFlag, 9 },
{ { 0.5f, 0.5f, 0.5f }, 1.0f, 2, NvBlastChunkDesc::SupportFlag, 10 },
};
const NvBlastBondDesc g_cube3BondDescs[12] =
{
// normal area centroid userData chunks
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 0.0f,-0.5f,-0.5f }, 0 }, { 3, 4 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 0.0f, 0.5f,-0.5f }, 0 }, { 5, 6 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 0.0f,-0.5f, 0.5f }, 0 }, { 7, 8 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 0.0f, 0.5f, 0.5f }, 0 }, { 9, 10} },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f,{-0.5f, 0.0f,-0.5f }, 0 }, { 3, 5 } },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f,{ 0.5f, 0.0f,-0.5f }, 0 }, { 4, 6 } },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f,{-0.5f, 0.0f, 0.5f }, 0 }, { 7, 9 } },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f,{ 0.5f, 0.0f, 0.5f }, 0 }, { 8, 10} },
{ { { 0.0f, 0.0f, 1.0f }, 1.0f,{-0.5f,-0.5f, 0.0f }, 0 }, { 3, 7 } },
{ { { 0.0f, 0.0f, 1.0f }, 1.0f,{ 0.5f,-0.5f, 0.0f }, 0 }, { 4, 8 } },
{ { { 0.0f, 0.0f, 1.0f }, 1.0f,{-0.5f, 0.5f, 0.0f }, 0 }, { 5, 9 } },
{ { { 0.0f, 0.0f, 1.0f }, 1.0f,{ 0.5f, 0.5f, 0.0f }, 0 }, { 6, 10} },
};
const NvBlastBondDesc g_cube3BondDescs_wb[16] =
{
// normal area centroid userData chunks
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 0.0f,-0.5f,-0.5f }, 0 }, { 3, 4 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 0.0f, 0.5f,-0.5f }, 0 }, { 5, 6 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 0.0f,-0.5f, 0.5f }, 0 }, { 7, 8 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 0.0f, 0.5f, 0.5f }, 0 }, { 9, 10} },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f,{-0.5f, 0.0f,-0.5f }, 0 }, { 3, 5 } },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f,{ 0.5f, 0.0f,-0.5f }, 0 }, { 4, 6 } },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f,{-0.5f, 0.0f, 0.5f }, 0 }, { 7, 9 } },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f,{ 0.5f, 0.0f, 0.5f }, 0 }, { 8, 10} },
{ { { 0.0f, 0.0f, 1.0f }, 1.0f,{-0.5f,-0.5f, 0.0f }, 0 }, { 3, 7 } },
{ { { 0.0f, 0.0f, 1.0f }, 1.0f,{ 0.5f,-0.5f, 0.0f }, 0 }, { 4, 8 } },
{ { { 0.0f, 0.0f, 1.0f }, 1.0f,{-0.5f, 0.5f, 0.0f }, 0 }, { 5, 9 } },
{ { { 0.0f, 0.0f, 1.0f }, 1.0f,{ 0.5f, 0.5f, 0.0f }, 0 }, { 6, 10} },
{ { { 0.0f, 0.0f, 1.0f }, 1.0f,{-0.5f,-0.5f,-1.0f }, 0 }, { 3, UINT32_MAX} },
{ { { 0.0f, 0.0f, 1.0f }, 1.0f,{ 0.5f,-0.5f,-1.0f }, 0 }, { 4, UINT32_MAX} },
{ { { 0.0f, 0.0f, 1.0f }, 1.0f,{-0.5f, 0.5f,-1.0f }, 0 }, { 5, UINT32_MAX} },
{ { { 0.0f, 0.0f, 1.0f }, 1.0f,{ 0.5f, 0.5f,-1.0f }, 0 }, { 6, UINT32_MAX} },
};
const NvBlastAssetDesc g_assetDescs[6] =
{
// 2x2x2 axis-aligned cube centered at the origin, split into 8 depth-1 1x1x1 child chunks
{ sizeof(g_cube1ChunkDescs) / sizeof(g_cube1ChunkDescs[0]), g_cube1ChunkDescs, sizeof(g_cube1BondDescs) / sizeof(g_cube1BondDescs[0]), g_cube1BondDescs },
// 2x2x2 axis-aligned cube centered at the origin, split into 8 depth-1 1x1x1 child chunks which are then split into 8 depth-2 (1/2)x(1/2)x(1/2) child chunks each
// Support is at depth-1, so the g_cube1BondDescs are used
{ sizeof(g_cube2ChunkDescs) / sizeof(g_cube2ChunkDescs[0]), g_cube2ChunkDescs, sizeof(g_cube1BondDescs) / sizeof(g_cube1BondDescs[0]), g_cube1BondDescs },
// 2x2x2 axis-aligned cube centered at the origin, split into 8 depth-1 1x1x1 child chunks with multiple roots
{ sizeof(g_cube3ChunkDescs) / sizeof(g_cube3ChunkDescs[0]), g_cube3ChunkDescs, sizeof(g_cube3BondDescs) / sizeof(g_cube3BondDescs[0]), g_cube3BondDescs },
// 2x2x2 axis-aligned cube centered at the origin, split into 8 depth-1 1x1x1 child chunks - contains world-bound chunks
{ sizeof(g_cube1ChunkDescs) / sizeof(g_cube1ChunkDescs[0]), g_cube1ChunkDescs, sizeof(g_cube1BondDescs_wb) / sizeof(g_cube1BondDescs_wb[0]), g_cube1BondDescs_wb },
// 2x2x2 axis-aligned cube centered at the origin, split into 8 depth-1 1x1x1 child chunks which are then split into 8 depth-2 (1/2)x(1/2)x(1/2) child chunks each - contains world-bound chunks
// Support is at depth-1, so the g_cube1BondDescs_wb are used
{ sizeof(g_cube2ChunkDescs) / sizeof(g_cube2ChunkDescs[0]), g_cube2ChunkDescs, sizeof(g_cube1BondDescs_wb) / sizeof(g_cube1BondDescs_wb[0]), g_cube1BondDescs_wb },
// 2x2x2 axis-aligned cube centered at the origin, split into 8 depth-1 1x1x1 child chunks with multiple roots - contains world-bound chunks
{ sizeof(g_cube3ChunkDescs) / sizeof(g_cube3ChunkDescs[0]), g_cube3ChunkDescs, sizeof(g_cube3BondDescs_wb) / sizeof(g_cube3BondDescs_wb[0]), g_cube3BondDescs_wb },
};
struct ExpectedValues
{
uint32_t totalChunkCount;
uint32_t graphNodeCount;
uint32_t leafChunkCount;
uint32_t bondCount;
uint32_t subsupportChunkCount;
};
const ExpectedAssetValues g_assetExpectedValues[6] =
{
// total graph leaves bonds sub
{ 9, 8, 8, 12, 0 },
{ 73, 8, 64, 12, 64 },
{ 11, 8, 8, 12, 0 },
{ 9, 9, 8, 16, 0 },
{ 73, 9, 64, 16, 64 },
{ 11, 9, 8, 16, 0 },
};
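// A note on how the rows above follow from the descriptors (explanatory only, not used by the tests):
// the single-root 2x2x2 cube is 1 root + 8 support children = 9 chunks with 3 axes x 4 = 12 internal bonds.
// The world-bound variants add 4 bonds whose second chunk index is UINT32_MAX; those world bonds show up
// as one extra graph node and 4 extra bonds in the expected values (9 nodes, 16 bonds).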
///////////// Badly-formed asset descs below //////////////
const NvBlastChunkDesc g_cube1ChunkDescsMissingCoverage[9] =
{
// centroid volume parent idx flags ID
{ { 0.0f, 0.0f, 0.0f }, 8.0f, UINT32_MAX, NvBlastChunkDesc::NoFlags, 0 },
{ {-0.5f,-0.5f,-0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 1 },
{ { 0.5f,-0.5f,-0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 2 },
{ {-0.5f, 0.5f,-0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 3 },
{ { 0.5f, 0.5f,-0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 4 },
{ {-0.5f,-0.5f, 0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 5 },
{ { 0.5f,-0.5f, 0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 6 },
{ {-0.5f, 0.5f, 0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 7 },
{ { 0.5f, 0.5f, 0.5f }, 1.0f, 0, NvBlastChunkDesc::NoFlags, 8 },
};
const NvBlastChunkDesc g_cube2ChunkDescsMissingCoverage1[17] =
{
// centroid volume parent idx flags ID
{ { 0.0f, 0.0f, 0.0f }, 8.0f, UINT32_MAX, NvBlastChunkDesc::NoFlags, 0 },
{ {-0.5f,-0.5f,-0.5f }, 1.0f, 0, NvBlastChunkDesc::NoFlags, 1 },
{ { 0.5f,-0.5f,-0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 2 },
{ {-0.5f, 0.5f,-0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 3 },
{ { 0.5f, 0.5f,-0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 4 },
{ {-0.5f,-0.5f, 0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 5 },
{ { 0.5f,-0.5f, 0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 6 },
{ {-0.5f, 0.5f, 0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 7 },
{ { 0.5f, 0.5f, 0.5f }, 1.0f, 0, NvBlastChunkDesc::NoFlags, 8 },
{ {-0.25f-0.5f,-0.25f-0.5f,-0.25f-0.5f }, 0.125f, 1, NvBlastChunkDesc::NoFlags, 9 },
{ { 0.25f-0.5f,-0.25f-0.5f,-0.25f-0.5f }, 0.125f, 1, NvBlastChunkDesc::NoFlags, 10 },
{ {-0.25f-0.5f, 0.25f-0.5f,-0.25f-0.5f }, 0.125f, 1, NvBlastChunkDesc::NoFlags, 11 },
{ { 0.25f-0.5f, 0.25f-0.5f,-0.25f-0.5f }, 0.125f, 1, NvBlastChunkDesc::NoFlags, 12 },
{ {-0.25f-0.5f,-0.25f-0.5f, 0.25f-0.5f }, 0.125f, 1, NvBlastChunkDesc::NoFlags, 13 },
{ { 0.25f-0.5f,-0.25f-0.5f, 0.25f-0.5f }, 0.125f, 1, NvBlastChunkDesc::NoFlags, 14 },
{ {-0.25f-0.5f, 0.25f-0.5f, 0.25f-0.5f }, 0.125f, 1, NvBlastChunkDesc::NoFlags, 15 },
{ { 0.25f-0.5f, 0.25f-0.5f, 0.25f-0.5f }, 0.125f, 1, NvBlastChunkDesc::NoFlags, 16 },
};
const NvBlastChunkDesc g_cube2ChunkDescsMissingCoverage2[17] =
{
// centroid volume parent idx flags ID
{ { 0.0f, 0.0f, 0.0f }, 8.0f, UINT32_MAX, NvBlastChunkDesc::NoFlags, 0 },
{ {-0.5f,-0.5f,-0.5f }, 1.0f, 0, NvBlastChunkDesc::NoFlags, 1 },
{ { 0.5f,-0.5f,-0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 2 },
{ {-0.5f, 0.5f,-0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 3 },
{ { 0.5f, 0.5f,-0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 4 },
{ {-0.5f,-0.5f, 0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 5 },
{ { 0.5f,-0.5f, 0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 6 },
{ {-0.5f, 0.5f, 0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 7 },
{ { 0.5f, 0.5f, 0.5f }, 1.0f, 0, NvBlastChunkDesc::NoFlags, 8 },
{ {-0.25f-0.5f,-0.25f-0.5f,-0.25f-0.5f }, 0.125f, 1, NvBlastChunkDesc::NoFlags, 9 },
{ { 0.25f-0.5f,-0.25f-0.5f,-0.25f-0.5f }, 0.125f, 1, NvBlastChunkDesc::NoFlags, 10 },
{ {-0.25f-0.5f, 0.25f-0.5f,-0.25f-0.5f }, 0.125f, 1, NvBlastChunkDesc::NoFlags, 11 },
{ { 0.25f-0.5f, 0.25f-0.5f,-0.25f-0.5f }, 0.125f, 1, NvBlastChunkDesc::NoFlags, 12 },
{ {-0.25f-0.5f,-0.25f-0.5f, 0.25f-0.5f }, 0.125f, 1, NvBlastChunkDesc::NoFlags, 13 },
{ { 0.25f-0.5f,-0.25f-0.5f, 0.25f-0.5f }, 0.125f, 1, NvBlastChunkDesc::NoFlags, 14 },
{ {-0.25f-0.5f, 0.25f-0.5f, 0.25f-0.5f }, 0.125f, 1, NvBlastChunkDesc::NoFlags, 15 },
{ { 0.25f-0.5f, 0.25f-0.5f, 0.25f-0.5f }, 0.125f, 1, NvBlastChunkDesc::SupportFlag, 16 },
};
const NvBlastAssetDesc g_assetDescsMissingCoverage[6] =
{
{ sizeof(g_cube1ChunkDescsMissingCoverage) / sizeof(g_cube1ChunkDescsMissingCoverage[0]), g_cube1ChunkDescsMissingCoverage, sizeof(g_cube1BondDescs) / sizeof(g_cube1BondDescs[0]), g_cube1BondDescs },
{ sizeof(g_cube2ChunkDescsMissingCoverage1) / sizeof(g_cube2ChunkDescsMissingCoverage1[0]), g_cube2ChunkDescsMissingCoverage1, sizeof(g_cube1BondDescs) / sizeof(g_cube1BondDescs[0]), g_cube1BondDescs },
{ sizeof(g_cube2ChunkDescsMissingCoverage2) / sizeof(g_cube2ChunkDescsMissingCoverage2[0]), g_cube2ChunkDescsMissingCoverage2, sizeof(g_cube1BondDescs) / sizeof(g_cube1BondDescs[0]), g_cube1BondDescs },
{ sizeof(g_cube1ChunkDescsMissingCoverage) / sizeof(g_cube1ChunkDescsMissingCoverage[0]), g_cube1ChunkDescsMissingCoverage, sizeof(g_cube1BondDescs_wb) / sizeof(g_cube1BondDescs_wb[0]), g_cube1BondDescs_wb },
{ sizeof(g_cube2ChunkDescsMissingCoverage1) / sizeof(g_cube2ChunkDescsMissingCoverage1[0]), g_cube2ChunkDescsMissingCoverage1, sizeof(g_cube1BondDescs_wb) / sizeof(g_cube1BondDescs_wb[0]), g_cube1BondDescs_wb },
{ sizeof(g_cube2ChunkDescsMissingCoverage2) / sizeof(g_cube2ChunkDescsMissingCoverage2[0]), g_cube2ChunkDescsMissingCoverage2, sizeof(g_cube1BondDescs_wb) / sizeof(g_cube1BondDescs_wb[0]), g_cube1BondDescs_wb },
};
extern const ExpectedAssetValues g_assetsFromMissingCoverageExpectedValues[6] =
{
// total graph leaves bonds sub
{ 9, 8, 8, 12, 0 },
{ 17, 8, 15, 12, 8 },
{ 17, 15, 15, 9, 0 },
{ 9, 9, 8, 16, 0 },
{ 17, 9, 15, 16, 8 },
{ 17, 16, 15, 12, 0 },
};
void generateCube(GeneratorAsset& cubeAsset, NvBlastAssetDesc& assetDesc, size_t maxDepth, size_t width, int32_t supportDepth, CubeAssetGenerator::BondFlags bondFlags)
{
CubeAssetGenerator::Settings settings;
settings.extents = GeneratorAsset::Vec3(1, 1, 1);
CubeAssetGenerator::DepthInfo depthInfo;
depthInfo.slicesPerAxis = GeneratorAsset::Vec3(1, 1, 1);
depthInfo.flag = NvBlastChunkDesc::Flags::NoFlags;
settings.depths.push_back(depthInfo);
settings.bondFlags = bondFlags;
for (uint32_t depth = 1; depth < maxDepth; ++depth)
{
depthInfo.slicesPerAxis = GeneratorAsset::Vec3((float)width, (float)width, (float)width);
settings.depths.push_back(depthInfo);
}
settings.depths[(supportDepth > 0 ? supportDepth : maxDepth) - 1].flag = NvBlastChunkDesc::SupportFlag; // Mark the requested depth as support (the leaf depth when supportDepth <= 0)
CubeAssetGenerator::generate(cubeAsset, settings);
assetDesc.bondCount = (uint32_t)cubeAsset.solverBonds.size();
assetDesc.bondDescs = cubeAsset.solverBonds.data();
assetDesc.chunkCount = (uint32_t)cubeAsset.chunks.size();
assetDesc.chunkDescs = cubeAsset.solverChunks.data();
}
void generateRandomCube(GeneratorAsset& cubeAsset, NvBlastAssetDesc& desc, uint32_t minChunkCount, uint32_t maxChunkCount)
{
CubeAssetGenerator::Settings settings;
settings.extents = GeneratorAsset::Vec3(1, 1, 1);
CubeAssetGenerator::DepthInfo depthInfo;
depthInfo.slicesPerAxis = GeneratorAsset::Vec3(1, 1, 1);
depthInfo.flag = NvBlastChunkDesc::Flags::NoFlags;
settings.depths.push_back(depthInfo);
uint32_t chunkCount = 1;
while (chunkCount < minChunkCount)
{
uint32_t chunkMul;
do
{
depthInfo.slicesPerAxis = GeneratorAsset::Vec3((float)(1 + rand() % 4), (float)(1 + rand() % 4), (float)(1 + rand() % 4));
chunkMul = (uint32_t)(depthInfo.slicesPerAxis.x * depthInfo.slicesPerAxis.y * depthInfo.slicesPerAxis.z);
} while (chunkMul == 1);
if (chunkCount*chunkMul > maxChunkCount)
{
break;
}
chunkCount *= chunkMul;
settings.depths.push_back(depthInfo);
settings.extents = settings.extents * depthInfo.slicesPerAxis;
}
settings.depths.back().flag = NvBlastChunkDesc::SupportFlag; // Leaves are support
// Make largest direction unit size
settings.extents = settings.extents * (1.0f / std::max(settings.extents.x, std::max(settings.extents.y, settings.extents.z)));
// Create asset
CubeAssetGenerator::generate(cubeAsset, settings);
desc.chunkDescs = cubeAsset.solverChunks.data();
desc.chunkCount = (uint32_t)cubeAsset.solverChunks.size();
desc.bondDescs = cubeAsset.solverBonds.data();
desc.bondCount = (uint32_t)cubeAsset.solverBonds.size();
} | 28,915 | C++ | 68.011933 | 215 | 0.511292 |
NVIDIA-Omniverse/PhysX/blast/source/test/src/utils/TaskDispatcher.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#pragma once
#include <thread>
#include <mutex>
#include <queue>
#include <list>
#include <future>
#include <condition_variable>
#include <memory>
#include <atomic>
class TaskDispatcher
{
public:
class Task
{
public:
virtual void process() = 0;
virtual ~Task() {}
};
typedef std::function<void(TaskDispatcher& dispatcher, std::unique_ptr<Task>)> OnTaskFinishedFunction;
TaskDispatcher(uint32_t threadCount, OnTaskFinishedFunction onTaskFinished) :
m_workingThreadsCount(0), m_onTaskFinished(onTaskFinished)
{
m_threads.resize(threadCount);
for (uint32_t i = 0; i < threadCount; i++)
{
m_threads[i] = std::unique_ptr<Thread>(new Thread(i, m_completionSemaphore));
m_threads[i]->start();
m_freeThreads.push(m_threads[i].get());
}
}
void addTask(std::unique_ptr<Task> task)
{
m_tasks.push(std::move(task));
}
void process()
{
// main loop
while (m_tasks.size() > 0 || m_workingThreadsCount > 0)
{
// assign tasks
while (!(m_tasks.empty() || m_freeThreads.empty()))
{
auto task = std::move(m_tasks.front());
m_tasks.pop();
Thread* freeThread = m_freeThreads.front();
m_freeThreads.pop();
freeThread->processTask(std::move(task));
m_workingThreadsCount++;
}
m_completionSemaphore.wait();
// check for completion
for (std::unique_ptr<Thread>& thread : m_threads)
{
if (thread->isTaskFinished())
{
std::unique_ptr<Task> task;
thread->collectTask(task);
m_onTaskFinished(*this, std::move(task));
m_freeThreads.push(thread.get());
m_workingThreadsCount--;
break;
}
}
}
}
private:
class Semaphore
{
public:
Semaphore(int count_ = 0)
: m_count(count_) {}
inline void notify()
{
std::unique_lock<std::mutex> lock(m_mutex);
m_count++;
m_cv.notify_one();
}
inline void wait()
{
std::unique_lock<std::mutex> lock(m_mutex);
while (m_count == 0){
m_cv.wait(lock);
}
m_count--;
}
private:
std::mutex m_mutex;
std::condition_variable m_cv;
int m_count;
};
class Thread
{
public:
Thread(uint32_t id_, Semaphore& completionSemaphore) : m_id(id_), m_completionSemaphore(completionSemaphore), m_running(false), m_taskFinished(false) {}
virtual ~Thread() { stop(); }
void start()
{
if (!m_running)
{
m_running = true;
m_thread = std::thread(&Thread::body, this);
}
}
void stop()
{
if (m_running)
{
m_running = false;
m_newTaskSemaphore.notify();
m_thread.join();
}
}
void processTask(std::unique_ptr<Task> task)
{
m_task = std::move(task);
m_taskFinished = false;
m_newTaskSemaphore.notify();
}
void collectTask(std::unique_ptr<Task>& task)
{
task = std::move(m_task);
m_task = nullptr;
m_taskFinished = false;
}
bool hasTask() const { return m_task != nullptr; }
bool isTaskFinished() const { return m_taskFinished; }
private:
void body()
{
while (1)
{
m_newTaskSemaphore.wait();
if (!m_running)
return;
m_task->process();
m_taskFinished = true;
m_completionSemaphore.notify();
}
}
uint32_t m_id;
Semaphore& m_completionSemaphore;
std::thread m_thread;
bool m_running;
std::unique_ptr<Task> m_task;
std::atomic<bool> m_taskFinished;
Semaphore m_newTaskSemaphore;
};
private:
uint32_t m_workingThreadsCount;
std::queue<std::unique_ptr<Task>> m_tasks;
OnTaskFinishedFunction m_onTaskFinished;
std::vector<std::unique_ptr<Thread>> m_threads;
std::queue<Thread*> m_freeThreads;
Semaphore m_completionSemaphore;
};
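// Example usage (illustrative sketch; ExampleTask and the thread count are hypothetical,
// not part of this header):
//
// struct ExampleTask : TaskDispatcher::Task
// {
//     void process() override { /* do the work for this task */ }
// };
//
// TaskDispatcher dispatcher(4, [](TaskDispatcher& d, std::unique_ptr<TaskDispatcher::Task> finished)
// {
//     // Inspect the finished task and optionally enqueue follow-up work via d.addTask(...).
// });
// dispatcher.addTask(std::unique_ptr<TaskDispatcher::Task>(new ExampleTask()));
// dispatcher.process(); // returns once the queue is empty and all workers are idle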
| 6,361 | C | 28.050228 | 160 | 0.545512 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringMeshUtils.cpp | #include "NvBlastExtAuthoringMeshUtils.h"
#include "NvBlastExtAuthoringMeshImpl.h"
#include "NvBlastExtAuthoringPerlinNoise.h"
#include "NvBlastExtAuthoringFractureTool.h"
#include <NvBlastNvSharedHelpers.h>
#include <NvCMath.h>
#include <algorithm>
using namespace nvidia;
#define UV_SCALE 1.f
#define CYLINDER_UV_SCALE (UV_SCALE * 1.732f)
namespace Nv
{
namespace Blast
{
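// Build a pair of tangent vectors (t1, t2) perpendicular to the given normal;
// they are used below to lay out cutting-plane and cutting-box geometry.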
void getTangents(const NvVec3& normal, NvVec3& t1, NvVec3& t2)
{
if (std::abs(normal.z) < 0.9)
{
t1 = normal.cross(NvVec3(0, 0, 1));
}
else
{
t1 = normal.cross(NvVec3(1, 0, 0));
}
t2 = t1.cross(normal);
}
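// Create a box-shaped cutting mesh: 8 vertices spanning a quad around 'point' and
// extruded along 'normal' by 'size', with 6 quad facets tagged with 'id' and the
// interior material id.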
Mesh* getCuttingBox(const NvVec3& point, const NvVec3& normal, float size, int64_t id, int32_t interiorMaterialId)
{
NvVec3 lNormal = normal.getNormalized();
NvVec3 t1, t2;
getTangents(lNormal, t1, t2);
std::vector<Vertex> positions(8);
toNvShared(positions[0].p) = point + (t1 + t2) * size;
toNvShared(positions[1].p) = point + (t2 - t1) * size;
toNvShared(positions[2].p) = point + (-t1 - t2) * size;
toNvShared(positions[3].p) = point + (t1 - t2) * size;
toNvShared(positions[4].p) = point + (t1 + t2 + lNormal) * size;
toNvShared(positions[5].p) = point + (t2 - t1 + lNormal) * size;
toNvShared(positions[6].p) = point + (-t1 - t2 + lNormal) * size;
toNvShared(positions[7].p) = point + (t1 - t2 + lNormal) * size;
toNvShared(positions[0].n) = -lNormal;
toNvShared(positions[1].n) = -lNormal;
toNvShared(positions[2].n) = -lNormal;
toNvShared(positions[3].n) = -lNormal;
toNvShared(positions[4].n) = -lNormal;
toNvShared(positions[5].n) = -lNormal;
toNvShared(positions[6].n) = -lNormal;
toNvShared(positions[7].n) = -lNormal;
positions[0].uv[0] = { 0, 0 };
positions[1].uv[0] = {UV_SCALE, 0};
positions[2].uv[0] = {UV_SCALE, UV_SCALE};
positions[3].uv[0] = {0, UV_SCALE};
positions[4].uv[0] = {0, 0};
positions[5].uv[0] = {UV_SCALE, 0};
positions[6].uv[0] = {UV_SCALE, UV_SCALE};
positions[7].uv[0] = {0, UV_SCALE};
std::vector<Edge> edges;
std::vector<Facet> facets;
edges.push_back({0, 1});
edges.push_back({1, 2});
edges.push_back({2, 3});
edges.push_back({3, 0});
facets.push_back({0, 4, id, interiorMaterialId, -1});
edges.push_back({0, 3});
edges.push_back({3, 7});
edges.push_back({7, 4});
edges.push_back({4, 0});
facets.push_back({4, 4, id, interiorMaterialId, -1});
edges.push_back({3, 2});
edges.push_back({2, 6});
edges.push_back({6, 7});
edges.push_back({7, 3});
facets.push_back({8, 4, id, interiorMaterialId, -1});
edges.push_back({5, 6});
edges.push_back({6, 2});
edges.push_back({2, 1});
edges.push_back({1, 5});
facets.push_back({12, 4, id, interiorMaterialId, -1});
edges.push_back({4, 5});
edges.push_back({5, 1});
edges.push_back({1, 0});
edges.push_back({0, 4});
facets.push_back({16, 4, id, interiorMaterialId, -1});
edges.push_back({4, 7});
edges.push_back({7, 6});
edges.push_back({6, 5});
edges.push_back({5, 4});
facets.push_back({20, 4, id, interiorMaterialId, -1});
return new MeshImpl(positions.data(), edges.data(), facets.data(), static_cast<uint32_t>(positions.size()),
static_cast<uint32_t>(edges.size()), static_cast<uint32_t>(facets.size()));
}
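// Prepare the cutting mesh for the opposite side of a cut: negate every vertex
// normal and negate each facet's userData so the two halves remain distinguishable.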
void inverseNormalAndIndices(Mesh* mesh)
{
for (uint32_t i = 0; i < mesh->getVerticesCount(); ++i)
{
toNvShared(mesh->getVerticesWritable()[i].n) *= -1.0f;
}
for (uint32_t i = 0; i < mesh->getFacetCount(); ++i)
{
mesh->getFacetWritable(i)->userData = -mesh->getFacet(i)->userData;
}
}
void setCuttingBox(const NvVec3& point, const NvVec3& normal, Mesh* mesh, float size, int64_t id)
{
NvVec3 t1, t2;
NvVec3 lNormal = normal.getNormalized();
getTangents(lNormal, t1, t2);
Vertex* positions = mesh->getVerticesWritable();
toNvShared(positions[0].p) = point + (t1 + t2) * size;
toNvShared(positions[1].p) = point + (t2 - t1) * size;
toNvShared(positions[2].p) = point + (-t1 - t2) * size;
toNvShared(positions[3].p) = point + (t1 - t2) * size;
toNvShared(positions[4].p) = point + (t1 + t2 + lNormal) * size;
toNvShared(positions[5].p) = point + (t2 - t1 + lNormal) * size;
toNvShared(positions[6].p) = point + (-t1 - t2 + lNormal) * size;
toNvShared(positions[7].p) = point + (t1 - t2 + lNormal) * size;
toNvShared(positions[0].n) = -lNormal;
toNvShared(positions[1].n) = -lNormal;
toNvShared(positions[2].n) = -lNormal;
toNvShared(positions[3].n) = -lNormal;
toNvShared(positions[4].n) = -lNormal;
toNvShared(positions[5].n) = -lNormal;
toNvShared(positions[6].n) = -lNormal;
toNvShared(positions[7].n) = -lNormal;
for (uint32_t i = 0; i < mesh->getFacetCount(); ++i)
{
mesh->getFacetWritable(i)->userData = id;
}
mesh->recalculateBoundingBox();
}
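// Stepper abstracts iteration over a 2D grid of sample positions: getStart() returns the
// first sample, getStep1()/getStep2() the per-sample and per-row increments, and
// getNormal() the direction along which noise displacement is applied.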
struct Stepper
{
virtual nvidia::NvVec3 getStep1(uint32_t w, uint32_t h) const = 0;
virtual nvidia::NvVec3 getStep2(uint32_t w) const = 0;
virtual nvidia::NvVec3 getStart() const = 0;
virtual nvidia::NvVec3 getNormal(uint32_t w, uint32_t h) const = 0;
virtual bool isStep2ClosedLoop() const
{
return false;
}
virtual bool isStep2FreeBoundary() const
{
return false;
}
};
struct PlaneStepper : public Stepper
{
PlaneStepper(const nvidia::NvVec3& normal, const nvidia::NvVec3& point, float sizeX, float sizeY,
uint32_t resolutionX, uint32_t resolutionY, bool swapTangents = false)
{
NvVec3 t1, t2;
lNormal = normal.getNormalized();
getTangents(lNormal, t1, t2);
if (swapTangents)
{
std::swap(t1, t2);
}
t11d = -t1 * 2.0f * sizeX / resolutionX;
t12d = -t2 * 2.0f * sizeY / resolutionY;
t21d = t11d;
t22d = t12d;
cPos = point + (t1 * sizeX + t2 * sizeY);
resY = resolutionY;
}
// Define a face by 4 corner points; the points should lie in a plane
PlaneStepper(const nvidia::NvVec3& p11, const nvidia::NvVec3& p12, const nvidia::NvVec3& p21, const nvidia::NvVec3& p22,
uint32_t resolutionX, uint32_t resolutionY)
{
lNormal = -(p21 - p11).cross(p12 - p11).getNormalized();
if (lNormal.magnitude() < 1e-5)
{
lNormal = (p21 - p22).cross(p12 - p22).getNormalized();
}
t11d = (p11 - p21) / resolutionX;
t12d = (p12 - p11) / resolutionY;
t21d = (p12 - p22) / resolutionX;
t22d = (p22 - p21) / resolutionY;
cPos = p21;
resY = resolutionY;
}
nvidia::NvVec3 getStep1(uint32_t y, uint32_t) const
{
return (t11d * (resY - y) + t21d * y) / resY;
}
nvidia::NvVec3 getStep2(uint32_t) const
{
return t22d;
}
nvidia::NvVec3 getStart() const
{
return cPos;
}
nvidia::NvVec3 getNormal(uint32_t, uint32_t) const
{
return lNormal;
}
NvVec3 t11d, t12d, t21d, t22d, cPos, lNormal;
uint32_t resY;
};
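// Triangulate a w x h grid of previously emitted vertices: every grid cell becomes two
// triangles (three edges plus a Facet record each); 'reflected' flips the diagonal and winding.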
void fillEdgesAndFaces(std::vector<Edge>& edges, std::vector<Facet>& facets, uint32_t h, uint32_t w,
uint32_t firstVertex, uint32_t verticesCount, int64_t id, int32_t interiorMaterialId,
int32_t smoothingGroup = -1, bool reflected = false)
{
for (uint32_t i = 0; i < w; ++i)
{
for (uint32_t j = 0; j < h; ++j)
{
int32_t start = edges.size();
uint32_t idx00 = i * (h + 1) + j + firstVertex;
uint32_t idx01 = idx00 + 1;
uint32_t idx10 = (idx00 + h + 1) % verticesCount;
uint32_t idx11 = (idx01 + h + 1) % verticesCount;
if (reflected)
{
edges.push_back({idx01, idx11});
edges.push_back({idx11, idx10});
edges.push_back({idx10, idx01});
facets.push_back({start, 3, id, interiorMaterialId, smoothingGroup});
start = edges.size();
edges.push_back({idx01, idx10});
edges.push_back({idx10, idx00});
edges.push_back({idx00, idx01});
facets.push_back({start, 3, id, interiorMaterialId, smoothingGroup});
}
else
{
edges.push_back({idx00, idx01});
edges.push_back({idx01, idx11});
edges.push_back({idx11, idx00});
facets.push_back({start, 3, id, interiorMaterialId, smoothingGroup});
start = edges.size();
edges.push_back({idx00, idx11});
edges.push_back({idx11, idx10});
edges.push_back({idx10, idx00});
facets.push_back({start, 3, id, interiorMaterialId, smoothingGroup});
}
}
}
}
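// Emit a (w + 1) x (h + 1) vertex grid driven by the stepper, displace interior vertices
// along the stepper normal by simplex noise, then triangulate with fillEdgesAndFaces.
// Vertices on the j-boundary are never displaced; randomizeLast additionally allows
// displacement of the outermost rows.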
void getNoisyFace(std::vector<Vertex>& vertices, std::vector<Edge>& edges, std::vector<Facet>& facets, uint32_t h,
uint32_t w, const nvidia::NvVec2& uvOffset, const nvidia::NvVec2& uvScale, const Stepper& stepper,
SimplexNoise& nEval, int64_t id, int32_t interiorMaterialId, bool randomizeLast = false)
{
uint32_t randIdx = randomizeLast ? 1 : 0;
NvVec3 cPosit = stepper.getStart();
uint32_t firstVertex = vertices.size();
for (uint32_t i = 0; i < w + 1; ++i)
{
NvVec3 lcPosit = cPosit;
for (uint32_t j = 0; j < h + 1; ++j)
{
vertices.push_back(Vertex());
toNvShared(vertices.back().p) = lcPosit;
toNvShared(vertices.back().uv[0]) = uvOffset + uvScale.multiply(nvidia::NvVec2(j, i));
lcPosit += stepper.getStep1(i, j);
}
cPosit += stepper.getStep2(i);
}
for (uint32_t i = 1 - randIdx; i < w + randIdx; ++i)
{
for (uint32_t j = 1; j < h; ++j)
{
// TODO limit max displacement for cylinder
NvVec3& pnt = toNvShared(vertices[i * (h + 1) + j + firstVertex].p);
pnt += stepper.getNormal(i, j) * nEval.sample(pnt);
}
}
fillEdgesAndFaces(edges, facets, h, w, firstVertex, vertices.size(), id, interiorMaterialId);
}
uint32_t unsignedMod(int32_t n, uint32_t modulus)
{
const int32_t d = n / (int32_t)modulus;
const int32_t m = n - d * (int32_t)modulus;
return m >= 0 ? (uint32_t)m : (uint32_t)m + modulus;
}
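// Recompute interior grid-vertex normals from the cross products of the four edge vectors
// to neighboring grid vertices (a discrete surface normal), optionally inverted.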
void calculateNormals(std::vector<Vertex>& vertices, uint32_t h, uint32_t w, bool inverseNormals = false)
{
for (uint32_t i = 1; i < w; ++i)
{
for (uint32_t j = 1; j < h; ++j)
{
int32_t idx = i * (h + 1) + j;
NvVec3 v1 = toNvShared(vertices[idx + h + 1].p - vertices[idx].p);
NvVec3 v2 = toNvShared(vertices[idx + 1].p - vertices[idx].p);
NvVec3 v3 = toNvShared(vertices[idx - (h + 1)].p - vertices[idx].p);
NvVec3 v4 = toNvShared(vertices[idx - 1].p - vertices[idx].p);
NvVec3& n = toNvShared(vertices[idx].n);
n = v1.cross(v2) + v2.cross(v3) + v3.cross(v4) + v4.cross(v1);
if (inverseNormals)
{
n = -n;
}
n.normalize();
}
}
}
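// Build a cutting mesh whose central jagged plane is displaced by simplex noise
// (resolution derived from the plane extent and the requested sampling resolution),
// enclosed by an outer box shell that closes the cutting volume.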
Mesh* getNoisyCuttingBoxPair(const nvidia::NvVec3& point, const nvidia::NvVec3& normal, float size, float jaggedPlaneSize,
nvidia::NvVec3 resolution, int64_t id, float amplitude, float frequency, int32_t octaves,
int32_t seed, int32_t interiorMaterialId)
{
NvVec3 t1, t2;
NvVec3 lNormal = normal.getNormalized();
getTangents(lNormal, t1, t2);
float sz = 2.f * jaggedPlaneSize;
uint32_t resolutionX =
std::max(1u, (uint32_t)std::roundf(sz * std::abs(t1.x) * resolution.x + sz * std::abs(t1.y) * resolution.y +
sz * std::abs(t1.z) * resolution.z));
uint32_t resolutionY =
std::max(1u, (uint32_t)std::roundf(sz * std::abs(t2.x) * resolution.x + sz * std::abs(t2.y) * resolution.y +
sz * std::abs(t2.z) * resolution.z));
PlaneStepper stepper(normal, point, jaggedPlaneSize, jaggedPlaneSize, resolutionX, resolutionY);
SimplexNoise nEval(amplitude, frequency, octaves, seed);
std::vector<Vertex> vertices;
vertices.reserve((resolutionX + 1) * (resolutionY + 1) + 12);
std::vector<Edge> edges;
std::vector<Facet> facets;
getNoisyFace(vertices, edges, facets, resolutionX, resolutionY, nvidia::NvVec2(0.f),
nvidia::NvVec2(UV_SCALE / resolutionX, UV_SCALE / resolutionY), stepper, nEval, id, interiorMaterialId);
calculateNormals(vertices, resolutionX, resolutionY);
uint32_t offset = (resolutionX + 1) * (resolutionY + 1);
vertices.resize(offset + 12);
toNvShared(vertices[0 + offset].p) = point + (t1 + t2) * size;
toNvShared(vertices[1 + offset].p) = point + (t2 - t1) * size;
toNvShared(vertices[2 + offset].p) = point + (-t1 - t2) * size;
toNvShared(vertices[3 + offset].p) = point + (t1 - t2) * size;
toNvShared(vertices[8 + offset].p) = point + (t1 + t2) * jaggedPlaneSize;
toNvShared(vertices[9 + offset].p) = point + (t2 - t1) * jaggedPlaneSize;
toNvShared(vertices[10 + offset].p) = point + (-t1 - t2) * jaggedPlaneSize;
toNvShared(vertices[11 + offset].p) = point + (t1 - t2) * jaggedPlaneSize;
toNvShared(vertices[4 + offset].p) = point + (t1 + t2 + lNormal) * size;
toNvShared(vertices[5 + offset].p) = point + (t2 - t1 + lNormal) * size;
toNvShared(vertices[6 + offset].p) = point + (-t1 - t2 + lNormal) * size;
toNvShared(vertices[7 + offset].p) = point + (t1 - t2 + lNormal) * size;
int32_t edgeOffset = edges.size();
edges.push_back({0 + offset, 1 + offset});
edges.push_back({ 1 + offset, 2 + offset });
edges.push_back({ 2 + offset, 3 + offset });
edges.push_back({3 + offset, 0 + offset});
edges.push_back({ 11 + offset, 10 + offset });
edges.push_back({ 10 + offset, 9 + offset });
edges.push_back({ 9 + offset, 8 + offset });
edges.push_back({ 8 + offset, 11 + offset });
facets.push_back({ edgeOffset, 8, id, interiorMaterialId, -1 });
edges.push_back({ 0 + offset, 3 + offset });
edges.push_back({ 3 + offset, 7 + offset });
edges.push_back({ 7 + offset, 4 + offset });
edges.push_back({ 4 + offset, 0 + offset });
facets.push_back({ 8 + edgeOffset, 4, id, interiorMaterialId, -1 });
edges.push_back({ 3 + offset, 2 + offset });
edges.push_back({ 2 + offset, 6 + offset });
edges.push_back({ 6 + offset, 7 + offset });
edges.push_back({ 7 + offset, 3 + offset });
facets.push_back({ 12 + edgeOffset, 4, id, interiorMaterialId, -1 });
edges.push_back({ 5 + offset, 6 + offset });
edges.push_back({ 6 + offset, 2 + offset });
edges.push_back({ 2 + offset, 1 + offset });
edges.push_back({ 1 + offset, 5 + offset });
facets.push_back({ 16 + edgeOffset, 4, id, interiorMaterialId, -1 });
edges.push_back({ 4 + offset, 5 + offset });
edges.push_back({ 5 + offset, 1 + offset });
edges.push_back({ 1 + offset, 0 + offset });
edges.push_back({ 0 + offset, 4 + offset });
facets.push_back({ 20 + edgeOffset, 4, id, interiorMaterialId, -1 });
edges.push_back({ 4 + offset, 7 + offset });
edges.push_back({ 7 + offset, 6 + offset });
edges.push_back({ 6 + offset, 5 + offset });
edges.push_back({ 5 + offset, 4 + offset });
facets.push_back({ 24 + edgeOffset, 4, id, interiorMaterialId, -1 });
//
return new MeshImpl(vertices.data(), edges.data(), facets.data(), vertices.size(), edges.size(), facets.size());
}
Mesh* getBigBox(const NvVec3& point, float size, int32_t interiorMaterialId)
{
NvVec3 normal(0, 0, 1);
normal.normalize();
NvVec3 t1, t2;
getTangents(normal, t1, t2);
std::vector<Vertex> positions(8);
toNvShared(positions[0].p) = point + (t1 + t2 - normal) * size;
toNvShared(positions[1].p) = point + (t2 - t1 - normal) * size;
toNvShared(positions[2].p) = point + (-t1 - t2 - normal) * size;
toNvShared(positions[3].p) = point + (t1 - t2 - normal) * size;
toNvShared(positions[4].p) = point + (t1 + t2 + normal) * size;
toNvShared(positions[5].p) = point + (t2 - t1 + normal) * size;
toNvShared(positions[6].p) = point + (-t1 - t2 + normal) * size;
toNvShared(positions[7].p) = point + (t1 - t2 + normal) * size;
positions[0].uv[0] = {0, 0};
positions[1].uv[0] = {UV_SCALE, 0};
positions[2].uv[0] = {UV_SCALE, UV_SCALE};
positions[3].uv[0] = {0, UV_SCALE};
positions[4].uv[0] = {0, 0};
positions[5].uv[0] = {UV_SCALE, 0};
positions[6].uv[0] = {UV_SCALE, UV_SCALE};
positions[7].uv[0] = {0, UV_SCALE};
std::vector<Edge> edges;
std::vector<Facet> facets;
edges.push_back({0, 1});
edges.push_back({1, 2});
edges.push_back({2, 3});
edges.push_back({3, 0});
facets.push_back({0, 4, 0, interiorMaterialId, -1});
edges.push_back({0, 3});
edges.push_back({3, 7});
edges.push_back({7, 4});
edges.push_back({4, 0});
facets.push_back({4, 4, 0, interiorMaterialId, -1});
edges.push_back({3, 2});
edges.push_back({2, 6});
edges.push_back({6, 7});
edges.push_back({7, 3});
facets.push_back({8, 4, 0, interiorMaterialId, -1});
edges.push_back({5, 6});
edges.push_back({6, 2});
edges.push_back({2, 1});
edges.push_back({1, 5});
facets.push_back({12, 4, 0, interiorMaterialId, -1});
edges.push_back({4, 5});
edges.push_back({5, 1});
edges.push_back({1, 0});
edges.push_back({0, 4});
facets.push_back({16, 4, 0, interiorMaterialId, -1});
edges.push_back({4, 7});
edges.push_back({7, 6});
edges.push_back({6, 5});
edges.push_back({5, 4});
facets.push_back({20, 4, 0, interiorMaterialId, -1});
for (int i = 0; i < 8; ++i)
positions[i].n = {0, 0, 0};
return new MeshImpl(positions.data(), edges.data(), facets.data(), static_cast<uint32_t>(positions.size()),
static_cast<uint32_t>(edges.size()), static_cast<uint32_t>(facets.size()));
}
bool CmpSharedFace::
operator()(const std::pair<nvidia::NvVec3, nvidia::NvVec3>& pv1, const std::pair<nvidia::NvVec3, nvidia::NvVec3>& pv2) const
{
CmpVec vc;
if ((pv1.first - pv2.first).magnitude() < 1e-5)
{
return vc(pv1.second, pv2.second);
}
return vc(pv1.first, pv2.first);
}
#define INDEXER_OFFSET (1ll << 32)
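// Precompute noisy side faces for cutout-based cutting: for every cutout segment generate a
// shared noisy strip keyed by its two endpoints in sharedFacesMap, then limit and relax the
// displacement near corners, and finally glue neighboring strips so shared boundary columns match.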
void buildCuttingConeFaces(const CutoutConfiguration& conf, const std::vector<std::vector<nvidia::NvVec3> >& cutoutPoints,
float heightBot, float heightTop, float conicityBot, float conicityTop, int64_t& id,
int32_t seed, int32_t interiorMaterialId, SharedFacesMap& sharedFacesMap)
{
if (conf.noise.amplitude <= FLT_EPSILON)
{
return;
}
std::map<nvidia::NvVec3, std::pair<uint32_t, std::vector<nvidia::NvVec3> >, CmpVec> newCutoutPoints;
uint32_t resH = std::max((uint32_t)std::roundf((heightBot + heightTop) / conf.noise.samplingInterval.z), 1u);
// generate noisy faces
SimplexNoise nEval(conf.noise.amplitude, conf.noise.frequency, conf.noise.octaveNumber, seed);
for (uint32_t i = 0; i < cutoutPoints.size(); i++)
{
auto& points = cutoutPoints[i];
uint32_t pointCount = points.size();
float finalP = 0, currentP = 0;
for (uint32_t j = 0; j < pointCount; j++)
{
finalP += (points[(j + 1) % pointCount] - points[j]).magnitude();
}
for (uint32_t p = 0; p < pointCount; p++)
{
auto p0 = points[p];
auto p1 = points[(p + 1) % pointCount];
auto cp0 = newCutoutPoints.find(p0);
if (cp0 == newCutoutPoints.end())
{
newCutoutPoints[p0] = std::make_pair(0u, std::vector<nvidia::NvVec3>(resH + 1, nvidia::NvVec3(0.f)));
cp0 = newCutoutPoints.find(p0);
}
auto cp1 = newCutoutPoints.find(p1);
if (cp1 == newCutoutPoints.end())
{
newCutoutPoints[p1] = std::make_pair(0u, std::vector<nvidia::NvVec3>(resH + 1, nvidia::NvVec3(0.f)));
cp1 = newCutoutPoints.find(p1);
}
auto vec = p1 - p0;
auto cPos = (p0 + p1) * 0.5f;
uint32_t numPts = (uint32_t)(std::abs(vec.x) / conf.noise.samplingInterval.x +
std::abs(vec.y) / conf.noise.samplingInterval.y) +
1;
auto normal = vec.cross(nvidia::NvVec3(0, 0, 1));
normal.normalize();
auto p00 = p0 * conicityBot;
p00.z = -heightBot;
auto p01 = p1 * conicityBot;
p01.z = -heightBot;
auto p10 = p0 * conicityTop;
p10.z = heightTop;
auto p11 = p1 * conicityTop;
p11.z = heightTop;
PlaneStepper stepper(p00, p01, p10, p11, resH, numPts);
PlaneStepper stepper1(normal, cPos, heightTop, vec.magnitude() * 0.5f, resH, numPts, true);
stepper1.getNormal(0, 0);
auto t = std::make_pair(p0, p1);
auto sfIt = sharedFacesMap.find(t);
if (sfIt == sharedFacesMap.end() && sharedFacesMap.find(std::make_pair(p1, p0)) == sharedFacesMap.end())
{
sharedFacesMap[t] = SharedFace(numPts, resH, -(id + INDEXER_OFFSET), interiorMaterialId);
sfIt = sharedFacesMap.find(t);
auto& SF = sfIt->second;
getNoisyFace(SF.vertices, SF.edges, SF.facets, resH, numPts,
nvidia::NvVec2(0, CYLINDER_UV_SCALE * currentP / (heightBot + heightTop)),
nvidia::NvVec2(CYLINDER_UV_SCALE / resH,
CYLINDER_UV_SCALE * vec.magnitude() / (heightBot + heightTop) / numPts),
stepper, nEval, id++ + INDEXER_OFFSET, interiorMaterialId, true);
currentP += vec.magnitude();
cp0->second.first++;
cp1->second.first++;
for (uint32_t k = 0; k <= resH; k++)
{
cp0->second.second[k] += toNvShared(SF.vertices[k].p);
cp1->second.second[k] += toNvShared(SF.vertices[SF.vertices.size() - resH - 1 + k].p);
}
}
}
}
// Limit face displacement iteratively: at sharp corners blend the displaced boundary column back toward the undisplaced cutout point
for (uint32_t i = 0; i < cutoutPoints.size(); i++)
{
auto& points = cutoutPoints[i];
uint32_t pointCount = points.size();
for (uint32_t p = 0; p < pointCount; p++)
{
auto p0 = points[p];
auto p1 = points[(p + 1) % pointCount];
auto p2 = points[(p + 2) % pointCount];
auto& cp1 = newCutoutPoints.find(p1)->second;
float d = nvidia::NvClamp((p1 - p0).getNormalized().dot((p2 - p1).getNormalized()), 0.f, 1.f);
for (uint32_t h = 0; h <= resH; h++)
{
float z = cp1.second[h].z;
float conicity = (conicityBot * h + conicityTop * (resH - h)) / resH;
cp1.second[h] = cp1.second[h] * d + p1 * cp1.first * conicity * (1.f - d);
cp1.second[h].z = z;
}
}
}
// Relax neighboring points where the displacement limiting above was too strong
for (uint32_t i = 0; i < cutoutPoints.size(); i++)
{
auto& points = cutoutPoints[i];
uint32_t pointCount = points.size();
for (uint32_t p = 0; p < pointCount; p++)
{
auto p0 = points[p];
auto p1 = points[(p + 1) % pointCount];
auto& cp0 = newCutoutPoints.find(p0)->second;
auto& cp1 = newCutoutPoints.find(p1)->second;
auto SFIt = sharedFacesMap.find(std::make_pair(p0, p1));
uint32_t idx0 = 0, idx1;
if (SFIt == sharedFacesMap.end())
{
SFIt = sharedFacesMap.find(std::make_pair(p1, p0));
idx1 = 0;
idx0 = SFIt->second.w * (SFIt->second.h + 1);
}
else
{
idx1 = SFIt->second.w * (SFIt->second.h + 1);
}
for (uint32_t h = 0; h <= resH; h++)
{
float z = cp1.second[h].z;
float R0 = (cp0.second[h] / cp0.first - toNvShared(SFIt->second.vertices[idx0 + h].p)).magnitude();
float R1 = (cp1.second[h] / cp1.first - toNvShared(SFIt->second.vertices[idx1 + h].p)).magnitude();
float R = R0 - R1;
float r = 0.25f * (cp1.second[h] / cp1.first - cp0.second[h] / cp0.first).magnitude();
float conicity = (conicityBot * h + conicityTop * (resH - h)) / resH;
if (R > r)
{
float w = std::min(1.f, r / R);
cp1.second[h] = cp1.second[h] * w + p1 * cp1.first * conicity * (1.f - w);
cp1.second[h].z = z;
}
}
}
for (int32_t p = pointCount - 1; p >= 0; p--)
{
auto p0 = points[p];
auto p1 = points[unsignedMod(p - 1, pointCount)];
auto& cp0 = newCutoutPoints.find(p0)->second;
auto& cp1 = newCutoutPoints.find(p1)->second;
auto SFIt = sharedFacesMap.find(std::make_pair(p0, p1));
uint32_t idx0 = 0, idx1;
if (SFIt == sharedFacesMap.end())
{
SFIt = sharedFacesMap.find(std::make_pair(p1, p0));
idx1 = 0;
idx0 = SFIt->second.w * (SFIt->second.h + 1);
}
else
{
idx1 = SFIt->second.w * (SFIt->second.h + 1);
}
for (uint32_t h = 0; h <= resH; h++)
{
float z = cp1.second[h].z;
float R0 = (cp0.second[h] / cp0.first - toNvShared(SFIt->second.vertices[idx0 + h].p)).magnitude();
float R1 = (cp1.second[h] / cp1.first - toNvShared(SFIt->second.vertices[idx1 + h].p)).magnitude();
float R = R0 - R1;
float r = 0.25f * (cp1.second[h] / cp1.first - cp0.second[h] / cp0.first).magnitude();
float conicity = (conicityBot * h + conicityTop * (resH - h)) / resH;
if (R > r)
{
float w = std::min(1.f, r / R);
cp1.second[h] = cp1.second[h] * w + p1 * cp1.first * conicity * (1.f - w);
cp1.second[h].z = z;
}
}
}
}
// Glue shared faces: pull the boundary columns of each strip toward the averaged positions stored per cutout point so neighboring strips match
for (auto& SF : sharedFacesMap)
{
auto& cp0 = newCutoutPoints.find(SF.first.first)->second;
auto& cp1 = newCutoutPoints.find(SF.first.second)->second;
auto& v = SF.second.vertices;
float invW = 1.f / SF.second.w;
for (uint32_t w = 0; w <= SF.second.w; w++)
{
for (uint32_t h = 0; h <= SF.second.h; h++)
{
toNvShared(v[w * (SF.second.h + 1) + h].p) +=
((cp0.second[h] / cp0.first - toNvShared(v[h].p)) * (SF.second.w - w) +
(cp1.second[h] / cp1.first - toNvShared(v[SF.second.w * (SF.second.h + 1) + h].p)) * w) *
invW;
}
}
}
}
Mesh* getNoisyCuttingCone(const std::vector<nvidia::NvVec3>& points, const std::set<int32_t>& smoothingGroups,
const nvidia::NvTransform& transform, bool useSmoothing, float heightBot, float heightTop,
float conicityMultiplierBot, float conicityMultiplierTop, nvidia::NvVec3 samplingInterval,
int32_t interiorMaterialId, const SharedFacesMap& sharedFacesMap, bool inverseNormals)
{
NV_UNUSED(conicityMultiplierTop);
NV_UNUSED(conicityMultiplierBot);
uint32_t pointCount = points.size();
uint32_t resP = pointCount;
for (uint32_t i = 0; i < pointCount; i++)
{
auto vec = (points[(i + 1) % pointCount] - points[i]);
resP += (uint32_t)(std::abs(vec.x) / samplingInterval.x + std::abs(vec.y) / samplingInterval.y);
}
uint32_t resH = std::max((uint32_t)std::roundf((heightBot + heightTop) / samplingInterval.z), 1u);
std::vector<Vertex> positions;
positions.reserve((resH + 1) * (resP + 1));
std::vector<Edge> edges;
edges.reserve(resH * resP * 6 + (resP + 1) * 2);
std::vector<Facet> facets;
facets.reserve(resH * resP * 2 + 2);
uint32_t pCount = 0;
int sg = useSmoothing ? 1 : -1;
for (uint32_t p = 0; p < pointCount; p++)
{
if (useSmoothing && smoothingGroups.find(p) != smoothingGroups.end())
{
sg = sg ^ 3;
}
auto p0 = points[p];
auto p1 = points[(p + 1) % pointCount];
uint32_t firstVertexIndex = positions.size();
uint32_t firstEdgeIndex = edges.size();
auto sfIt = sharedFacesMap.find(std::make_pair(p0, p1));
int32_t vBegin = 0, vEnd = -1, vIncr = 1;
if (sfIt == sharedFacesMap.end())
{
sfIt = sharedFacesMap.find(std::make_pair(p1, p0));
vBegin = sfIt->second.w;
vIncr = -1;
}
else
{
vEnd = sfIt->second.w + 1;
}
auto& SF = sfIt->second;
positions.resize(firstVertexIndex + (SF.w + 1) * (SF.h + 1));
if (vBegin < vEnd)
{
for (auto& e : SF.edges)
{
edges.push_back({e.s + firstVertexIndex, e.e + firstVertexIndex});
}
for (auto& f : SF.facets)
{
facets.push_back(f);
facets.back().firstEdgeNumber += firstEdgeIndex;
facets.back().smoothingGroup = sg;
}
}
else
{
fillEdgesAndFaces(edges, facets, SF.h, SF.w, firstVertexIndex, positions.size(), SF.f.userData,
SF.f.materialId, sg, true);
}
for (int32_t v = vBegin; v != vEnd; v += vIncr)
{
std::copy(SF.vertices.begin() + v * (resH + 1), SF.vertices.begin() + (v + 1) * (SF.h + 1),
positions.begin() + firstVertexIndex);
firstVertexIndex += SF.h + 1;
}
pCount += SF.vertices.size() / (resH + 1) - 1;
}
if (inverseNormals)
{
for (uint32_t e = 0; e < edges.size(); e += 3)
{
std::swap(edges[e + 0].s, edges[e + 0].e);
std::swap(edges[e + 1].s, edges[e + 1].e);
std::swap(edges[e + 2].s, edges[e + 2].e);
std::swap(edges[e + 0], edges[e + 2]);
}
}
uint32_t totalCount = pCount + pointCount;
calculateNormals(positions, resH, totalCount - 1, inverseNormals);
std::vector<float> xPos, yPos;
int32_t ii = 0;
for (auto& p : positions)
{
if ((ii++) % (resH + 1) == 1)
{
xPos.push_back(p.p.x);
yPos.push_back(p.p.y);
}
toNvShared(p.p) = transform.transform(toNvShared(p.p));
toNvShared(p.n) = transform.rotate(toNvShared(p.n));
}
totalCount /= 2;
for (uint32_t i = 0; i < totalCount; i++)
{
uint32_t idx = 2 * i * (resH + 1);
edges.push_back({idx, (idx + 2 * (resH + 1)) % (uint32_t)positions.size()});
}
for (int32_t i = totalCount; i > 0; i--)
{
uint32_t idx = (2 * i + 1) * (resH + 1) - 1;
edges.push_back({ idx % (uint32_t)positions.size(), idx - 2 * (resH + 1)});
}
if (smoothingGroups.find(0) != smoothingGroups.end() || smoothingGroups.find(pointCount - 1) != smoothingGroups.end())
{
if (facets[0].smoothingGroup == facets[facets.size() - 1].smoothingGroup)
{
for (uint32_t i = 0; i < resH; i++)
{
facets[i].smoothingGroup = 4;
}
}
}
facets.push_back({ (int32_t)(resH * pCount * 6), totalCount, 0, interiorMaterialId, -1 });
facets.push_back({ (int32_t)(resH * pCount * 6 + totalCount), totalCount, 0, interiorMaterialId, -1 });
return new MeshImpl(positions.data(), edges.data(), facets.data(), static_cast<uint32_t>(positions.size()),
static_cast<uint32_t>(edges.size()), static_cast<uint32_t>(facets.size()));
}
Mesh* getCuttingCone(const CutoutConfiguration& conf, const std::vector<nvidia::NvVec3>& points,
const std::set<int32_t>& smoothingGroups, float heightBot, float heightTop, float conicityBot,
float conicityTop, int64_t& id, int32_t seed, int32_t interiorMaterialId,
const SharedFacesMap& sharedFacesMap, bool inverseNormals)
{
NV_UNUSED(seed);
uint32_t pointCount = points.size();
if (conf.noise.amplitude > FLT_EPSILON)
{
return getNoisyCuttingCone(points, smoothingGroups, toNvShared(conf.transform), conf.useSmoothing, heightBot, heightTop,
conicityBot, conicityTop, toNvShared(conf.noise.samplingInterval), interiorMaterialId,
sharedFacesMap, inverseNormals);
}
float currentP = 0;
std::vector<Vertex> positions((pointCount + 1) * 2);
std::vector<Edge> edges(pointCount * 6 + 2);
std::vector<Facet> facets(pointCount + 2);
int sg = conf.useSmoothing ? 1 : -1;
for (uint32_t i = 0; i < pointCount + 1; i++)
{
if (conf.useSmoothing && smoothingGroups.find(i) != smoothingGroups.end())
{
sg = sg ^ 3;
}
uint32_t i1 = i + pointCount + 1;
uint32_t i3 = i + 1;
uint32_t i2 = i3 + pointCount + 1;
auto& p0 = positions[i];
auto& p1 = positions[i1];
p0.n = p1.n = {0.f, 0.f, 0.f};
toNvShared(p0.p) = points[i % pointCount] * conicityBot;
p0.p.z = -heightBot;
toNvShared(p1.p) = points[i % pointCount] * conicityTop;
p1.p.z = heightTop;
toNvShared(p0.p) = toNvShared(conf.transform).transform(toNvShared(p0.p));
toNvShared(p1.p) = toNvShared(conf.transform).transform(toNvShared(p1.p));
p0.uv[0] = {0.f, CYLINDER_UV_SCALE * currentP / (heightBot + heightTop)};
p1.uv[0] = {CYLINDER_UV_SCALE, CYLINDER_UV_SCALE * currentP / (heightBot + heightTop)};
if (i == pointCount)
{
break;
}
currentP += (points[(i + 1) % pointCount] - points[i]).magnitude();
int32_t edgeIdx = 4 * i;
if (inverseNormals)
{
edges[edgeIdx + 1] = {i1, i2};
edges[edgeIdx + 2] = {i2, i3};
edges[edgeIdx + 3] = {i3, i};
edges[edgeIdx + 0] = {i, i1};
}
else
{
edges[edgeIdx + 0] = {i, i3};
edges[edgeIdx + 1] = {i3, i2};
edges[edgeIdx + 2] = {i2, i1};
edges[edgeIdx + 3] = {i1, i};
}
facets[i] = {edgeIdx, 4, id++, interiorMaterialId, sg};
edges[5 * pointCount + i + 1] = {i1, i2};
edges[5 * pointCount - i - 1] = {i3, i};
}
edges[5 * pointCount] = {0, pointCount};
edges[6 * pointCount + 1] = {2 * pointCount + 1, pointCount + 1};
if (smoothingGroups.find(0) != smoothingGroups.end() || smoothingGroups.find(pointCount - 1) != smoothingGroups.end())
{
if (facets[0].smoothingGroup == facets[pointCount - 1].smoothingGroup)
{
facets[0].smoothingGroup = 4;
}
}
facets[pointCount + 0] = { 4 * (int32_t)pointCount, pointCount + 1, 0, interiorMaterialId, -1 };
facets[pointCount + 1] = { 5 * (int32_t)pointCount + 1, pointCount + 1, 0, interiorMaterialId, -1 };
return new MeshImpl(positions.data(), edges.data(), facets.data(), static_cast<uint32_t>(positions.size()),
static_cast<uint32_t>(edges.size()), static_cast<uint32_t>(facets.size()));
}
} // namespace Blast
} // namespace Nv | 36,583 | C++ | 36.368744 | 128 | 0.538173 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringMeshNoiser.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved.
// This warning arises when using some stl containers with older versions of VC
// c:\program files (x86)\microsoft visual studio 12.0\vc\include\xtree(1826): warning C4702: unreachable code
#include "NvPreprocessor.h"
#if NV_VC && NV_VC < 14
#pragma warning(disable : 4702)
#endif
#include "NvBlastExtAuthoringMeshNoiser.h"
#include "NvBlastExtAuthoringPerlinNoise.h"
#include <set>
#include <queue>
#include <NvBlastAssert.h>
#include <NvBlastNvSharedHelpers.h>
using namespace Nv::Blast;
using namespace std;
void MeshNoiser::computeFalloffAndNormals()
{
// Map newly created vertices according to positions
computePositionedMapping();
mGeometryGraph.resize(mVertices.size());
for (uint32_t i = 0; i < mEdges.size(); ++i)
{
if (mTrMeshEdToTr[i].c == 0)
{
continue;
}
int32_t v1 = mPositionMappedVrt[mEdges[i].s];
int32_t v2 = mPositionMappedVrt[mEdges[i].e];
if (std::find(mGeometryGraph[v1].begin(), mGeometryGraph[v1].end(), v2) == mGeometryGraph[v1].end())
mGeometryGraph[v1].push_back(v2);
if (std::find(mGeometryGraph[v2].begin(), mGeometryGraph[v2].end(), v1) == mGeometryGraph[v2].end())
mGeometryGraph[v2].push_back(v1);
}
mVerticesDistances.clear();
mVerticesDistances.resize(mVertices.size(), 10000.0f);
std::queue<int32_t> que;
for (uint32_t i = 0; i < mEdges.size(); ++i)
{
if (mTrMeshEdToTr[i].c != 0 && (mEdgeFlag[i] == EXTERNAL_EDGE || mEdgeFlag[i] == EXTERNAL_BORDER_EDGE))
{
int32_t v1 = mPositionMappedVrt[mEdges[i].s];
int32_t v2 = mPositionMappedVrt[mEdges[i].e];
mVerticesDistances[v1] = 0.0f;
mVerticesDistances[v2] = 0.0f;
que.push(v1);
que.push(v2);
}
}
while (!que.empty())
{
int32_t curr = que.front();
que.pop();
for (uint32_t i = 0; i < mGeometryGraph[curr].size(); ++i)
{
int32_t to = mGeometryGraph[curr][i];
float d = mVerticesDistances[curr] + 0.1f; // (mVertices[to].p - mVertices[curr].p).magnitudeSquared();
if (d < mVerticesDistances[to])
{
mVerticesDistances[to] = d;
que.push(to);
}
}
}
for (uint32_t i = 0; i < mVerticesDistances.size(); ++i)
{
int32_t from = mPositionMappedVrt[i];
mVerticesDistances[i] = mVerticesDistances[from];
}
}
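// Returns true when the two segments are (nearly) collinear and their axis-aligned bounds
// overlap, i.e. the edges geometrically overlap.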
bool edgeOverlapTest(NvcVec3& as, NvcVec3& ae, NvcVec3& bs, NvcVec3& be)
{
// return false;
if (std::max(std::min(as.x, ae.x), std::min(bs.x, be.x)) > std::min(std::max(as.x, ae.x), std::max(bs.x, be.x)))
return false;
if (std::max(std::min(as.y, ae.y), std::min(bs.y, be.y)) > std::min(std::max(as.y, ae.y), std::max(bs.y, be.y)))
return false;
if (std::max(std::min(as.z, ae.z), std::min(bs.z, be.z)) > std::min(std::max(as.z, ae.z), std::max(bs.z, be.z)))
return false;
return (toNvShared(bs - as).cross(toNvShared(ae - as))).magnitudeSquared() < 1e-6f &&
(toNvShared(be - as).cross(toNvShared(ae - as))).magnitudeSquared() < 1e-6f;
}
void MeshNoiser::computePositionedMapping()
{
std::map<NvcVec3, int32_t, VrtPositionComparator> mPosMap;
mPositionMappedVrt.clear();
mPositionMappedVrt.resize(mVertices.size());
for (uint32_t i = 0; i < mVertices.size(); ++i)
{
auto it = mPosMap.find(mVertices[i].p);
if (it == mPosMap.end())
{
mPosMap[mVertices[i].p] = i;
mPositionMappedVrt[i] = i;
}
else
{
mPositionMappedVrt[i] = it->second;
}
}
}
void MeshNoiser::relax(int32_t iteration, float factor, std::vector<Vertex>& vertices)
{
std::vector<NvVec3> verticesTemp(vertices.size());
std::vector<NvVec3> normalsTemp(vertices.size());
for (int32_t iter = 0; iter < iteration; ++iter)
{
for (uint32_t i = 0; i < vertices.size(); ++i)
{
if (mRestrictionFlag[i])
{
continue;
}
NvVec3 cps = toNvShared(vertices[i].p);
NvVec3 cns = mVerticesNormalsSmoothed[i];
NvVec3 averaged(0, 0, 0);
NvVec3 averagedNormal(0, 0, 0);
for (uint32_t p = 0; p < mGeometryGraph[mPositionMappedVrt[i]].size(); ++p)
{
int32_t to = mGeometryGraph[mPositionMappedVrt[i]][p];
averaged += toNvShared(vertices[to].p);
averagedNormal += mVerticesNormalsSmoothed[to];
}
averaged *= (1.0f / mGeometryGraph[mPositionMappedVrt[i]].size());
averagedNormal *= (1.0f / mGeometryGraph[mPositionMappedVrt[i]].size());
verticesTemp[i] = cps + (averaged - cps) * factor;
normalsTemp[i] = cns * (1.0f - factor) + averagedNormal * factor;
}
for (uint32_t i = 0; i < vertices.size(); ++i)
{
if (mRestrictionFlag[i])
{
continue;
}
vertices[i].p = fromNvShared(verticesTemp[i]);
mVerticesNormalsSmoothed[i] = normalsTemp[i].getNormalized();
}
}
}
NV_FORCE_INLINE void
markEdge(int32_t ui, int32_t ed, std::vector<MeshNoiser::EdgeFlag>& shortMarkup, std::vector<int32_t>& lastOwner)
{
if (shortMarkup[ed] == MeshNoiser::NONE)
{
if (ui == 0)
{
shortMarkup[ed] = MeshNoiser::EXTERNAL_EDGE;
}
else
{
shortMarkup[ed] = MeshNoiser::INTERNAL_EDGE;
}
lastOwner[ed] = ui;
}
else
{
if (ui != 0)
{
if (shortMarkup[ed] == MeshNoiser::EXTERNAL_EDGE)
{
shortMarkup[ed] = MeshNoiser::EXTERNAL_BORDER_EDGE;
}
if ((shortMarkup[ed] == MeshNoiser::INTERNAL_EDGE) && ui != lastOwner[ed])
{
shortMarkup[ed] = MeshNoiser::INTERNAL_BORDER_EDGE;
}
}
else
{
if (shortMarkup[ed] != MeshNoiser::EXTERNAL_EDGE)
{
shortMarkup[ed] = MeshNoiser::EXTERNAL_BORDER_EDGE;
}
}
}
}
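// Classify every edge as EXTERNAL / INTERNAL / *_BORDER: weld vertices by position, collapse
// coincident edges, and merge the markings contributed by each adjacent triangle
// (triangles with userData == 0 belong to the original external surface).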
void MeshNoiser::prebuildEdgeFlagArray()
{
mRestrictionFlag.clear();
mRestrictionFlag.resize(mVertices.size());
mEdgeFlag.clear();
mEdgeFlag.resize(mEdges.size(), NONE);
std::map<NvcVec3, int32_t, VrtPositionComparator> mPosMap;
mPositionMappedVrt.clear();
mPositionMappedVrt.resize(mVertices.size(), 0);
for (uint32_t i = 0; i < mVertices.size(); ++i)
{
auto it = mPosMap.find(mVertices[i].p);
if (it == mPosMap.end())
{
mPosMap[mVertices[i].p] = i;
mPositionMappedVrt[i] = i;
}
else
{
mPositionMappedVrt[i] = it->second;
}
}
std::map<Edge, int32_t> mPositionEdgeMap;
std::vector<int32_t> mPositionBasedEdges(mEdges.size());
for (uint32_t i = 0; i < mEdges.size(); ++i)
{
Edge tmp = { mPositionMappedVrt[mEdges[i].s], mPositionMappedVrt[mEdges[i].e] };
if (tmp.e < tmp.s)
std::swap(tmp.e, tmp.s);
auto it = mPositionEdgeMap.find(tmp);
if (it == mPositionEdgeMap.end())
{
mPositionEdgeMap[tmp] = i;
mPositionBasedEdges[i] = i;
}
else
{
mPositionBasedEdges[i] = it->second;
}
}
std::vector<EdgeFlag> shortMarkup(mEdges.size(), NONE);
std::vector<int32_t> lastOwner(mEdges.size(), 0);
std::vector<std::vector<int32_t> > edgeOverlap(mEdges.size());
for (auto it1 = mPositionEdgeMap.begin(); it1 != mPositionEdgeMap.end(); ++it1)
{
auto it2 = it1;
it2++;
for (; it2 != mPositionEdgeMap.end(); ++it2)
{
Edge& ed1 = mEdges[it1->second];
Edge& ed2 = mEdges[it2->second];
if (edgeOverlapTest(mVertices[ed1.s].p, mVertices[ed1.e].p, mVertices[ed2.s].p, mVertices[ed2.e].p))
{
edgeOverlap[it1->second].push_back(it2->second);
}
}
}
for (uint32_t i = 0; i < mTriangles.size(); ++i)
{
int32_t ui = mTriangles[i].userData;
int32_t ed = mPositionBasedEdges[findEdge({ mTriangles[i].ea, mTriangles[i].eb })];
markEdge(ui, ed, shortMarkup, lastOwner);
for (uint32_t ov = 0; ov < edgeOverlap[ed].size(); ++ov)
{
markEdge(ui, edgeOverlap[ed][ov], shortMarkup, lastOwner);
}
ed = mPositionBasedEdges[findEdge({ mTriangles[i].ea, mTriangles[i].ec })];
markEdge(ui, ed, shortMarkup, lastOwner);
for (uint32_t ov = 0; ov < edgeOverlap[ed].size(); ++ov)
{
markEdge(ui, edgeOverlap[ed][ov], shortMarkup, lastOwner);
}
ed = mPositionBasedEdges[findEdge({ mTriangles[i].eb, mTriangles[i].ec })];
markEdge(ui, ed, shortMarkup, lastOwner);
for (uint32_t ov = 0; ov < edgeOverlap[ed].size(); ++ov)
{
markEdge(ui, edgeOverlap[ed][ov], shortMarkup, lastOwner);
}
}
for (uint32_t i = 0; i < mEdges.size(); ++i)
{
mEdgeFlag[i] = shortMarkup[mPositionBasedEdges[i]];
}
for (uint32_t i = 0; i < mTriangles.size(); ++i)
{
if (mTriangles[i].userData != 0)
continue;
int32_t ed = findEdge({ mTriangles[i].ea, mTriangles[i].eb });
mEdgeFlag[ed] = EXTERNAL_EDGE;
ed = findEdge({ mTriangles[i].ec, mTriangles[i].eb });
mEdgeFlag[ed] = EXTERNAL_EDGE;
ed = findEdge({ mTriangles[i].ea, mTriangles[i].ec });
mEdgeFlag[ed] = EXTERNAL_EDGE;
}
}
NV_FORCE_INLINE int32_t MeshNoiser::addVerticeIfNotExist(const Vertex& p)
{
auto it = mVertMap.find(p);
if (it == mVertMap.end())
{
mVertMap[p] = static_cast<int32_t>(mVertices.size());
mVertices.push_back(p);
return static_cast<int32_t>(mVertices.size()) - 1;
}
else
{
return it->second;
}
}
NV_FORCE_INLINE int32_t MeshNoiser::addEdge(const Edge& e)
{
Edge ed = e;
if (ed.e < ed.s)
std::swap(ed.s, ed.e);
auto it = mEdgeMap.find(ed);
if (it == mEdgeMap.end())
{
mTrMeshEdToTr.push_back(EdgeToTriangles());
mEdgeMap[ed] = (int)mEdgeMap.size();
mEdges.push_back(ed);
mEdgeFlag.push_back(INTERNAL_EDGE);
return (int32_t)mEdges.size() - 1;
}
else
{
return it->second;
}
}
NV_FORCE_INLINE int32_t MeshNoiser::findEdge(const Edge& e)
{
Edge ed = e;
if (ed.e < ed.s)
std::swap(ed.s, ed.e);
auto it = mEdgeMap.find(ed);
if (it == mEdgeMap.end())
{
return -1;
}
else
{
return it->second;
}
}
/**
Weld input vertices, build edge and triangle buffers
*/
void MeshNoiser::setMesh(const vector<Triangle>& mesh)
{
uint32_t a, b, c;
nvidia::NvBounds3 box;
box.setEmpty();
for (uint32_t i = 0; i < mesh.size(); ++i)
{
const Triangle& tr = mesh[i];
a = addVerticeIfNotExist(tr.a);
b = addVerticeIfNotExist(tr.b);
c = addVerticeIfNotExist(tr.c);
box.include(toNvShared(tr.a.p));
box.include(toNvShared(tr.b.p));
box.include(toNvShared(tr.c.p));
addEdge({ a, b });
addEdge({ b, c });
addEdge({ a, c });
mTriangles.push_back({a, b, c});
mTriangles.back().userData = tr.userData;
mTriangles.back().materialId = tr.materialId;
mTriangles.back().smoothingGroup = tr.smoothingGroup;
}
mOffset = box.getCenter();
mScale = max(box.getExtents(0), max(box.getExtents(1), box.getExtents(2)));
float invScale = 1.0f / mScale;
for (uint32_t i = 0; i < mVertices.size(); ++i)
{
mVertices[i].p = mVertices[i].p - fromNvShared(box.getCenter());
mVertices[i].p = mVertices[i].p * invScale;
}
}
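// Refine the interior (fracture) surface: pin vertices on external and border edges, then
// repeatedly collapse interior edges shorter than maxLen / 2 and split interior edges longer
// than maxLen, and finally recompute falloff distances and normals.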
void MeshNoiser::tesselateInternalSurface(float maxLenIn)
{
if (mTriangles.empty())
{
return;
}
updateEdgeTriangleInfo();
prebuildEdgeFlagArray();
mRestrictionFlag.resize(mVertices.size(), 0);
for (uint32_t i = 0; i < mEdges.size(); ++i)
{
if (mEdgeFlag[i] == EXTERNAL_EDGE || mEdgeFlag[i] == EXTERNAL_BORDER_EDGE || mEdgeFlag[i] == INTERNAL_BORDER_EDGE)
{
mRestrictionFlag[mEdges[i].s] = 1;
mRestrictionFlag[mEdges[i].e] = 1;
}
}
float maxLen = maxLenIn;
float mlSq = maxLen * maxLen;
float minD = maxLen * 0.5f;
minD = minD * minD;
for (int32_t iter = 0; iter < 15; ++iter)
{
updateVertEdgeInfo();
uint32_t oldSize = (uint32_t)mEdges.size();
for (uint32_t i = 0; i < oldSize; ++i)
{
if (mEdgeFlag[i] == EXTERNAL_EDGE || mEdgeFlag[i] == INTERNAL_BORDER_EDGE)
{
continue;
}
if (toNvShared(mVertices[mEdges[i].s].p - mVertices[mEdges[i].e].p).magnitudeSquared() < minD)
{
collapseEdge(i);
}
}
oldSize = (uint32_t)mEdges.size();
updateEdgeTriangleInfo();
for (uint32_t i = 0; i < oldSize; ++i)
{
if (mEdgeFlag[i] == EXTERNAL_EDGE)
{
continue;
}
if (toNvShared(mVertices[mEdges[i].s].p - mVertices[mEdges[i].e].p).magnitudeSquared() > mlSq)
{
divideEdge(i);
}
}
}
computeFalloffAndNormals();
prebuildTesselatedTriangles();
isTesselated = true;
}
void MeshNoiser::updateEdgeTriangleInfo()
{
mTrMeshEdToTr.clear();
mTrMeshEdToTr.resize(mEdges.size());
for (uint32_t i = 0; i < mTriangles.size(); ++i)
{
TriangleIndexed& tr = mTriangles[i];
if (tr.ea == kNotValidVertexIndex)
continue;
int32_t ed = addEdge({ tr.ea, tr.eb });
mTrMeshEdToTr[ed].add(i);
ed = addEdge({ tr.ea, tr.ec });
mTrMeshEdToTr[ed].add(i);
ed = addEdge({ tr.ec, tr.eb });
mTrMeshEdToTr[ed].add(i);
}
}
void MeshNoiser::updateVertEdgeInfo()
{
mVertexToTriangleMap.clear();
mVertexToTriangleMap.resize(mVertices.size());
for (uint32_t i = 0; i < mTriangles.size(); ++i)
{
TriangleIndexed& tr = mTriangles[i];
if (tr.ea == kNotValidVertexIndex)
continue;
mVertexToTriangleMap[tr.ea].push_back(i);
mVertexToTriangleMap[tr.eb].push_back(i);
mVertexToTriangleMap[tr.ec].push_back(i);
}
mVertexValence.clear();
mVertexValence.resize(mVertices.size(), 0);
for (uint32_t i = 0; i < mEdges.size(); ++i)
{
if (mTrMeshEdToTr[i].c != 0)
{
mVertexValence[mEdges[i].s]++;
mVertexValence[mEdges[i].e]++;
}
}
}
inline bool isContainEdge(const TriangleIndexed& t, uint32_t a, uint32_t b)
{
return (a == t.ea || a == t.eb || a == t.ec) && (b == t.ea || b == t.eb || b == t.ec);
}
void MeshNoiser::collapseEdge(int32_t id)
{
Edge cEdge = mEdges[id];
uint32_t from = cEdge.s;
uint32_t to = cEdge.e;
if (mRestrictionFlag[from] && mRestrictionFlag[to])
{
return;
}
if (mVertexValence[from] > mVertexValence[to])
{
std::swap(from, to);
}
if (mRestrictionFlag[from])
{
std::swap(from, to);
}
std::set<int32_t> connectedToBegin;
std::set<int32_t> connectedToEnd;
std::set<int32_t> neighborTriangles;
int32_t trWithEdge[2] = { -1, -1 };
int32_t cntr = 0;
for (uint32_t i = 0; i < mVertexToTriangleMap[from].size(); ++i)
{
if (mTriangles[mVertexToTriangleMap[from][i]].ea == kNotValidVertexIndex)
continue;
if (neighborTriangles.insert(mVertexToTriangleMap[from][i]).second &&
isContainEdge(mTriangles[mVertexToTriangleMap[from][i]] , from, to))
{
trWithEdge[cntr] = mVertexToTriangleMap[from][i];
cntr++;
}
}
for (uint32_t i = 0; i < mVertexToTriangleMap[to].size(); ++i)
{
if (mTriangles[mVertexToTriangleMap[to][i]].ea == kNotValidVertexIndex)
continue;
if (neighborTriangles.insert(mVertexToTriangleMap[to][i]).second &&
isContainEdge(mTriangles[mVertexToTriangleMap[to][i]], from, to))
{
trWithEdge[cntr] = mVertexToTriangleMap[to][i];
cntr++;
}
}
if (cntr == 0)
{
return;
}
if (cntr > 2)
{
return;
}
for (uint32_t i : neighborTriangles)
{
if (mTriangles[i].ea == from || mTriangles[i].eb == from || mTriangles[i].ec == from)
{
if (mTriangles[i].ea != to && mTriangles[i].ea != from)
connectedToBegin.insert(mTriangles[i].ea);
if (mTriangles[i].eb != to && mTriangles[i].eb != from)
connectedToBegin.insert(mTriangles[i].eb);
if (mTriangles[i].ec != to && mTriangles[i].ec != from)
connectedToBegin.insert(mTriangles[i].ec);
}
if (mTriangles[i].ea == to || mTriangles[i].eb == to || mTriangles[i].ec == to)
{
if (mTriangles[i].ea != to && mTriangles[i].ea != from)
connectedToEnd.insert(mTriangles[i].ea);
if (mTriangles[i].eb != to && mTriangles[i].eb != from)
connectedToEnd.insert(mTriangles[i].eb);
if (mTriangles[i].ec != to && mTriangles[i].ec != from)
connectedToEnd.insert(mTriangles[i].ec);
}
}
bool canBeCollapsed = true;
for (auto it = connectedToBegin.begin(); it != connectedToBegin.end(); ++it)
{
uint32_t currV = *it;
if (connectedToEnd.find(currV) == connectedToEnd.end())
continue;
bool found = false;
for (int32_t tr : neighborTriangles)
{
if ((mTriangles[tr].ea == from || mTriangles[tr].eb == from || mTriangles[tr].ec == from) &&
(mTriangles[tr].ea == to || mTriangles[tr].eb == to || mTriangles[tr].ec == to) &&
(mTriangles[tr].ea == currV || mTriangles[tr].eb == currV || mTriangles[tr].ec == currV))
{
found = true;
break;
}
}
if (!found)
{
canBeCollapsed = false;
break;
}
}
if (canBeCollapsed)
{
for (int32_t i : neighborTriangles)
{
if (trWithEdge[0] == i)
continue;
if (cntr == 2 && trWithEdge[1] == i)
continue;
TriangleIndexed tr = mTriangles[i];
NvVec3 oldNormal =
toNvShared(mVertices[tr.eb].p - mVertices[tr.ea].p).cross(toNvShared(mVertices[tr.ec].p - mVertices[tr.ea].p));
if (tr.ea == from)
{
tr.ea = to;
}
else if (tr.eb == from)
{
tr.eb = to;
}
else if (tr.ec == from)
{
tr.ec = to;
}
NvVec3 newNormal =
toNvShared(mVertices[tr.eb].p - mVertices[tr.ea].p).cross(toNvShared(mVertices[tr.ec].p - mVertices[tr.ea].p));
if (newNormal.magnitude() < 1e-8f)
{
canBeCollapsed = false;
break;
}
if (oldNormal.dot(newNormal) < 0)
{
canBeCollapsed = false;
break;
}
}
mTriangles[trWithEdge[0]].ea = kNotValidVertexIndex;
if (cntr == 2)
mTriangles[trWithEdge[1]].ea = kNotValidVertexIndex;
for (int32_t i : neighborTriangles)
{
if (mTriangles[i].ea == kNotValidVertexIndex)
continue;
if (mTriangles[i].ea == from)
{
mTriangles[i].ea = to;
mVertexToTriangleMap[from].clear();
mVertexToTriangleMap[to].push_back(i);
}
else if (mTriangles[i].eb == from)
{
mTriangles[i].eb = to;
mVertexToTriangleMap[from].clear();
mVertexToTriangleMap[to].push_back(i);
}
else if (mTriangles[i].ec == from)
{
mTriangles[i].ec = to;
mVertexToTriangleMap[from].clear();
mVertexToTriangleMap[to].push_back(i);
}
}
}
}
void MeshNoiser::divideEdge(int32_t id)
{
if (mTrMeshEdToTr[id].c == 0)
{
return;
}
Edge cEdge = mEdges[id];
EdgeFlag snapRestriction = mEdgeFlag[id];
Vertex middle;
uint32_t nv = kNotValidVertexIndex;
for (int32_t t = 0; t < mTrMeshEdToTr[id].c; ++t)
{
int32_t oldTriangleIndex = mTrMeshEdToTr[id].tr[t];
TriangleIndexed tr = mTriangles[mTrMeshEdToTr[id].tr[t]];
if (tr.ea == kNotValidVertexIndex)
{
continue;
}
uint32_t pbf[3];
pbf[0] = tr.ea;
pbf[1] = tr.eb;
pbf[2] = tr.ec;
for (int32_t p = 0; p < 3; ++p)
{
int32_t pnx = (p + 1) % 3;
int32_t opp = (p + 2) % 3;
if ((pbf[p] == cEdge.s && pbf[pnx] == cEdge.e) || (pbf[p] == cEdge.e && pbf[pnx] == cEdge.s))
{
if (nv == kNotValidVertexIndex)
{
middle.p = (mVertices[pbf[p]].p + mVertices[pbf[pnx]].p) * 0.5f;
middle.n = (mVertices[pbf[p]].n + mVertices[pbf[pnx]].n) * 0.5f;
middle.uv[0] = (mVertices[pbf[p]].uv[0] + mVertices[pbf[pnx]].uv[0]) * 0.5f;
nv = (uint32_t)mVertices.size();
mVertices.push_back(middle);
}
if (nv < mRestrictionFlag.size())
{
mRestrictionFlag[nv] =
((snapRestriction == EXTERNAL_BORDER_EDGE) || (snapRestriction == INTERNAL_BORDER_EDGE));
}
else
{
mRestrictionFlag.push_back((snapRestriction == EXTERNAL_BORDER_EDGE) ||
(snapRestriction == INTERNAL_BORDER_EDGE));
}
uint32_t ind1 = addEdge({ pbf[p], nv });
uint32_t ind2 = addEdge({ nv, pbf[pnx] });
uint32_t ind3 = addEdge({ nv, pbf[opp] });
mEdgeFlag[ind1] = snapRestriction;
mEdgeFlag[ind2] = snapRestriction;
mEdgeFlag[ind3] = INTERNAL_EDGE;
mTrMeshEdToTr[ind1].add(mTrMeshEdToTr[id].tr[t]);
int32_t userInfo = mTriangles[mTrMeshEdToTr[id].tr[t]].userData;
int32_t matId = mTriangles[mTrMeshEdToTr[id].tr[t]].materialId;
int32_t smId = mTriangles[mTrMeshEdToTr[id].tr[t]].smoothingGroup;
mTriangles[mTrMeshEdToTr[id].tr[t]] = {pbf[p], nv, pbf[opp]};
mTriangles[mTrMeshEdToTr[id].tr[t]].userData = userInfo;
mTriangles[mTrMeshEdToTr[id].tr[t]].materialId = matId;
mTriangles[mTrMeshEdToTr[id].tr[t]].smoothingGroup = smId;
mTrMeshEdToTr[ind2].add((int32_t)mTriangles.size());
mTrMeshEdToTr[ind3].add((int32_t)mTrMeshEdToTr[id].tr[t]);
mTrMeshEdToTr[ind3].add((int32_t)mTriangles.size());
mTriangles.push_back({nv, pbf[pnx], pbf[opp]});
mTriangles.back().userData = userInfo;
mTriangles.back().materialId = matId;
mTriangles.back().smoothingGroup = smId;
int32_t ed1 = findEdge({ pbf[pnx], pbf[opp] });
mTrMeshEdToTr[ed1].replace(oldTriangleIndex, (int32_t)mTriangles.size() - 1);
break;
}
}
}
}
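// Quadratic falloff: returns (x / mx)^2 clamped to [0, 1], so noise fades in smoothly with
// distance from the external surface.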
float falloffFunction(float x, float mx)
{
float t = (x) / (mx + 1e-6f);
t = std::min(1.0f, t);
return t * t;
}
void MeshNoiser::recalcNoiseDirs()
{
/**
Compute the normal directions along which noise will be applied
*/
mVerticesNormalsSmoothed.resize(mVertices.size(), NvVec3(0, 0, 0));
for (uint32_t i = 0; i < mTriangles.size(); ++i)
{
if (mTriangles[i].ea == kNotValidVertexIndex)
{
continue;
}
TriangleIndexed& tr = mTriangles[i];
if (tr.userData == 0)
continue;
if (tr.userData < 0)
mVerticesNormalsSmoothed[mPositionMappedVrt[tr.ea]] += toNvShared(mVertices[tr.ea].n).getNormalized();
else
mVerticesNormalsSmoothed[mPositionMappedVrt[tr.ea]] -= toNvShared(mVertices[tr.ea].n).getNormalized();
if (tr.userData < 0)
mVerticesNormalsSmoothed[mPositionMappedVrt[tr.eb]] += toNvShared(mVertices[tr.eb].n).getNormalized();
else
mVerticesNormalsSmoothed[mPositionMappedVrt[tr.eb]] -= toNvShared(mVertices[tr.eb].n).getNormalized();
if (tr.userData < 0)
mVerticesNormalsSmoothed[mPositionMappedVrt[tr.ec]] += toNvShared(mVertices[tr.ec].n).getNormalized();
else
mVerticesNormalsSmoothed[mPositionMappedVrt[tr.ec]] -= toNvShared(mVertices[tr.ec].n).getNormalized();
}
for (uint32_t i = 0; i < mVerticesNormalsSmoothed.size(); ++i)
{
mVerticesNormalsSmoothed[i] = mVerticesNormalsSmoothed[mPositionMappedVrt[i]];
mVerticesNormalsSmoothed[i].normalize();
}
}
void MeshNoiser::applyNoise(SimplexNoise& noise, float falloff, int32_t /*relaxIterations*/, float /*relaxFactor*/)
{
NVBLAST_ASSERT(isTesselated);
if (isTesselated == false)
{
return;
}
mRestrictionFlag.clear();
mRestrictionFlag.resize(mVertices.size(), false);
for (uint32_t i = 0; i < mEdges.size(); ++i)
{
if (mTrMeshEdToTr[i].c != 0)
{
if (mEdgeFlag[i] == EXTERNAL_EDGE || mEdgeFlag[i] == EXTERNAL_BORDER_EDGE)
{
mRestrictionFlag[mEdges[i].e] = true;
mRestrictionFlag[mEdges[i].s] = true;
}
}
}
std::vector<Vertex> localVertices = mVertices;
recalcNoiseDirs();
// relax(relaxIterations, relaxFactor, localVertices);
/**
Apply noise
*/
for (uint32_t i = 0; i < localVertices.size(); ++i)
{
if (!mRestrictionFlag[i])
{
float d = noise.sample(toNvShared(localVertices[i].p));
toNvShared(localVertices[i].p) +=
(falloffFunction(mVerticesDistances[i], falloff)) * mVerticesNormalsSmoothed[i] * d;
}
}
/* Recalculate smoothed normals */
mVerticesNormalsSmoothed.assign(mVerticesNormalsSmoothed.size(), NvVec3(0, 0, 0));
for (uint32_t i = 0; i < mTriangles.size(); ++i)
{
if (mTriangles[i].ea == kNotValidVertexIndex)
{
continue;
}
TriangleIndexed& tr = mTriangles[i];
if (tr.userData == 0)
continue;
Triangle pTr(localVertices[tr.ea], localVertices[tr.eb], localVertices[tr.ec]);
NvVec3 nrm = toNvShared(pTr.b.p - pTr.a.p).cross(toNvShared(pTr.c.p - pTr.a.p)).getNormalized();
mVerticesNormalsSmoothed[mPositionMappedVrt[tr.ea]] += nrm;
mVerticesNormalsSmoothed[mPositionMappedVrt[tr.eb]] += nrm;
mVerticesNormalsSmoothed[mPositionMappedVrt[tr.ec]] += nrm;
}
for (uint32_t i = 0; i < mVerticesNormalsSmoothed.size(); ++i)
{
mVerticesNormalsSmoothed[i] = mVerticesNormalsSmoothed[mPositionMappedVrt[i]];
mVerticesNormalsSmoothed[i].normalize();
}
for (uint32_t i = 0; i < mTriangles.size(); ++i)
{
if (mTriangles[i].ea == kNotValidVertexIndex)
{
continue;
}
TriangleIndexed& tr = mTriangles[i];
if (tr.userData == 0)
continue;
localVertices[tr.ea].n = fromNvShared(mVerticesNormalsSmoothed[mPositionMappedVrt[tr.ea]]);
localVertices[tr.eb].n = fromNvShared(mVerticesNormalsSmoothed[mPositionMappedVrt[tr.eb]]);
localVertices[tr.ec].n = fromNvShared(mVerticesNormalsSmoothed[mPositionMappedVrt[tr.ec]]);
}
mResultTriangles.clear();
for (uint32_t i = 0; i < mTriangles.size(); ++i)
{
if (mTriangles[i].ea == kNotValidVertexIndex)
{
continue;
}
mResultTriangles.push_back({ localVertices[mTriangles[i].ea], localVertices[mTriangles[i].eb],
localVertices[mTriangles[i].ec], mTriangles[i].userData, mTriangles[i].materialId,
mTriangles[i].smoothingGroup });
}
}
void MeshNoiser::prebuildTesselatedTriangles()
{
mResultTriangles.clear();
for (uint32_t i = 0; i < mVertices.size(); ++i)
{
mVertices[i].p = mVertices[i].p * mScale + fromNvShared(mOffset);
}
for (uint32_t i = 0; i < mTriangles.size(); ++i)
{
if (mTriangles[i].ea == kNotValidVertexIndex)
{
continue;
}
mResultTriangles.push_back({ mVertices[mTriangles[i].ea], mVertices[mTriangles[i].eb], mVertices[mTriangles[i].ec],
mTriangles[i].userData, mTriangles[i].materialId, mTriangles[i].smoothingGroup });
}
}
std::vector<Triangle> MeshNoiser::getMesh()
{
return mResultTriangles;
}
void MeshNoiser::reset()
{
mVertices.clear();
mTriangles.clear();
mEdges.clear();
mVertMap.clear();
mEdgeMap.clear();
mResultTriangles.clear();
mRestrictionFlag.clear();
mEdgeFlag.clear();
mTrMeshEdToTr.clear();
mVertexValence.clear();
mVertexToTriangleMap.clear();
mVerticesDistances.clear();
mVerticesNormalsSmoothed.clear();
mPositionMappedVrt.clear();
mGeometryGraph.clear();
isTesselated = false;
mOffset = NvVec3(0, 0, 0);
mScale = 1.0f;
} | 31,963 | C++ | 31.48374 | 127 | 0.548384 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringBondGeneratorImpl.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTEXTAUTHORINGBONDGENERATORIMPL_H
#define NVBLASTEXTAUTHORINGBONDGENERATORIMPL_H
#include "NvBlastExtAuthoringBondGenerator.h"
#include "NvBlastExtAuthoringFractureTool.h"
#include "NvPlane.h"
#include <NvBlastExtAuthoringConvexMeshBuilder.h>
#include <vector>
#include <set>
namespace Nv
{
namespace Blast
{
/**
Tool for gathering bond information from provided mesh geometry
*/
class BlastBondGeneratorImpl : public BlastBondGenerator
{
public:
BlastBondGeneratorImpl(ConvexMeshBuilder* builder)
: mConvexMeshBuilder(builder) {};
virtual void release() override;
virtual int32_t buildDescFromInternalFracture(FractureTool* tool, const bool* chunkIsSupport,
NvBlastBondDesc*& resultBondDescs, NvBlastChunkDesc*& resultChunkDescriptors) override;
virtual int32_t createBondBetweenMeshes(uint32_t meshACount, const Triangle* meshA, uint32_t meshBCount, const Triangle* meshB,
NvBlastBond& resultBond, BondGenerationConfig conf) override;
virtual int32_t createBondBetweenMeshes(uint32_t meshCount, const uint32_t* geometryOffset, const Triangle* geometry,
uint32_t overlapsCount, const uint32_t* overlapsA, const uint32_t* overlapsB,
NvBlastBondDesc*& resultBond, BondGenerationConfig cfg) override;
virtual int32_t bondsFromPrefractured(uint32_t meshCount, const uint32_t* geometryOffset, const Triangle* geometry,
const bool* chunkIsSupport, NvBlastBondDesc*& resultBondDescs,
BondGenerationConfig conf) override;
virtual int32_t bondsFromPrefractured(uint32_t meshCount, const uint32_t* convexHullOffset, const CollisionHull** chunkHulls,
const bool* chunkIsSupport, const uint32_t* meshGroups, NvBlastBondDesc*& resultBondDescs, float maxSeparation) override;
private:
float processWithMidplanes(TriangleProcessor* trProcessor, const Triangle* mA, uint32_t mavc, const Triangle* mB, uint32_t mbvc, const CollisionHull* hull1, const CollisionHull* hull2,
const std::vector<nvidia::NvVec3>& hull1p, const std::vector<nvidia::NvVec3>& hull2p,
nvidia::NvVec3& normal, nvidia::NvVec3& centroid, float maxRelSeparation);
int32_t createFullBondListAveraged( uint32_t meshCount, const uint32_t* geometryOffset, const Triangle* geometry, const CollisionHull** chunkHulls,
const bool* supportFlags, const uint32_t* meshGroups, NvBlastBondDesc*& resultBondDescs, BondGenerationConfig conf, std::set<std::pair<uint32_t, uint32_t> >* pairNotToTest = nullptr);
int32_t createFullBondListExact( uint32_t meshCount, const uint32_t* geometryOffset, const Triangle* geometry,
const bool* supportFlags, NvBlastBondDesc*& resultBondDescs, BondGenerationConfig conf);
int32_t createFullBondListExactInternal(uint32_t meshCount, const uint32_t* geometryOffset, const Triangle* geometry,
std::vector<PlaneChunkIndexer>& planeTriangleMapping , NvBlastBondDesc*& resultBondDescs);
int32_t createBondForcedInternal( const std::vector<nvidia::NvVec3>& hull0, const std::vector<nvidia::NvVec3>& hull1,const CollisionHull& cHull0,
const CollisionHull& cHull1, nvidia::NvBounds3 bound0, nvidia::NvBounds3 bound1, NvBlastBond& resultBond, float overlapping);
void buildGeometryCache(uint32_t meshCount, const uint32_t* geometryOffset, const Triangle* geometry);
void resetGeometryCache();
ConvexMeshBuilder* mConvexMeshBuilder;
std::vector<std::vector<Triangle> > mGeometryCache;
std::vector<PlaneChunkIndexer> mPlaneCache;
std::vector<CollisionHull*> mCHullCache;
std::vector<std::vector<nvidia::NvVec3> > mHullsPointsCache;
std::vector<nvidia::NvBounds3 > mBoundsCache;
};
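/**
Illustrative usage sketch (editorial addition, not part of the original header). Assuming a valid
ConvexMeshBuilder* (convexMeshBuilder), a fractured FractureTool* (fractureTool) and a per-chunk
support flag array (chunkIsSupport) - all hypothetical names - bond and chunk descriptors could be
generated roughly as follows, using the factory NvBlastExtAuthoringCreateBondGenerator() implemented
in NvBlastExtAuthoring.cpp:

    Nv::Blast::BlastBondGenerator* bondGen = NvBlastExtAuthoringCreateBondGenerator(convexMeshBuilder);
    NvBlastBondDesc* bondDescs = nullptr;
    NvBlastChunkDesc* chunkDescs = nullptr;
    const int32_t bondCount = bondGen->buildDescFromInternalFracture(fractureTool, chunkIsSupport, bondDescs, chunkDescs);
    // ... feed bondDescs / chunkDescs into an NvBlastAssetDesc ...
    bondGen->release();
*/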
} // namespace Blast
} // namespace Nv
#endif // NVBLASTEXTAUTHORINGBONDGENERATORIMPL_H | 5,635 | C | 51.672897 | 223 | 0.730612 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoring.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include "NvBlastExtAuthoring.h"
#include "NvBlastTypes.h"
#include "NvBlastIndexFns.h"
#include "NvBlast.h"
#include "NvBlastAssert.h"
#include "NvBlastGlobals.h"
#include "NvBlastExtAssetUtils.h"
#include "NvBlastExtAuthoringPatternGeneratorImpl.h"
#include "NvBlastExtAuthoringBooleanToolImpl.h"
#include "NvBlastExtAuthoringAcceleratorImpl.h"
#include "NvBlastExtAuthoringMeshImpl.h"
#include "NvBlastExtAuthoringMeshCleanerImpl.h"
#include "NvBlastExtAuthoringFractureToolImpl.h"
#include "NvBlastExtAuthoringBondGeneratorImpl.h"
#include "NvBlastExtAuthoringCollisionBuilderImpl.h"
#include "NvBlastExtAuthoringCutoutImpl.h"
#include "NvBlastExtAuthoringInternalCommon.h"
#include "NvBlastNvSharedHelpers.h"
#include <algorithm>
#include <memory>
using namespace Nv::Blast;
using namespace nvidia;
#define SAFE_ARRAY_NEW(T, x) ((x) > 0) ? reinterpret_cast<T*>(NVBLAST_ALLOC(sizeof(T) * (x))) : nullptr;
#define SAFE_ARRAY_DELETE(x) if (x != nullptr) {NVBLAST_FREE(x); x = nullptr;}
Mesh* NvBlastExtAuthoringCreateMesh(const NvcVec3* position, const NvcVec3* normals, const NvcVec2* uv, uint32_t verticesCount, const uint32_t* indices, uint32_t indicesCount)
{
return new MeshImpl(position, normals, uv, verticesCount, indices, indicesCount);
}
Mesh* NvBlastExtAuthoringCreateMeshOnlyTriangles(const void* Vertices, uint32_t vcount, uint32_t* indices, uint32_t indexCount, void* materials, uint32_t materialStride)
{
return new MeshImpl((Vertex*)Vertices, vcount, indices, indexCount, materials, materialStride);
}
Mesh* NvBlastExtAuthoringCreateMeshFromFacets(const void* vertices, const void* edges, const void* facets, uint32_t verticesCount, uint32_t edgesCount, uint32_t facetsCount)
{
return new MeshImpl((Vertex*)vertices, (Edge*)edges, (Facet*)facets, verticesCount, edgesCount, facetsCount);
}
MeshCleaner* NvBlastExtAuthoringCreateMeshCleaner()
{
return new MeshCleanerImpl;
}
VoronoiSitesGenerator* NvBlastExtAuthoringCreateVoronoiSitesGenerator(Mesh* mesh, RandomGeneratorBase* rng)
{
return new VoronoiSitesGeneratorImpl(mesh, rng);
}
CutoutSet* NvBlastExtAuthoringCreateCutoutSet()
{
return new CutoutSetImpl();
}
void NvBlastExtAuthoringBuildCutoutSet(CutoutSet& cutoutSet, const uint8_t* pixelBuffer, uint32_t bufferWidth, uint32_t bufferHeight,
float segmentationErrorThreshold, float snapThreshold, bool periodic, bool expandGaps)
{
::createCutoutSet(*(CutoutSetImpl*)&cutoutSet, pixelBuffer, bufferWidth, bufferHeight, segmentationErrorThreshold, snapThreshold, periodic, expandGaps);
}
FractureTool* NvBlastExtAuthoringCreateFractureTool()
{
return new FractureToolImpl;
}
BlastBondGenerator* NvBlastExtAuthoringCreateBondGenerator(Nv::Blast::ConvexMeshBuilder* builder)
{
return new BlastBondGeneratorImpl(builder);
}
int32_t NvBlastExtAuthoringBuildMeshConvexDecomposition(ConvexMeshBuilder* cmb, const Nv::Blast::Triangle* mesh,
uint32_t triangleCount,
const ConvexDecompositionParams& params,
CollisionHull**& convexes)
{
NVBLAST_ASSERT(cmb != nullptr);
return buildMeshConvexDecomposition(*cmb, mesh, triangleCount, params, convexes);
}
void NvBlastExtAuthoringTrimCollisionGeometry(ConvexMeshBuilder* cmb, uint32_t chunksCount,
Nv::Blast::CollisionHull** in, const uint32_t* chunkDepth)
{
return trimCollisionGeometry(*cmb, chunksCount, in, chunkDepth);
}
void NvBlastExtAuthoringTransformCollisionHullInPlace(CollisionHull* hull, const NvcVec3* scaling, const NvcQuat* rotation, const NvcVec3* translation)
{
// Local copies of scaling (S), rotation (R), and translation (T)
nvidia::NvVec3 S = { 1, 1, 1 };
nvidia::NvQuat R = { 0, 0, 0, 1 };
nvidia::NvVec3 T = { 0, 0, 0 };
nvidia::NvVec3 cofS = { 1, 1, 1 };
float sgnDetS = 1;
{
if (rotation)
{
R = *toNvShared(rotation);
}
if (scaling)
{
S = *toNvShared(scaling);
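// Plane normals do not transform like points under a non-uniform scale: they transform with the
// inverse-transpose, which for a diagonal scale S is the cofactor vector computed below
// (re-normalized later), with the sign of det(S) tracked separately in sgnDetS.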
cofS.x = S.y * S.z;
cofS.y = S.z * S.x;
cofS.z = S.x * S.y;
sgnDetS = (S.x * S.y * S.z < 0) ? -1 : 1;
}
if (translation)
{
T = *toNvShared(translation);
}
}
const uint32_t pointCount = hull->pointsCount;
for (uint32_t pi = 0; pi < pointCount; pi++)
{
nvidia::NvVec3& p = toNvShared(hull->points[pi]);
p = (R.rotate(p.multiply(S)) + T);
}
const uint32_t planeCount = hull->polygonDataCount;
for (uint32_t pi = 0; pi < planeCount; pi++)
{
float* plane = hull->polygonData[pi].plane;
nvidia::NvPlane nvPlane(plane[0], plane[1], plane[2], plane[3]);
NvVec3 transformedNormal = sgnDetS*R.rotate(nvPlane.n.multiply(cofS)).getNormalized();
NvVec3 transformedPt = R.rotate(nvPlane.pointInPlane().multiply(S)) + T;
nvidia::NvPlane transformedPlane(transformedPt, transformedNormal);
plane[0] = transformedPlane.n[0];
plane[1] = transformedPlane.n[1];
plane[2] = transformedPlane.n[2];
plane[3] = transformedPlane.d;
}
}
CollisionHull* NvBlastExtAuthoringTransformCollisionHull(const CollisionHull* hull, const NvcVec3* scaling, const NvcQuat* rotation, const NvcVec3* translation)
{
CollisionHull* ret = new CollisionHull(*hull);
ret->points = SAFE_ARRAY_NEW(NvcVec3, ret->pointsCount);
ret->indices = SAFE_ARRAY_NEW(uint32_t, ret->indicesCount);
ret->polygonData = SAFE_ARRAY_NEW(HullPolygon, ret->polygonDataCount);
memcpy(ret->points, hull->points, sizeof(ret->points[0]) * ret->pointsCount);
memcpy(ret->indices, hull->indices, sizeof(ret->indices[0]) * ret->indicesCount);
memcpy(ret->polygonData, hull->polygonData, sizeof(ret->polygonData[0]) * ret->polygonDataCount);
NvBlastExtAuthoringTransformCollisionHullInPlace(ret, scaling, rotation, translation);
return ret;
}
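// Helper used below: fills result.collisionHull / result.collisionHullOffset for each chunk.
// With params.maximumNumberOfHulls == 1 a single hull is built directly from the chunk geometry;
// otherwise convex decomposition is run, optionally restricted to the chunks listed in chunksToProcess
// (existing hulls of the other chunks are kept).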
void buildPhysicsChunks(ConvexMeshBuilder& collisionBuilder, AuthoringResult& result, const ConvexDecompositionParams& params, uint32_t chunksToProcessCount = 0, uint32_t* chunksToProcess = nullptr)
{
uint32_t chunkCount = (uint32_t)result.chunkCount;
if (params.maximumNumberOfHulls == 1)
{
result.collisionHullOffset = SAFE_ARRAY_NEW(uint32_t, chunkCount + 1);
result.collisionHullOffset[0] = 0;
result.collisionHull = SAFE_ARRAY_NEW(CollisionHull*, chunkCount);
for (uint32_t i = 0; i < chunkCount; ++i)
{
std::vector<NvcVec3> vertices;
for (uint32_t p = result.geometryOffset[i]; p < result.geometryOffset[i + 1]; ++p)
{
Nv::Blast::Triangle& tri = result.geometry[p];
vertices.push_back(tri.a.p);
vertices.push_back(tri.b.p);
vertices.push_back(tri.c.p);
}
result.collisionHullOffset[i + 1] = result.collisionHullOffset[i] + 1;
result.collisionHull[i] = collisionBuilder.buildCollisionGeometry((uint32_t)vertices.size(), vertices.data());
}
}
else
{
std::set<int32_t> chunkSet;
for (uint32_t c = 0; c < chunksToProcessCount; c++)
{
chunkSet.insert(chunksToProcess[c]);
}
std::vector<std::vector<CollisionHull*> > hulls(chunkCount);
int32_t totalHulls = 0;
for (uint32_t i = 0; i < chunkCount; ++i)
{
if (chunkSet.size() > 0 && chunkSet.find(i) == chunkSet.end())
{
int32_t newHulls = result.collisionHullOffset[i + 1] - result.collisionHullOffset[i];
int32_t off = result.collisionHullOffset[i];
for (int32_t subhull = 0; subhull < newHulls; ++subhull)
{
hulls[i].push_back(result.collisionHull[off + subhull]);
}
totalHulls += newHulls;
continue;
}
CollisionHull** tempHull;
int32_t newHulls =
buildMeshConvexDecomposition(collisionBuilder, result.geometry + result.geometryOffset[i],
result.geometryOffset[i + 1] - result.geometryOffset[i], params, tempHull);
totalHulls += newHulls;
for (int32_t h = 0; h < newHulls; ++h)
{
hulls[i].push_back(tempHull[h]);
}
SAFE_ARRAY_DELETE(tempHull);
}
result.collisionHullOffset = SAFE_ARRAY_NEW(uint32_t, chunkCount + 1);
result.collisionHullOffset[0] = 0;
result.collisionHull = SAFE_ARRAY_NEW(CollisionHull*, totalHulls);
for (uint32_t i = 0; i < chunkCount; ++i)
{
result.collisionHullOffset[i + 1] = result.collisionHullOffset[i] + hulls[i].size();
int32_t off = result.collisionHullOffset[i];
for (uint32_t subhull = 0; subhull < hulls[i].size(); ++subhull)
{
result.collisionHull[off + subhull] = hulls[i][subhull];
}
}
}
}
void NvBlastExtAuthoringReleaseAuthoringResultCollision(Nv::Blast::ConvexMeshBuilder& collisionBuilder, Nv::Blast::AuthoringResult* ar)
{
if (ar->collisionHull != nullptr)
{
for (uint32_t ch = 0; ch < ar->collisionHullOffset[ar->chunkCount]; ch++)
{
collisionBuilder.releaseCollisionHull(ar->collisionHull[ch]);
}
SAFE_ARRAY_DELETE(ar->collisionHullOffset);
SAFE_ARRAY_DELETE(ar->collisionHull);
}
}
void NvBlastExtAuthoringReleaseAuthoringResult(Nv::Blast::ConvexMeshBuilder& collisionBuilder, Nv::Blast::AuthoringResult* ar)
{
NvBlastExtAuthoringReleaseAuthoringResultCollision(collisionBuilder, ar);
if (ar->asset)
{
NVBLAST_FREE(ar->asset);
ar->asset = nullptr;
}
SAFE_ARRAY_DELETE(ar->assetToFractureChunkIdMap);
SAFE_ARRAY_DELETE(ar->geometryOffset);
SAFE_ARRAY_DELETE(ar->geometry);
SAFE_ARRAY_DELETE(ar->chunkDescs);
SAFE_ARRAY_DELETE(ar->bondDescs);
delete ar;
}
static float getGeometryVolumeAndCentroid(NvcVec3& centroid, const Nv::Blast::Triangle* tris, size_t triCount)
{
class GeometryQuery
{
public:
GeometryQuery(const Nv::Blast::Triangle* tris, size_t triCount) : m_tris(tris), m_triCount(triCount) {}
size_t faceCount() const { return m_triCount; }
size_t vertexCount(size_t faceIndex) const { NV_UNUSED(faceIndex); return 3; }
NvcVec3 vertex(size_t faceIndex, size_t vertexIndex) const
{
const Nv::Blast::Triangle& tri = m_tris[faceIndex];
switch (vertexIndex)
{
case 0: return tri.a.p;
case 1: return tri.b.p;
case 2: return tri.c.p;
}
return NvcVec3({0.0f, 0.0f, 0.0f});
}
const Nv::Blast::Triangle* m_tris;
size_t m_triCount;
};
return calculateMeshVolumeAndCentroid<GeometryQuery>(centroid, {tris, triCount});
}
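// High-level authoring pipeline: finalize the fracture, mark support chunks by depth, generate bond and
// chunk descriptors, reorder chunks for exact support coverage, gather per-chunk render geometry,
// build collision hulls, derive chunk volumes/centroids (falling back to the mesh when a hull is missing),
// and finally create the NvBlastAsset from the assembled descriptor.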
AuthoringResult* NvBlastExtAuthoringProcessFracture(FractureTool& fTool, BlastBondGenerator& bondGenerator, ConvexMeshBuilder& collisionBuilder, const ConvexDecompositionParams& collisionParam, int32_t defaultSupportDepth)
{
fTool.finalizeFracturing();
const uint32_t chunkCount = fTool.getChunkCount();
if (chunkCount == 0)
{
return nullptr;
}
AuthoringResult* ret = new AuthoringResult;
if (ret == nullptr)
{
return nullptr;
}
AuthoringResult& aResult = *ret;
aResult.chunkCount = chunkCount;
std::shared_ptr<bool> isSupport(new bool[chunkCount], [](bool* b) {delete[] b; });
memset(isSupport.get(), 0, sizeof(bool) * chunkCount);
for (uint32_t i = 0; i < fTool.getChunkCount(); ++i)
{
if (defaultSupportDepth < 0 || fTool.getChunkDepth(fTool.getChunkId(i)) < defaultSupportDepth)
{
isSupport.get()[i] = fTool.getChunkInfo(i).isLeaf;
}
else if (fTool.getChunkDepth(fTool.getChunkId(i)) == defaultSupportDepth)
{
isSupport.get()[i] = true;
}
}
const uint32_t bondCount = bondGenerator.buildDescFromInternalFracture(&fTool, isSupport.get(), aResult.bondDescs, aResult.chunkDescs);
aResult.bondCount = bondCount;
if (bondCount == 0)
{
aResult.bondDescs = nullptr;
}
// order chunks, build map
std::vector<uint32_t> chunkReorderInvMap;
{
std::vector<uint32_t> chunkReorderMap(chunkCount);
std::vector<char> scratch(chunkCount * sizeof(NvBlastChunkDesc));
NvBlastEnsureAssetExactSupportCoverage(aResult.chunkDescs, chunkCount, scratch.data(), logLL);
NvBlastBuildAssetDescChunkReorderMap(chunkReorderMap.data(), aResult.chunkDescs, chunkCount, scratch.data(), logLL);
NvBlastApplyAssetDescChunkReorderMapInPlace(aResult.chunkDescs, chunkCount, aResult.bondDescs, bondCount, chunkReorderMap.data(), true, scratch.data(), logLL);
chunkReorderInvMap.resize(chunkReorderMap.size());
Nv::Blast::invertMap(chunkReorderInvMap.data(), chunkReorderMap.data(), static_cast<unsigned int>(chunkReorderMap.size()));
}
// get result geometry
aResult.geometryOffset = SAFE_ARRAY_NEW(uint32_t, chunkCount + 1);
aResult.assetToFractureChunkIdMap = SAFE_ARRAY_NEW(uint32_t, chunkCount + 1);
aResult.geometryOffset[0] = 0;
std::vector<Nv::Blast::Triangle*> chunkGeometry(chunkCount);
for (uint32_t i = 0; i < chunkCount; ++i)
{
uint32_t chunkInfoIndex = chunkReorderInvMap[i];
aResult.geometryOffset[i+1] = aResult.geometryOffset[i] + fTool.getBaseMesh(chunkInfoIndex, chunkGeometry[i]);
aResult.assetToFractureChunkIdMap[i] = fTool.getChunkId(chunkInfoIndex);
}
aResult.geometry = SAFE_ARRAY_NEW(Triangle, aResult.geometryOffset[chunkCount]);
for (uint32_t i = 0; i < chunkCount; ++i)
{
uint32_t trianglesCount = aResult.geometryOffset[i + 1] - aResult.geometryOffset[i];
memcpy(aResult.geometry + aResult.geometryOffset[i], chunkGeometry[i], trianglesCount * sizeof(Nv::Blast::Triangle));
delete chunkGeometry[i];
chunkGeometry[i] = nullptr;
}
float maxX = -FLT_MAX;
float maxY = -FLT_MAX;
float maxZ = -FLT_MAX;
float minX = FLT_MAX;
float minY = FLT_MAX;
float minZ = FLT_MAX;
for (uint32_t i = 0; i < bondCount; i++)
{
NvBlastBondDesc& bondDesc = aResult.bondDescs[i];
minX = std::min(minX, bondDesc.bond.centroid[0]);
maxX = std::max(maxX, bondDesc.bond.centroid[0]);
minY = std::min(minY, bondDesc.bond.centroid[1]);
maxY = std::max(maxY, bondDesc.bond.centroid[1]);
minZ = std::min(minZ, bondDesc.bond.centroid[2]);
maxZ = std::max(maxZ, bondDesc.bond.centroid[2]);
}
// prepare physics data (convexes)
buildPhysicsChunks(collisionBuilder, aResult, collisionParam);
// set NvBlastChunk volume and centroid from CollisionHull
for (uint32_t i = 0; i < chunkCount; i++)
{
float totalVolume = 0.f;
NvcVec3 totalCentroid = {0.0f, 0.0f, 0.0f};
for (uint32_t k = aResult.collisionHullOffset[i]; k < aResult.collisionHullOffset[i+1]; k++)
{
const CollisionHull* hull = aResult.collisionHull[k];
if (hull)
{
NvcVec3 centroid;
const float volume = calculateCollisionHullVolumeAndCentroid(centroid, *hull);
totalVolume += volume;
totalCentroid = totalCentroid + volume*centroid;
}
else
{
totalVolume = 0.0f; // Found a null hull, signal this with zero volume
break;
}
}
if (totalVolume > 0.0f)
{
totalCentroid = totalCentroid / totalVolume;
aResult.chunkDescs[i].volume = totalVolume;
aResult.chunkDescs[i].centroid[0] = totalCentroid.x;
aResult.chunkDescs[i].centroid[1] = totalCentroid.y;
aResult.chunkDescs[i].centroid[2] = totalCentroid.z;
}
else
{
// Fallback to using mesh
size_t triCount = aResult.geometryOffset[i+1] - aResult.geometryOffset[i];
const Nv::Blast::Triangle* tris = aResult.geometry + aResult.geometryOffset[i];
NvcVec3 centroid;
aResult.chunkDescs[i].volume = getGeometryVolumeAndCentroid(centroid, tris, triCount);
aResult.chunkDescs[i].centroid[0] = centroid.x;
aResult.chunkDescs[i].centroid[1] = centroid.y;
aResult.chunkDescs[i].centroid[2] = centroid.z;
}
}
// build and serialize ExtPhysicsAsset
NvBlastAssetDesc descriptor;
descriptor.bondCount = bondCount;
descriptor.bondDescs = aResult.bondDescs;
descriptor.chunkCount = chunkCount;
descriptor.chunkDescs = aResult.chunkDescs;
std::vector<uint8_t> scratch(static_cast<unsigned int>(NvBlastGetRequiredScratchForCreateAsset(&descriptor, logLL)));
void* mem = NVBLAST_ALLOC(NvBlastGetAssetMemorySize(&descriptor, logLL));
aResult.asset = NvBlastCreateAsset(mem, &descriptor, scratch.data(), logLL);
//aResult.asset = std::shared_ptr<NvBlastAsset>(asset, [=](NvBlastAsset* asset)
//{
// NVBLAST_FREE(asset);
//});
//std::cout << "Done" << std::endl;
ret->materialCount = 0;
ret->materialNames = nullptr;
return ret;
}
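// Searches for bonds connecting chunks that belong to different (already authored) components.
// Each component's hulls are transformed into a shared space, support flags are gathered from the
// per-component support graphs, and bondsFromPrefractured() is run on the combined set; the resulting
// bond descriptors are then remapped back to per-component chunk indices.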
uint32_t NvBlastExtAuthoringFindAssetConnectingBonds
(
const NvBlastAsset** components,
const NvcVec3* scales,
const NvcQuat* rotations,
const NvcVec3* translations,
const uint32_t** convexHullOffsets,
const CollisionHull*** chunkHulls,
uint32_t componentCount,
NvBlastExtAssetUtilsBondDesc*& newBondDescs,
float maxSeparation
)
{
//We don't need any of the cooking-related parts of this
BlastBondGeneratorImpl bondGenerator(nullptr);
std::vector<uint32_t> componentChunkOffsets;
componentChunkOffsets.reserve(componentCount + 1);
componentChunkOffsets.push_back(0);
std::vector<uint32_t> combinedConvexHullOffsets;
std::vector<const CollisionHull*> combinedConvexHulls;
std::vector<CollisionHull*> hullsToRelease;
combinedConvexHullOffsets.push_back(0);
std::vector<uint32_t> originalComponentIndex;
const nvidia::NvVec3 identityScale(1);
//Combine our hull lists into a single combined list for bondsFromPrefractured
for (uint32_t c = 0; c < componentCount; c++)
{
const uint32_t chunkCount = NvBlastAssetGetChunkCount(components[c], &logLL);
const NvcVec3* scale = scales ? scales + c : nullptr;
const NvcQuat* rotation = rotations ? rotations + c : nullptr;
const NvcVec3* translation = translations ? translations + c : nullptr;
componentChunkOffsets.push_back(chunkCount + componentChunkOffsets.back());
for (uint32_t chunk = 0; chunk < chunkCount; chunk++)
{
const uint32_t hullsStart = convexHullOffsets[c][chunk];
const uint32_t hullsEnd = convexHullOffsets[c][chunk + 1];
for (uint32_t hull = hullsStart; hull < hullsEnd; hull++)
{
if ((scale != nullptr && *toNvShared(scale) != identityScale) ||
(rotation != nullptr && !toNvShared(rotation)->isIdentity()) ||
(translation != nullptr && !toNvShared(translation)->isZero()))
{
hullsToRelease.emplace_back(NvBlastExtAuthoringTransformCollisionHull(chunkHulls[c][hull], scale, rotation, translation));
combinedConvexHulls.emplace_back(hullsToRelease.back());
}
else
{
//No need to transform
combinedConvexHulls.emplace_back(chunkHulls[c][hull]);
}
}
combinedConvexHullOffsets.push_back((hullsEnd - hullsStart) + combinedConvexHullOffsets.back());
originalComponentIndex.push_back(c);
}
}
const uint32_t totalChunkCount = componentChunkOffsets.back();
//Can't use std::vector<bool> since we need a bool* later
std::unique_ptr<bool[]> isSupportChunk(new bool[totalChunkCount]);
for (uint32_t c = 0; c < componentCount; c++)
{
const uint32_t chunkCount = componentChunkOffsets[c + 1] - componentChunkOffsets[c];
NvBlastSupportGraph supportGraph = NvBlastAssetGetSupportGraph(components[c], &logLL);
for (uint32_t chunk = 0; chunk < chunkCount; chunk++)
{
auto chunkIndicesEnd = supportGraph.chunkIndices + supportGraph.nodeCount;
isSupportChunk[chunk + componentChunkOffsets[c]] = (std::find(supportGraph.chunkIndices, chunkIndicesEnd, chunk) != chunkIndicesEnd);
}
}
//Find the bonds
NvBlastBondDesc* newBonds = nullptr;
const int32_t newBondCount = bondGenerator.bondsFromPrefractured(totalChunkCount, combinedConvexHullOffsets.data(), combinedConvexHulls.data(), isSupportChunk.get(), originalComponentIndex.data(), newBonds, maxSeparation);
//Convert the bonds back to per-component chunks
newBondDescs = SAFE_ARRAY_NEW(NvBlastExtAssetUtilsBondDesc, newBondCount);
for (int32_t nb = 0; nb < newBondCount; ++nb)
{
newBondDescs[nb].bond = newBonds[nb].bond;
for (uint32_t ci = 0; ci < 2; ++ci)
{
uint32_t absChunkIdx = newBonds[nb].chunkIndices[ci];
uint32_t componentIdx = originalComponentIndex[absChunkIdx];
newBondDescs[nb].componentIndices[ci] = componentIdx;
newBondDescs[nb].chunkIndices[ci] = absChunkIdx - componentChunkOffsets[componentIdx];
}
}
//Don't need this anymore
NVBLAST_FREE(newBonds);
// These hulls were generated by NvBlastExtAuthoringTransformCollisionHull, which uses SAFE_ARRAY_NEW
// to allocate the arrays referenced in each hull. Be sure to delete the array pointers here before
// deleting the CollisionHull structs.
for (CollisionHull* hull : hullsToRelease)
{
SAFE_ARRAY_DELETE(hull->indices);
SAFE_ARRAY_DELETE(hull->points);
SAFE_ARRAY_DELETE(hull->polygonData);
delete hull;
}
return newBondCount;
}
void NvBlastExtAuthoringUpdateGraphicsMesh(Nv::Blast::FractureTool& fTool, Nv::Blast::AuthoringResult& aResult)
{
uint32_t chunkCount = fTool.getChunkCount();
for (uint32_t i = 0; i < chunkCount; ++i)
{
fTool.updateBaseMesh(fTool.getChunkInfoIndex(aResult.assetToFractureChunkIdMap[i]), aResult.geometry + aResult.geometryOffset[i]);
}
}
void NvBlastExtAuthoringBuildCollisionMeshes(Nv::Blast::AuthoringResult& ares, Nv::Blast::ConvexMeshBuilder& collisionBuilder,
const Nv::Blast::ConvexDecompositionParams& collisionParam, uint32_t chunksToProcessCount, uint32_t* chunksToProcess)
{
buildPhysicsChunks(collisionBuilder, ares, collisionParam, chunksToProcessCount, chunksToProcess);
}
PatternGenerator* NvBlastExtAuthoringCreatePatternGenerator()
{
return NVBLAST_NEW(PatternGeneratorImpl);
}
SpatialGrid* NvBlastExtAuthoringCreateSpatialGrid(uint32_t resolution, const Mesh* m)
{
Grid* g = NVBLAST_NEW(Grid)(resolution);
g->setMesh(m);
return g;
}
SpatialAccelerator* NvBlastExtAuthoringCreateGridAccelerator(SpatialGrid* parentGrid)
{
return NVBLAST_NEW(GridAccelerator)((Grid*)parentGrid);
}
SpatialAccelerator* NvBlastExtAuthoringCreateSweepingAccelerator(const Mesh* m)
{
return NVBLAST_NEW(SweepingAccelerator)(m);
}
SpatialAccelerator* NvBlastExtAuthoringCreateBBoxBasedAccelerator(uint32_t resolution, const Mesh* m)
{
return NVBLAST_NEW(BBoxBasedAccelerator)(m, resolution);
}
BooleanTool* NvBlastExtAuthoringCreateBooleanTool()
{
return new BooleanToolImpl;
}
| 25,671 | C++ | 39.428346 | 227 | 0.661797 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtTriangleProcessor.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include "NvBlastExtTriangleProcessor.h"
#include "NvBlastExtAuthoringInternalCommon.h"
#define COLLIN_EPS 1e-4f
#define V_COMP_EPS 1e-5f
using namespace nvidia;
namespace Nv
{
namespace Blast
{
/**
Segment bounding box intersection test
*/
bool boundingRectangleIntersection(const NvVec2& s1, const NvVec2& e1, const NvVec2& s2, const NvVec2& e2)
{
// sl1/sl2 is always left bottom end of rectangle
// el1/el2 is always right top end of rectangle
float sl1, sl2, el1, el2;
if (s1.x < e1.x)
{
sl1 = s1.x;
el1 = e1.x;
}
else
{
el1 = s1.x;
sl1 = e1.x;
}
if (s2.x < e2.x)
{
sl2 = s2.x;
el2 = e2.x;
}
else
{
el2 = s2.x;
sl2 = e2.x;
}
if (NvMax(sl1, sl2) > NvMin(el1, el2))
return false;
if (s1.y < e1.y)
{
sl1 = s1.y;
el1 = e1.y;
}
else
{
el1 = s1.y;
sl1 = e1.y;
}
if (s2.y < e2.y)
{
sl2 = s2.y;
el2 = e2.y;
}
else
{
el2 = s2.y;
sl2 = e2.y;
}
if (NvMax(sl1, sl2) > NvMin(el1, el2))
return false;
return true;
}
inline float getRotation(NvVec2 a, NvVec2 b)
{
return a.x * b.y - a.y * b.x;
}
inline float getParameter(const NvVec2& a, const NvVec2& b, const NvVec2& point)
{
return (point - a).magnitude() / (b - a).magnitude();
}
inline NvVec3 lerp3D(const NvVec3& a, const NvVec3& b, const float t)
{
return (b - a) * t + a;
}
struct Line2D
{
NvVec2 normal;
float c;
Line2D(NvVec2 vec, NvVec2 point)
{
normal.x = vec.y;
normal.y = -vec.x;
c = -normal.dot(point);
}
};
uint32_t TriangleProcessor::getSegmentIntersection(const NvVec2& s1, const NvVec2& e1, const NvVec2& s2, const NvVec2& e2, float& t1)
{
if (!boundingRectangleIntersection(s1, e1, s2, e2))
return 0;
NvVec2 vec1 = e1 - s1;
NvVec2 vec2 = e2 - s2;
float det1 = getRotation(vec1, vec2);
if (NvAbs(det1) < COLLIN_EPS)
{
return 0;
}
Line2D lineA(vec1, s1);
Line2D lineB(vec2, s2);
NvVec2 fInt;
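// Solve the two implicit line equations with Cramer's rule: det1 is the shared denominator,
// detX/detY are the numerators of the intersection point's coordinates.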
float detX = lineA.normal.y * lineB.c - lineA.c * lineB.normal.y;
float detY = lineA.c * lineB.normal.x - lineB.c * lineA.normal.x;
float x = detX / det1;
float y = detY / det1;
if (x + V_COMP_EPS >= NvMax(NvMin(s1.x, e1.x), NvMin(s2.x, e2.x)) &&
x - V_COMP_EPS <= NvMin(NvMax(s1.x, e1.x), NvMax(s2.x, e2.x)) &&
y + V_COMP_EPS >= NvMax(NvMin(s1.y, e1.y), NvMin(s2.y, e2.y)) &&
y - V_COMP_EPS <= NvMin(NvMax(s1.y, e1.y), NvMax(s2.y, e2.y)))
{
fInt.x = x;
fInt.y = y;
t1 = getParameter(s1, e1, fInt);
return 1;
}
return 0;
}
struct cwComparer
{
NvVec3 basePoint;
NvVec3 normal;
cwComparer(NvVec3 basePointIn, NvVec3 norm)
{
basePoint = basePointIn;
normal = norm;
};
bool operator()(const NvVec3& a, const NvVec3& b)
{
NvVec3 norm = (a - basePoint).cross(b - basePoint);
return normal.dot(norm) > 0;
}
};
bool vec3Comparer(const NvVec3& a, const NvVec3& b)
{
if (a.x + V_COMP_EPS < b.x) return true;
if (a.x - V_COMP_EPS > b.x) return false;
if (a.y + V_COMP_EPS < b.y) return true;
if (a.y - V_COMP_EPS > b.y) return false;
if (a.z + V_COMP_EPS < b.z) return true;
return false;
}
void TriangleProcessor::sortToCCW(std::vector<NvVec3>& points, NvVec3& normal)
{
std::sort(points.begin(), points.end(), vec3Comparer);
int lastUnique = 0;
for (uint32_t i = 1; i < points.size(); ++i)
{
NvVec3 df = (points[i] - points[lastUnique]).abs();
if (df.x > V_COMP_EPS || df.y > V_COMP_EPS || df.z > V_COMP_EPS)
{
points[++lastUnique] = points[i];
}
}
points.resize(lastUnique + 1);
if (points.size() > 2)
{
cwComparer compr(points[0], normal);
std::sort(points.begin() + 1, points.end(), compr);
}
}
void TriangleProcessor::buildConvexHull(std::vector<NvVec3>& points, std::vector<NvVec3>& convexHull,const NvVec3& normal)
{
std::sort(points.begin(), points.end(), vec3Comparer);
int lastUnique = 0;
for (uint32_t i = 1; i < points.size(); ++i)
{
NvVec3 df = (points[i] - points[lastUnique]).abs();
if (df.x > V_COMP_EPS || df.y > V_COMP_EPS || df.z > V_COMP_EPS)
{
points[++lastUnique] = points[i];
}
}
points.resize(lastUnique + 1);
if (points.size() > 2)
{
cwComparer compr(points[0], normal);
std::sort(points.begin() + 1, points.end(), compr);
}
if (points.size() < 3)
return;
convexHull.push_back(points[0]);
convexHull.push_back(points[1]);
ProjectionDirections projectionDirection = getProjectionDirection(normal);
for (uint32_t i = 2; i < points.size(); ++i)
{
NvVec2 pnt = getProjectedPointWithWinding(points[i], projectionDirection);
NvVec2 vec = pnt - getProjectedPointWithWinding(convexHull.back(), projectionDirection);
if (NvAbs(vec.x) < V_COMP_EPS && NvAbs(vec.y) < V_COMP_EPS)
{
continue;
}
if (getRotation(vec, getProjectedPointWithWinding(convexHull.back(), projectionDirection) - getProjectedPointWithWinding(convexHull[convexHull.size() - 2], projectionDirection)) < 0)
{
convexHull.push_back(points[i]);
}
else
{
while (convexHull.size() > 1 && getRotation(vec, getProjectedPointWithWinding(convexHull.back(), projectionDirection) - getProjectedPointWithWinding(convexHull[convexHull.size() - 2], projectionDirection)) > 0)
{
convexHull.pop_back();
vec = pnt - getProjectedPointWithWinding(convexHull.back(), projectionDirection);
}
convexHull.push_back(points[i]);
}
}
}
uint32_t TriangleProcessor::getTriangleIntersection(TrPrcTriangle& a, TrPrcTriangle2d& aProjected, TrPrcTriangle &b, NvVec3& centroid, std::vector<NvVec3>& intersectionBuffer, NvVec3 normal)
{
b.points[0] -= centroid;
b.points[1] -= centroid;
b.points[2] -= centroid;
ProjectionDirections prjDir = getProjectionDirection(normal);
TrPrcTriangle2d bProjected;
bProjected.points[0] = getProjectedPointWithWinding(b.points[0], prjDir);
bProjected.points[1] = getProjectedPointWithWinding(b.points[1], prjDir);
bProjected.points[2] = getProjectedPointWithWinding(b.points[2], prjDir);
if (!triangleBoundingBoxIntersection(aProjected, bProjected)) return 0;
//* Check triangle A against points of B *//
for (int i = 0; i < 3; ++i)
{
if (isPointInside(bProjected.points[i], aProjected))
{
intersectionBuffer.push_back(b.points[i]);
}
}
//* Check triangle B against points of A *//
for (int i = 0; i < 3; ++i)
{
if (isPointInside(aProjected.points[i], bProjected))
{
intersectionBuffer.push_back(a.points[i]);
}
}
//* Check edges intersection *//
float param = 0;
for (int i = 0; i < 3; ++i)
{
for (int j = 0; j < 3; ++j)
{
if (getSegmentIntersection(aProjected.points[i], aProjected.points[(i + 1) % 3], bProjected.points[j], bProjected.points[(j + 1) % 3], param))
{
intersectionBuffer.push_back(lerp3D(a.points[i], a.points[(i + 1) % 3], param));
}
}
}
if (intersectionBuffer.size() == 0)
return 0;
// The intersection of two triangles is convex, but the points must be reordered to construct a proper polygon //
std::vector<NvVec3> intrs;
buildConvexHull(intersectionBuffer, intrs, normal);
intersectionBuffer = intrs;
// Translate all points back from the origin (add the centroid back) //
for (uint32_t i = 0; i < intersectionBuffer.size(); ++i)
{
intersectionBuffer[i] += centroid;
}
return 1;
}
bool TriangleProcessor::triangleBoundingBoxIntersection(TrPrcTriangle2d& a, TrPrcTriangle2d& b)
{
float fb = std::min(a.points[0].x, std::min(a.points[1].x, a.points[2].x));
float fe = std::max(a.points[0].x, std::max(a.points[1].x, a.points[2].x));
float sb = std::min(b.points[0].x, std::min(b.points[1].x, b.points[2].x));
float se = std::max(b.points[0].x, std::max(b.points[1].x, b.points[2].x));
if (std::min(fe, se) + V_COMP_EPS < std::max(fb, sb)) return 0;
fb = std::min(a.points[0].y, std::min(a.points[1].y, a.points[2].y));
fe = std::max(a.points[0].y, std::max(a.points[1].y, a.points[2].y));
sb = std::min(b.points[0].y, std::min(b.points[1].y, b.points[2].y));
se = std::max(b.points[0].y, std::max(b.points[1].y, b.points[2].y));
if (std::min(fe, se) + V_COMP_EPS < std::max(fb, sb)) return 0;
return 1;
}
uint32_t TriangleProcessor::isPointInside(const NvVec2& point, const TrPrcTriangle2d& triangle)
{
float av = getRotation(point - triangle.points[0], triangle.points[1] - triangle.points[0]);
float bv = getRotation(point - triangle.points[1], triangle.points[2] - triangle.points[1]);
float cv = getRotation(point - triangle.points[2], triangle.points[0] - triangle.points[2]);
if (NvAbs(av) < COLLIN_EPS) av = 0;
if (NvAbs(bv) < COLLIN_EPS) bv = 0;
if (NvAbs(cv) < COLLIN_EPS) cv = 0;
if (av >= 0 && bv >= 0 && cv >= 0)
{
if (av == 0 || bv == 0 || cv == 0)
return 2;
return 1;
}
if (av <= 0 && bv <= 0 && cv <= 0)
{
if (av == 0 || bv == 0 || cv == 0)
return 2;
return 1;
}
return 0;
}
} // namespace Blast
} // namespace Nv
| 11,307 | C++ | 29.316354 | 222 | 0.600425 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringPatternGeneratorImpl.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#define _CRT_SECURE_NO_WARNINGS
#include "NvBlastGlobals.h"
#include "NvBlastAssert.h"
#include "NvBlastExtAuthoringTypes.h"
#include "NvBlastExtAuthoringPatternGeneratorImpl.h"
#include "NvBlastExtAuthoringMeshUtils.h"
#include "NvBlastExtAuthoringMeshImpl.h"
#include "NvBlastExtAuthoringFractureToolImpl.h"
#include "NvBlastExtAuthoringBooleanToolImpl.h"
#include "NvBlastExtAuthoringTriangulator.h"
#include "NvBlastExtAuthoringPerlinNoise.h"
#include <NvBlastNvSharedHelpers.h>
#include <vector>
using namespace Nv::Blast;
using namespace nvidia;
struct DamagePatternImpl : public DamagePattern
{
virtual void release() override;
};
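// Generates a Voronoi damage pattern from cell sites sampled inside the spherical shell
// [radiusMin, radiusMax]; a radiusDistr != 1 biases the radial distribution of the sites.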
DamagePattern* PatternGeneratorImpl::generateUniformPattern(const UniformPatternDesc* desc)
{
std::vector<NvcVec3> points;
float radiusDelta = desc->radiusMax - desc->radiusMin;
for (uint32_t i = 0; i < desc->cellsCount; ++i)
{
float rd = desc->RNG() * radiusDelta + desc->radiusMin;
if (desc->radiusDistr != 1.0f)
{
rd = std::pow(rd / desc->radiusMax, desc->radiusDistr) * desc->radiusMax;
}
float phi = desc->RNG() * 6.28f;
float theta = (desc->RNG()) * 6.28f;
float x = rd * cos(phi) * sin(theta);
float y = rd * sin(phi) * sin(theta);
float z = rd * cos(theta);
points.push_back({x, y, z});
}
auto pattern = generateVoronoiPattern((uint32_t)points.size(), points.data(), desc->interiorMaterialId);
pattern->activationRadius = desc->radiusMax * desc->debrisRadiusMult;
return pattern;
}
DamagePattern* PatternGeneratorImpl::generateVoronoiPattern(uint32_t cellCount, const NvcVec3* inPoints, int32_t interiorMaterialId)
{
return generateVoronoiPatternInternal(cellCount, inPoints, interiorMaterialId);
}
DamagePattern* PatternGeneratorImpl::generateVoronoiPatternInternal(uint32_t cellCount, const NvcVec3* inPoints, int32_t interiorMaterialId, float angle)
{
DamagePatternImpl* pattern = NVBLAST_NEW(DamagePatternImpl);
std::vector<NvcVec3> points(cellCount);
NvcVec3 orig = {0, 0, 0};
for (uint32_t i = 0; i < cellCount; ++i)
{
points[i] = inPoints[i];
orig = orig + points[i];
}
orig = orig / cellCount;
std::vector<std::vector<std::pair<int32_t, int32_t>>> neighbors;
findCellBasePlanes(points, neighbors);
Mesh** patterns = (Mesh**)NVBLAST_ALLOC(sizeof(Mesh*) * cellCount);
//PreparedMesh** prepMeshes = (PreparedMesh**)NVBLAST_ALLOC(sizeof(PreparedMesh*) * cellCount);
BooleanEvaluator evl;
for (uint32_t i = 0; i < cellCount; ++i)
{
patterns[i] = getCellMesh(evl, 0, i, points, neighbors, interiorMaterialId, orig);
if (patterns[i] == nullptr)
{
continue;
}
if (angle != 0)
{
auto* vr = patterns[i]->getVerticesWritable();
for (uint32_t j = 0; j < patterns[i]->getVerticesCount(); ++j)
{
float& z = vr[j].p.z;
z -= 3.8f;
if (z < -2) // we presume that this vertex has infinite -z position (everything scaled to unit cube).
{
if (angle > 0)
{
float d = sqrt(vr[j].p.x * vr[j].p.x + vr[j].p.y * vr[j].p.y);
vr[j].p.x *= (d + 4 * tan(angle * nvidia::NvPi / 180.f)) / d;
vr[j].p.y *= (d + 4 * tan(angle * nvidia::NvPi / 180.f)) / d;
}
}
}
patterns[i]->recalculateBoundingBox();
}
}
for (int32_t i = cellCount - 1; i >= 0; i--)
{
if (patterns[i] == nullptr)
{
cellCount--;
std::swap(patterns[i], patterns[cellCount]);
//std::swap(prepMeshes[i], prepMeshes[cellCount]);
}
}
pattern->cellsCount = cellCount;
pattern->cellsMeshes = patterns;
//pattern->preparedMeshes = prepMeshes;
#ifdef USE_MERGED_MESH
pattern->outputEdges = NVBLAST_ALLOC(sizeof(BooleanResultEdge) * (cellCount * BLASTRT_MAX_EDGES_PER_CHUNK));
pattern->outputEdgesCount = (uint32_t*)NVBLAST_ALLOC(sizeof(uint32_t) * cellCount);
#endif
return pattern;
}
DamagePattern* PatternGeneratorImpl::generateBeamPattern(const BeamPatternDesc* desc)
{
std::vector<NvcVec3> points;
float radiusDelta = desc->radiusMax - desc->radiusMin;
for (uint32_t i = 0; i < desc->cellsCount; ++i)
{
float rd = desc->RNG() * radiusDelta + desc->radiusMin;
float phi = desc->RNG() * 6.28f;
float x = rd * cos(phi);
float y = rd * sin(phi);
float z = desc->RNG() - 1;
points.push_back({x, y, z});
}
auto pattern = generateVoronoiPattern((uint32_t)points.size(), points.data(), desc->interiorMaterialId);
pattern->activationType = DamagePattern::Line;
return pattern;
}
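// Builds a regular radial pattern: sites are placed on a radial/angular lattice, perturbed by simplex
// noise and angular jitter, and rescaled so the farthest site lies at radiusMax before Voronoi generation.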
DamagePattern* PatternGeneratorImpl::generateRegularRadialPattern(const RegularRadialPatternDesc* desc)
{
SimplexNoise noise(desc->radialNoiseAmplitude, desc->radialNoiseFrequency, 3, desc->RNG() * 999999);
std::vector<NvVec3> points;
float radialDelta = (desc->radiusMax - desc->radiusMin) / desc->radialSteps;
float angularDelta = 2 * acos(-1.0f) / desc->angularSteps;
for (uint32_t i = 0; i < desc->radialSteps; ++i)
{
for (uint32_t j = 0; j < desc->angularSteps; ++j)
{
float angle = j * angularDelta + desc->RNG() * desc->angularNoiseAmplitude;
float rd = ((i + noise.sample(NvVec3(angle, 0, 0))) * radialDelta + desc->radiusMin);
float x = rd * cos(angle);
float y = rd * sin(angle);
float z = 0;
points.push_back(NvVec3(x, y, z));
}
}
float mrd = 0.0;
for (uint32_t i = 0; i < points.size(); ++i)
{
mrd = std::max(mrd, points[i].magnitude());
}
for (uint32_t i = 0; i < points.size(); ++i)
{
points[i] *= desc->radiusMax / mrd;
}
float ap = std::max(0.0f, desc->aperture);
auto pattern = generateVoronoiPatternInternal((uint32_t)points.size(), fromNvShared(points.data()), desc->interiorMaterialId, ap);
pattern->activationRadius = desc->radiusMax * desc->debrisRadiusMult;
pattern->activationType = (ap == 0) ? DamagePattern::Line : DamagePattern::Cone;
pattern->angle = ap;
return pattern;
}
void PatternGeneratorImpl::release()
{
NVBLAST_DELETE(this, PatternGeneratorImpl);
}
void DamagePatternImpl::release()
{
if (cellsMeshes)
{
for (uint32_t i = 0; i < cellsCount; i++)
{
cellsMeshes[i]->release();
}
NVBLAST_FREE(cellsMeshes);
}
#ifdef USE_MERGED_MESH
if (outputEdges)
{
NVBLAST_FREE(outputEdges);
}
if (outputEdgesCount)
{
NVBLAST_FREE(outputEdgesCount);
}
if (mergedMesh)
{
mergedMesh->release();
}
if (preparedMergedMesh)
{
preparedMergedMesh->release();
}
if (validFacetsForChunk)
{
for (uint32_t i = 0; i < cellsCount; i++)
{
if (validFacetsForChunk[i])
{
NVBLAST_FREE(validFacetsForChunk[i]);
}
}
NVBLAST_FREE(validFacetsForChunk);
}
#endif
NVBLAST_DELETE(this, DamagePatternImpl);
}
namespace Nv
{
namespace Blast
{
void savePatternToObj(DamagePattern* pattern)
{
FILE* fl = fopen("Pattern.obj", "w");
std::vector<uint32_t> trc;
for (uint32_t mesh = 0; mesh < pattern->cellsCount; ++mesh)
{
Mesh* m = pattern->cellsMeshes[mesh];
Triangulator trgl;
trgl.triangulate(m);
auto& t = trgl.getBaseMesh();
for (uint32_t v = 0; v < t.size(); ++v)
{
fprintf(fl, "v %f %f %f\n", t[v].a.p.x, t[v].a.p.y, t[v].a.p.z);
fprintf(fl, "v %f %f %f\n", t[v].b.p.x, t[v].b.p.y, t[v].b.p.z);
fprintf(fl, "v %f %f %f\n", t[v].c.p.x, t[v].c.p.y, t[v].c.p.z);
}
trc.push_back(t.size());
}
uint32_t cv = 1;
for (uint32_t m = 0; m < trc.size(); ++m)
{
fprintf(fl, "g %d\n", m);
for (uint32_t k = 0; k < trc[m]; ++k)
{
fprintf(fl, "f %d %d %d \n", cv, cv + 1, cv + 2);
cv += 3;
}
}
fclose(fl);
}
}
} | 10,186 | C++ | 31.650641 | 153 | 0.595327 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringBooleanToolImpl.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTEXTAUTHORINGBOOLEANTOOLIMPL_H
#define NVBLASTEXTAUTHORINGBOOLEANTOOLIMPL_H
#include "NvBlastExtAuthoringTypes.h"
#include "NvBlastExtAuthoringInternalCommon.h"
#include "NvBlastExtAuthoringBooleanTool.h"
#include <vector>
#include "NvBlastTypes.h"
namespace Nv
{
namespace Blast
{
class Mesh;
/**
Boolean tool config, used to perform different operations: UNION, INTERSECTION, DIFFERENCE
*/
struct BooleanConf
{
int32_t ca, cb, ci;
BooleanConf(int32_t a, int32_t b, int32_t c) : ca(a), cb(b), ci(c)
{
}
};
namespace BooleanConfigurations
{
/**
Creates boolean tool configuration to perform intersection of meshes A and B.
*/
inline BooleanConf BOOLEAN_INTERSECTION()
{
return BooleanConf(0, 0, 1);
}
/**
Creates boolean tool configuration to perform union of meshes A and B.
*/
inline BooleanConf BOOLEAN_UNION()
{
return BooleanConf(1, 1, -1);
}
/**
Creates boolean tool configuration to perform the difference of meshes (A - B).
*/
inline BooleanConf BOOLEAN_DIFFERENCE()
{
return BooleanConf(1, 0, -1);
}
}
/**
Structure which holds information about the intersection of a facet with an edge.
*/
struct EdgeFacetIntersectionData
{
int32_t edId;
int32_t intersectionType;
Vertex intersectionPoint;
EdgeFacetIntersectionData(int32_t edId, int32_t intersType, Vertex& inters) : edId(edId), intersectionType(intersType), intersectionPoint(inters)
{ }
EdgeFacetIntersectionData(int32_t edId) : edId(edId)
{ }
bool operator<(const EdgeFacetIntersectionData& b) const
{
return edId < b.edId;
}
};
class SpatialAccelerator;
/**
Tool for performing boolean operations on polygonal meshes.
The tool supports only closed meshes. Performing a boolean operation on meshes with holes can lead to unexpected behavior, e.g. holes in the resulting geometry.
*/
class BooleanEvaluator
{
public:
BooleanEvaluator();
~BooleanEvaluator();
/**
Perform boolean operation on two polygonal meshes (A and B).
\param[in] meshA Mesh A
\param[in] meshB Mesh B
\param[in] spAccelA Acceleration structure for mesh A
\param[in] spAccelB Acceleration structure for mesh B
\param[in] mode Boolean operation type
*/
void performBoolean(const Mesh* meshA, const Mesh* meshB, SpatialAccelerator* spAccelA, SpatialAccelerator* spAccelB, const BooleanConf& mode);
/**
Perform boolean operation on two polygonal meshes (A and B).
\param[in] meshA Mesh A
\param[in] meshB Mesh B
\param[in] mode Boolean operation type
*/
void performBoolean(const Mesh* meshA, const Mesh* meshB, const BooleanConf& mode);
/**
Perform cutting of a mesh with a large box which represents the cutting plane. This method skips part of the intersection computations, so it
should be used ONLY with a cutting box obtained from the getBigBox(...) method in NvBlastExtAuthoringMesh.h. For cutting, use only BOOLEAN_INTERSECTION or BOOLEAN_DIFFERENCE mode.
\param[in] meshA Mesh A
\param[in] meshB Cutting box
\param[in] spAccelA Acceleration structure for mesh A
\param[in] spAccelB Acceleration structure for cutting box
\param[in] mode Boolean operation type
*/
void performFastCutting(const Mesh* meshA, const Mesh* meshB, SpatialAccelerator* spAccelA, SpatialAccelerator* spAccelB, const BooleanConf& mode);
/**
Perform cutting of a mesh with a large box which represents the cutting plane. This method skips part of the intersection computations, so it
should be used ONLY with a cutting box obtained from the getBigBox(...) method in NvBlastExtAuthoringMesh.h. For cutting, use only BOOLEAN_INTERSECTION or BOOLEAN_DIFFERENCE mode.
\param[in] meshA Mesh A
\param[in] meshB Cutting box
\param[in] mode Boolean operation type
*/
void performFastCutting(const Mesh* meshA, const Mesh* meshB, const BooleanConf& mode);
/**
Test whether a point is contained in the mesh.
\param[in] mesh Mesh geometry
\param[in] point Point which should be tested
\return nonzero if the point is inside the mesh
*/
int32_t isPointContainedInMesh(const Mesh* mesh, const NvcVec3& point);
/**
Test whether a point is contained in the mesh.
\param[in] mesh Mesh geometry
\param[in] spAccel Acceleration structure for mesh
\param[in] point Point which should be tested
\return nonzero if the point is inside the mesh
*/
int32_t isPointContainedInMesh(const Mesh* mesh, SpatialAccelerator* spAccel, const NvcVec3& point);
/**
Generates the resulting polygonal mesh after performing a boolean operation.
\return The resulting mesh geometry if one exists, otherwise nullptr.
*/
Mesh* createNewMesh();
/**
Reset tool state.
*/
void reset();
private:
void buildFaceFaceIntersections(const BooleanConf& mode);
void buildFastFaceFaceIntersection(const BooleanConf& mode);
void collectRetainedPartsFromA(const BooleanConf& mode);
void collectRetainedPartsFromB(const BooleanConf& mode);
int32_t addIfNotExist(const Vertex& p);
void addEdgeIfValid(const EdgeWithParent& ed);
private:
int32_t vertexMeshStatus03(const NvcVec3& p, const Mesh* mesh);
int32_t vertexMeshStatus30(const NvcVec3& p, const Mesh* mesh);
const Mesh* mMeshA;
const Mesh* mMeshB;
SpatialAccelerator* mAcceleratorA;
SpatialAccelerator* mAcceleratorB;
std::vector<EdgeWithParent> mEdgeAggregate;
std::vector<Vertex> mVerticesAggregate;
std::vector<std::vector<EdgeFacetIntersectionData> > mEdgeFacetIntersectionData12;
std::vector<std::vector<EdgeFacetIntersectionData> > mEdgeFacetIntersectionData21;
};
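/**
Illustrative usage sketch (editorial addition, not part of the original header). Assuming meshA and
meshB are closed Nv::Blast::Mesh instances (hypothetical names), a union could be computed roughly as:

    BooleanEvaluator evaluator;
    evaluator.performBoolean(meshA, meshB, BooleanConfigurations::BOOLEAN_UNION());
    Mesh* result = evaluator.createNewMesh(); // may be nullptr if the result is empty
    evaluator.reset();
*/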
/// BooleanTool
class BooleanToolImpl : public BooleanTool
{
public:
/**
* Release BooleanTool memory
*/
virtual void release() override;
virtual Mesh* performBoolean(const Mesh* meshA, SpatialAccelerator* accelA, const Mesh* meshB, SpatialAccelerator* accelB, BooleanTool::Op op) override;
virtual bool pointInMesh(const Mesh* mesh, SpatialAccelerator* accel, const NvcVec3& point) override;
private:
BooleanEvaluator m_evaluator;
};
} // namespace Blast
} // namespace Nv
#endif // ifndef NVBLASTEXTAUTHORINGBOOLEANTOOLIMPL_H
| 8,200 | C | 34.349138 | 183 | 0.693659 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringMeshCleanerImpl.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include "NvVec3.h"
#include "NvVec2.h"
#include "NvBounds3.h"
#include <vector>
#include <queue>
#include <map>
#include <NvBlastExtAuthoringMeshCleanerImpl.h>
#include <NvBlastExtAuthoringMeshImpl.h>
#include <NvBlastExtAuthoringInternalCommon.h>
#include <NvBlastNvSharedHelpers.h>
#include <boost/multiprecision/cpp_int.hpp>
using namespace nvidia;
using namespace Nv::Blast;
using namespace boost::multiprecision;
/**
Exact rational vector types, used so that the geometric predicates below are robust against floating-point rounding error.
*/
struct RVec3
{
cpp_rational x, y, z;
RVec3() {}
bool isZero()
{
return x.is_zero() && y.is_zero() && z.is_zero();
}
RVec3(cpp_rational _x, cpp_rational _y, cpp_rational _z)
{
x = _x;
y = _y;
z = _z;
}
RVec3(const NvcVec3& p)
{
x = cpp_rational(p.x);
y = cpp_rational(p.y);
z = cpp_rational(p.z);
}
NvVec3 toVec3()
{
return { x.convert_to<float>(), y.convert_to<float>(), z.convert_to<float>() };
}
RVec3 operator-(const RVec3& b) const
{
return RVec3(x - b.x, y - b.y, z - b.z);
}
RVec3 operator+(const RVec3& b) const
{
return RVec3(x + b.x, y + b.y, z + b.z);
}
RVec3 cross(const RVec3& in) const
{
return RVec3(y * in.z - in.y * z, in.x * z - x * in.z, x * in.y - in.x * y);
}
cpp_rational dot(const RVec3& in) const
{
return x * in.x + y * in.y + z * in.z;
}
RVec3 operator*(const cpp_rational& in) const
{
return RVec3(x * in, y * in, z * in);
}
};
struct RVec2
{
cpp_rational x, y;
RVec2() {}
RVec2(cpp_rational _x, cpp_rational _y)
{
x = _x;
y = _y;
}
RVec2(const NvcVec2& p)
{
x = cpp_rational(p.x);
y = cpp_rational(p.y);
}
NvVec2 toVec2()
{
return { x.convert_to<float>(), y.convert_to<float>() };
}
RVec2 operator-(const RVec2& b) const
{
return RVec2(x - b.x, y - b.y);
}
RVec2 operator+(const RVec2& b) const
{
return RVec2(x + b.x, y + b.y);
}
cpp_rational cross(const RVec2& in) const
{
return x * in.y - y * in.x;
}
cpp_rational dot(const RVec2& in) const
{
return x * in.x + y * in.y;
}
RVec2 operator*(const cpp_rational& in) const
{
return RVec2(x * in, y * in);
}
};
struct RatPlane
{
RVec3 n;
cpp_rational d;
RatPlane(const RVec3& a, const RVec3& b, const RVec3& c)
{
n = (b - a).cross(c - a);
d = -n.dot(a);
};
cpp_rational distance(RVec3& in)
{
return n.dot(in) + d;
}
};
bool isSame(const RatPlane& a, const RatPlane& b)
{
if (a.d != b.d)
return false;
if (a.n.x != b.n.x || a.n.y != b.n.y || a.n.z != b.n.z)
return false;
return true;
}
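// Exact intersection of segment (a, b) with plane pl: solve n.(a + t*(b - a)) + d = 0 for t
// and return the corresponding point on the segment.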
RVec3 planeSegmInters(RVec3& a, RVec3& b, RatPlane& pl)
{
cpp_rational t = -(a.dot(pl.n) + pl.d) / pl.n.dot(b - a);
RVec3 on = a + (b - a) * t;
return on;
}
enum POINT_CLASS
{
ON_AB = 0,
ON_BC = 1,
ON_AC = 2,
INSIDE_TR,
OUTSIDE_TR,
ON_VERTEX
};
int32_t isPointInside(const RVec2& a, const RVec2& b, const RVec2& c, const RVec2& p)
{
cpp_rational v1 = (b - a).cross(p - a);
cpp_rational v2 = (c - b).cross(p - b);
cpp_rational v3 = (a - c).cross(p - c);
int32_t v1s = v1.sign();
int32_t v2s = v2.sign();
int32_t v3s = v3.sign();
if (v1s * v2s < 0 || v1s * v3s < 0 || v2s * v3s < 0)
return OUTSIDE_TR;
if (v1s == 0 && v2s == 0)
return OUTSIDE_TR;
if (v1s == 0 && v3s == 0)
return OUTSIDE_TR;
if (v2s == 0 && v3s == 0)
return OUTSIDE_TR;
if (v1s == 0)
return ON_AB;
if (v2s == 0)
return ON_BC;
if (v3s == 0)
return ON_AC;
return INSIDE_TR;
}
RVec2 getProjectedPointWithWinding(const RVec3& point, ProjectionDirections dir)
{
if (dir & YZ_PLANE)
{
if (dir & OPPOSITE_WINDING)
{
return RVec2(point.z, point.y);
}
else
return RVec2(point.y, point.z);
}
if (dir & ZX_PLANE)
{
if (dir & OPPOSITE_WINDING)
{
return RVec2(point.z, point.x);
}
return RVec2(point.x, point.z);
}
if (dir & OPPOSITE_WINDING)
{
return RVec2(point.y, point.x);
}
return RVec2(point.x, point.y);
}
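/**
    Triangle record used by the constrained Delaunay triangulation: p[] holds vertex indices, n[k] the index
    of the neighbor triangle across edge (p[k], p[(k + 1) % 3]) or -1, and parentTriangle the index of the
    original facet. A triangle with p[0] == -1 is considered deleted.
*/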
struct DelTriangle
{
int32_t p[3];
int32_t n[3];
int32_t parentTriangle;
int32_t getEdWP(int32_t vrt)
{
if (p[0] == vrt)
return 1;
if (p[1] == vrt)
return 2;
if (p[2] == vrt)
return 0;
return -1;
}
int32_t getEdId(int32_t v1, int32_t v2)
{
if (p[0] == v1 && p[1] == v2)
return 0;
if (p[1] == v1 && p[2] == v2)
return 1;
if (p[2] == v1 && p[0] == v2)
return 2;
return -1;
}
int32_t getOppP(int32_t v1, int32_t v2)
{
if (p[0] == v1 && p[1] == v2)
return 2;
if (p[1] == v1 && p[2] == v2)
return 0;
if (p[2] == v1 && p[0] == v2)
return 1;
return -1;
}
int32_t getOppPoint(int32_t v1, int32_t v2)
{
if (p[0] != v1 && p[0] != v2)
return p[0];
if (p[1] != v1 && p[1] != v2)
return p[1];
if (p[2] != v1 && p[2] != v2)
return p[2];
return -1;
}
bool compare(const DelTriangle& t) const
{
if (p[0] == t.p[0] && p[1] == t.p[1] && p[2] == t.p[2])
return true;
if (p[1] == t.p[0] && p[2] == t.p[1] && p[0] == t.p[2])
return true;
if (p[2] == t.p[0] && p[0] == t.p[1] && p[1] == t.p[2])
return true;
return false;
}
};
struct DelEdge
{
int32_t s, e;
int32_t nr, nl;
};
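/**
    Returns true when segment (s, e) properly crosses one of the edges of triangle (a, b, c) in 2D.
*/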
bool isIntersectsTriangle(RVec2& a, RVec2& b, RVec2& c, RVec2& s, RVec2& e)
{
RVec2 vec = e - s;
if ((a - s).cross(vec) * (b - s).cross(vec) < 0)
{
RVec2 vec2 = b - a;
        if ((s - a).cross(vec2) * (e - a).cross(vec2) < 0)
return true;
}
if ((b - s).cross(vec) * (c - s).cross(vec) < 0)
{
RVec2 vec2 = c - b;
        if ((s - b).cross(vec2) * (e - b).cross(vec2) < 0)
return true;
}
if ((a - s).cross(vec) * (c - s).cross(vec) < 0)
{
RVec2 vec2 = a - c;
        if ((s - c).cross(vec2) * (e - c).cross(vec2) < 0)
return true;
}
return false;
}
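/**
    Exact in-circumcircle predicate: returns 1 when p lies strictly inside the circumcircle of (a, b, c)
    (taken in counter-clockwise order), -1 when strictly outside, and 0 when it lies on the circle.
*/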
inline int32_t inCircumcircle(RVec2& a, RVec2& b, RVec2& c, RVec2& p)
{
RVec2 ta = a - p;
RVec2 tb = b - p;
RVec2 tc = c - p;
cpp_rational ad = ta.dot(ta);
cpp_rational bd = tb.dot(tb);
cpp_rational cd = tc.dot(tc);
cpp_rational pred =
ta.x * (tb.y * cd - tc.y * bd) - ta.y * (tb.x * cd - tc.x * bd) + ad * (tb.x * tc.y - tc.x * tb.y);
if (pred > 0)
return 1;
if (pred < 0)
return -1;
return 0;
}
int32_t getEdge(std::vector<DelEdge>& edges, int32_t s, int32_t e)
{
for (uint32_t i = 0; i < edges.size(); ++i)
{
if (edges[i].s == s && edges[i].e == e)
return i;
}
edges.push_back(DelEdge());
edges.back().s = s;
edges.back().e = e;
return edges.size() - 1;
}
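/**
    Rebuilds the triangle adjacency (n[] indices) from scratch by matching shared edges between all pairs
    of live triangles; triangles with p[0] == -1 are treated as deleted.
*/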
void rebuildAdjacency(std::vector<DelTriangle>& state)
{
for (uint32_t i = 0; i < state.size(); ++i)
{
state[i].n[0] = state[i].n[1] = state[i].n[2] = -1;
}
for (uint32_t i = 0; i < state.size(); ++i)
{
if (state[i].p[0] == -1)
continue;
for (uint32_t j = i + 1; j < state.size(); ++j)
{
if (state[j].p[0] == -1)
continue;
for (uint32_t k = 0; k < 3; ++k)
{
for (uint32_t c = 0; c < 3; ++c)
{
if (state[i].p[k] == state[j].p[(c + 1) % 3] && state[i].p[(k + 1) % 3] == state[j].p[c])
{
state[i].n[k] = j;
state[j].n[c] = i;
}
}
}
}
}
}
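/**
    Inserts vertex p into the triangulation: the containing triangle is split into three new triangles
    (or, when p lies on an edge, the two triangles sharing that edge are split into four). The Delaunay
    property is then restored with edge flips, except across the constraint edges listed in 'edges'.
*/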
void insertPoint(std::vector<RVec2>& vertices, std::vector<DelTriangle>& state, int32_t p, const std::vector<Edge>& edges)
{
std::queue<int32_t> triangleToCheck;
for (uint32_t i = 0; i < state.size(); ++i)
{
if (state[i].p[0] == -1)
continue;
DelTriangle ctr = state[i];
int32_t cv = isPointInside(vertices[ctr.p[0]], vertices[ctr.p[1]], vertices[ctr.p[2]], vertices[p]);
if (cv == OUTSIDE_TR)
continue;
if (cv == INSIDE_TR)
{
uint32_t taInd = state.size();
uint32_t tbInd = state.size() + 1;
uint32_t tcInd = state.size() + 2;
state.resize(state.size() + 3);
state[taInd].p[0] = ctr.p[2];
state[taInd].p[1] = ctr.p[0];
state[taInd].p[2] = p;
state[taInd].n[0] = ctr.n[2];
state[taInd].n[1] = tbInd;
state[taInd].n[2] = tcInd;
state[tbInd].p[0] = ctr.p[0];
state[tbInd].p[1] = ctr.p[1];
state[tbInd].p[2] = p;
state[tbInd].n[0] = ctr.n[0];
state[tbInd].n[1] = tcInd;
state[tbInd].n[2] = taInd;
state[tcInd].p[0] = ctr.p[1];
state[tcInd].p[1] = ctr.p[2];
state[tcInd].p[2] = p;
state[tcInd].n[0] = ctr.n[1];
state[tcInd].n[1] = taInd;
state[tcInd].n[2] = tbInd;
triangleToCheck.push(taInd);
triangleToCheck.push(tbInd);
triangleToCheck.push(tcInd);
/**
Change neighbors
*/
int32_t nb = state[i].n[0];
if (nb != -1)
state[nb].n[state[nb].getEdId(state[i].p[1], state[i].p[0])] = tbInd;
nb = state[i].n[1];
if (nb != -1)
state[nb].n[state[nb].getEdId(state[i].p[2], state[i].p[1])] = tcInd;
nb = state[i].n[2];
if (nb != -1)
state[nb].n[state[nb].getEdId(state[i].p[0], state[i].p[2])] = taInd;
state[i].p[0] = -1;
}
else
{
uint32_t taInd = state.size();
uint32_t tbInd = state.size() + 1;
state.resize(state.size() + 2);
int32_t bPoint = state[i].p[(cv + 2) % 3];
state[taInd].p[0] = bPoint;
state[taInd].p[1] = state[i].p[cv];
state[taInd].p[2] = p;
state[tbInd].p[0] = bPoint;
state[tbInd].p[1] = p;
state[tbInd].p[2] = state[i].p[(cv + 1) % 3];
state[taInd].n[0] = state[i].n[(cv + 2) % 3];
state[taInd].n[1] = -1;
state[taInd].n[2] = tbInd;
state[tbInd].n[0] = taInd;
state[tbInd].n[1] = -1;
state[tbInd].n[2] = state[i].n[(cv + 1) % 3];
if (state[i].n[(cv + 1) % 3] != -1)
for (int32_t k = 0; k < 3; ++k)
if (state[state[i].n[(cv + 1) % 3]].n[k] == (int32_t)i)
{
state[state[i].n[(cv + 1) % 3]].n[k] = tbInd;
break;
}
if (state[i].n[(cv + 2) % 3] != -1)
for (int32_t k = 0; k < 3; ++k)
if (state[state[i].n[(cv + 2) % 3]].n[k] == (int32_t)i)
{
state[state[i].n[(cv + 2) % 3]].n[k] = taInd;
break;
}
triangleToCheck.push(taInd);
triangleToCheck.push(tbInd);
int32_t total = 2;
int32_t oppositeTr = 0;
if (state[i].n[cv] != -1)
{
oppositeTr = state[i].n[cv];
total += 2;
uint32_t tcInd = state.size();
uint32_t tdInd = state.size() + 1;
state.resize(state.size() + 2);
int32_t oped = state[oppositeTr].getEdId(state[i].p[(cv + 1) % 3], state[i].p[cv]);
state[tcInd].n[0] = state[oppositeTr].n[(oped + 2) % 3];
state[tcInd].n[1] = tbInd;
state[tbInd].n[1] = tcInd;
state[tcInd].n[2] = tdInd;
state[tdInd].n[0] = tcInd;
state[tdInd].n[1] = taInd;
state[taInd].n[1] = tdInd;
state[tdInd].n[2] = state[oppositeTr].n[(oped + 1) % 3];
if (state[oppositeTr].n[(oped + 2) % 3] != -1)
for (int32_t k = 0; k < 3; ++k)
if (state[state[oppositeTr].n[(oped + 2) % 3]].n[k] == oppositeTr)
{
state[state[oppositeTr].n[(oped + 2) % 3]].n[k] = tcInd;
break;
}
if (state[oppositeTr].n[(oped + 1) % 3] != -1)
for (int32_t k = 0; k < 3; ++k)
if (state[state[oppositeTr].n[(oped + 1) % 3]].n[k] == oppositeTr)
{
state[state[oppositeTr].n[(oped + 1) % 3]].n[k] = tdInd;
break;
}
int32_t pop = state[oppositeTr].p[(oped + 2) % 3];
state[tcInd].p[0] = pop;
state[tcInd].p[1] = state[i].p[(cv + 1) % 3];
state[tcInd].p[2] = p;
state[tdInd].p[0] = pop;
state[tdInd].p[1] = p;
state[tdInd].p[2] = state[i].p[cv];
state[oppositeTr].p[0] = -1;
triangleToCheck.push(tcInd);
triangleToCheck.push(tdInd);
}
state[i].p[0] = -1;
}
break;
}
while (!triangleToCheck.empty())
{
int32_t ctrid = triangleToCheck.front();
triangleToCheck.pop();
DelTriangle& ctr = state[ctrid];
int32_t oppTr = -5;
int32_t ced = 0;
for (uint32_t i = 0; i < 3; ++i)
{
if (ctr.p[i] != p && ctr.p[(i + 1) % 3] != p)
{
ced = i;
oppTr = ctr.n[i];
break;
}
}
if (oppTr == -1)
continue;
bool toCont = false;
for (size_t i = 0; i < edges.size(); ++i)
{
if ((int32_t)edges[i].s == ctr.p[ced] && ctr.p[(ced + 1) % 3] == (int32_t)edges[i].e)
{
toCont = true;
break;
}
if ((int32_t)edges[i].e == ctr.p[ced] && ctr.p[(ced + 1) % 3] == (int32_t)edges[i].s)
{
toCont = true;
break;
}
}
if (toCont)
continue;
DelTriangle& otr = state[oppTr];
if (inCircumcircle(vertices[state[oppTr].p[0]], vertices[state[oppTr].p[1]], vertices[state[oppTr].p[2]],
vertices[p]) > 0)
{
int32_t notPIndx = 0;
for (; notPIndx < 3; ++notPIndx)
{
if (otr.p[notPIndx] != ctr.p[0] && otr.p[notPIndx] != ctr.p[1] && otr.p[notPIndx] != ctr.p[2])
break;
}
int32_t oppCed = state[oppTr].getEdId(ctr.p[(ced + 1) % 3], ctr.p[ced]);
int32_t ntr1 = ctrid, ntr2 = oppTr;
DelTriangle nt1, nt2;
nt1.p[0] = state[oppTr].p[notPIndx];
nt1.p[1] = p;
nt1.n[0] = ntr2;
nt1.p[2] = ctr.p[ced];
nt1.n[1] = ctr.n[(ced + 2) % 3];
nt1.n[2] = otr.n[(oppCed + 1) % 3];
if (nt1.n[2] != -1)
for (uint32_t k = 0; k < 3; ++k)
if (state[nt1.n[2]].n[k] == oppTr)
state[nt1.n[2]].n[k] = ntr1;
nt2.p[0] = p;
nt2.p[1] = state[oppTr].p[notPIndx];
nt2.n[0] = ntr1;
nt2.p[2] = ctr.p[(ced + 1) % 3];
nt2.n[1] = otr.n[(oppCed + 2) % 3];
nt2.n[2] = ctr.n[(ced + 1) % 3];
if (nt2.n[2] != -1)
for (uint32_t k = 0; k < 3; ++k)
if (state[nt2.n[2]].n[k] == ctrid)
state[nt2.n[2]].n[k] = ntr2;
state[ntr1] = nt1;
state[ntr2] = nt2;
triangleToCheck.push(ntr1);
triangleToCheck.push(ntr2);
}
}
}
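/**
    Returns true when segment (es, ee) crosses segment (a, b): es and ee lie strictly on opposite sides of
    the line through (a, b), while a and b lie on opposite sides of (or on) the line through (es, ee).
*/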
bool edgeIsIntersected(const RVec2& a, const RVec2& b, const RVec2& es, const RVec2& ee)
{
RVec2 t = b - a;
cpp_rational temp = (es - a).cross(t) * (ee - a).cross(t);
if (temp < 0)
{
t = es - ee;
if ((a - ee).cross(t) * (b - ee).cross(t) <= 0)
return true;
}
return false;
}
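/**
    Recursively triangulates the pseudo-polygon 'pseudo' lying on one side of the base edge (ba, bb):
    selects the vertex that forms a Delaunay triangle with the base edge, emits that triangle and recurses
    on the two remaining sub-polygons.
*/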
void triangulatePseudoPolygon(std::vector<RVec2>& vertices, int32_t ba, int32_t bb, std::vector<int32_t>& pseudo,
std::vector<DelTriangle>& output)
{
if (pseudo.empty())
return;
int32_t c = 0;
if (pseudo.size() > 1)
{
for (uint32_t i = 1; i < pseudo.size(); ++i)
{
if (inCircumcircle(vertices[ba], vertices[bb], vertices[pseudo[c]], vertices[pseudo[i]]) > 0)
{
c = i;
}
}
std::vector<int32_t> toLeft;
std::vector<int32_t> toRight;
for (int32_t t = 0; t < c; ++t)
{
toLeft.push_back(pseudo[t]);
}
for (size_t t = c + 1; t < pseudo.size(); ++t)
{
toRight.push_back(pseudo[t]);
}
if (toLeft.size() > 0)
triangulatePseudoPolygon(vertices, ba, pseudo[c], toLeft, output);
if (toRight.size() > 0)
triangulatePseudoPolygon(vertices, pseudo[c], bb, toRight, output);
}
output.push_back(DelTriangle());
output.back().p[0] = ba;
output.back().p[1] = bb;
output.back().p[2] = pseudo[c];
}
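/**
    Forces the constraint edge (edBeg, edEnd) into the triangulation: if the edge is not already present,
    walks through the triangles crossed by it, removes them while collecting the vertices above and below
    the edge, re-triangulates both pseudo-polygons and rebuilds adjacency.
*/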
void insertEdge(std::vector<RVec2>& vertices, std::vector<DelTriangle>& output, int32_t edBeg, int32_t edEnd)
{
bool hasEdge = false;
for (auto& it : output)
{
for (uint32_t i = 0; i < 3; ++i)
if ((it.p[i] == edBeg || it.p[i] == edEnd) && (it.p[(i + 1) % 3] == edBeg || it.p[(i + 1) % 3] == edEnd))
{
hasEdge = true;
}
}
if (hasEdge)
return;
int32_t startTriangle = -1;
int32_t edg = -1;
for (uint32_t i = 0; i < output.size(); ++i)
{
if (output[i].p[0] == -1)
continue;
if (output[i].p[0] == edBeg || output[i].p[1] == edBeg || output[i].p[2] == edBeg)
{
edg = output[i].getEdWP(edBeg);
if (edgeIsIntersected(vertices[edBeg], vertices[edEnd], vertices[output[i].p[edg]],
vertices[output[i].p[(edg + 1) % 3]]))
{
startTriangle = i;
break;
}
}
}
if (startTriangle == -1)
{
return;
}
int32_t cvertex = edBeg;
std::vector<int32_t> pointsAboveEdge;
std::vector<int32_t> pointsBelowEdge;
RVec2 vec = vertices[edEnd] - vertices[edBeg];
if (vec.cross(vertices[output[startTriangle].p[edg]] - vertices[edBeg]) > 0)
{
pointsAboveEdge.push_back(output[startTriangle].p[edg]);
pointsBelowEdge.push_back(output[startTriangle].p[(edg + 1) % 3]);
}
else
{
pointsBelowEdge.push_back(output[startTriangle].p[edg]);
pointsAboveEdge.push_back(output[startTriangle].p[(edg + 1) % 3]);
}
while (1)
{
DelTriangle& ctr = output[startTriangle];
int32_t oed = ctr.getEdWP(cvertex);
int32_t nextTriangle = ctr.n[oed];
if (output[nextTriangle].p[0] == edEnd || output[nextTriangle].p[1] == edEnd || output[nextTriangle].p[2] == edEnd)
{
ctr.p[0] = -1;
output[nextTriangle].p[0] = -1;
break;
}
DelTriangle& otr = output[nextTriangle];
int32_t opp = otr.p[otr.getOppP(ctr.p[(oed + 1) % 3], ctr.p[oed % 3])];
int32_t nextPoint = 0;
if (vec.cross((vertices[opp] - vertices[edBeg])) > 0)
{
pointsAboveEdge.push_back(opp);
if (vec.cross(vertices[ctr.p[(oed + 1) % 3]] - vertices[edBeg]) > 0)
{
nextPoint = ctr.p[(oed + 1) % 3];
}
else
{
nextPoint = ctr.p[oed];
}
}
else
{
pointsBelowEdge.push_back(opp);
if (vec.cross(vertices[ctr.p[(oed + 1) % 3]] - vertices[edBeg]) < 0)
{
nextPoint = ctr.p[(oed + 1) % 3];
}
else
{
nextPoint = ctr.p[oed];
}
}
startTriangle = nextTriangle;
cvertex = nextPoint;
ctr.p[0] = -1;
}
triangulatePseudoPolygon(vertices, edBeg, edEnd, pointsAboveEdge, output);
std::reverse(pointsBelowEdge.begin(), pointsBelowEdge.end());
triangulatePseudoPolygon(vertices, edEnd, edBeg, pointsBelowEdge, output);
    rebuildAdjacency(output);
}
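/**
    Builds a constrained Delaunay triangulation of one facet: projects the vertices to 2D along 'dr',
    seeds the triangulation with the facet's original triangle (the first three edges), inserts the
    remaining constraint endpoints one by one and then forces every constraint edge.
*/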
void buildCDT(std::vector<RVec3>& vertices, std::vector<Edge>& edges, std::vector<DelTriangle>& output,
ProjectionDirections dr)
{
std::vector<DelTriangle> state;
DelTriangle crt;
std::vector<bool> added(vertices.size(), false);
for (uint32_t i = 0; i < 3; ++i)
{
crt.p[i] = edges[i].s;
added[edges[i].s] = true;
        crt.n[i] = -1; // don't have neighbors yet
}
state.push_back(crt);
std::vector<RVec2> p2d(vertices.size());
for (uint32_t i = 0; i < vertices.size(); ++i)
{
p2d[i] = getProjectedPointWithWinding(vertices[i], dr);
}
for (size_t i = 0; i < edges.size(); ++i)
{
if (!added[edges[i].s])
{
insertPoint(p2d, state, edges[i].s, edges);
added[edges[i].s] = true;
}
if (!added[edges[i].e])
{
insertPoint(p2d, state, edges[i].e, edges);
added[edges[i].e] = true;
}
if (edges[i].s != edges[i].e)
{
insertEdge(p2d, state, edges[i].s, edges[i].e);
}
}
for (uint32_t t = 0; t < state.size(); ++t)
{
if (state[t].p[0] != -1)
{
output.push_back(state[t]);
}
}
}
int32_t intersectSegments(RVec3& s1, RVec3& e1, RVec3& s2, RVec3& e2, ProjectionDirections dir,
std::vector<cpp_rational>& t1v, std::vector<cpp_rational>& t2v);
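/**
    Coplanar case of the triangle-triangle intersection: clips the edges of each triangle against the other
    one in the projection plane 'dr' and appends the resulting segments to both triangles' stencils.
*/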
void getTriangleIntersectionCoplanar(uint32_t tr1, uint32_t tr2, std::vector<std::vector<RVec3> >& stencil,
ProjectionDirections dr)
{
std::vector<cpp_rational> intr1[3];
std::vector<cpp_rational> intr2[3];
RVec3 p1[3];
p1[0] = stencil[tr1][0];
p1[1] = stencil[tr1][1];
p1[2] = stencil[tr1][3];
RVec3 p2[3];
p2[0] = stencil[tr2][0];
p2[1] = stencil[tr2][1];
p2[2] = stencil[tr2][3];
for (uint32_t i = 0; i < 3; ++i)
{
for (uint32_t j = 0; j < 3; ++j)
{
intersectSegments(p1[i], p1[(i + 1) % 3], p2[j], p2[(j + 1) % 3], dr, intr1[i], intr2[j]);
}
}
int32_t inRel1[3];
for (uint32_t i = 0; i < 3; ++i)
{
inRel1[i] = isPointInside(getProjectedPointWithWinding(p2[0], dr), getProjectedPointWithWinding(p2[1], dr),
getProjectedPointWithWinding(p2[2], dr), getProjectedPointWithWinding(p1[i], dr));
}
int32_t inRel2[3];
for (uint32_t i = 0; i < 3; ++i)
{
inRel2[i] = isPointInside(getProjectedPointWithWinding(p1[0], dr), getProjectedPointWithWinding(p1[1], dr),
getProjectedPointWithWinding(p1[2], dr), getProjectedPointWithWinding(p2[i], dr));
}
for (uint32_t i = 0; i < 3; ++i)
{
if (inRel1[i] == INSIDE_TR && inRel1[(i + 1) % 3] == INSIDE_TR)
{
stencil[tr2].push_back(p1[i]);
stencil[tr2].push_back(p1[(i + 1) % 3]);
}
else
{
if (inRel1[i] == INSIDE_TR && intr1[i].size() == 1)
{
stencil[tr2].push_back(p1[i]);
stencil[tr2].push_back((p1[(i + 1) % 3] - p1[i]) * intr1[i][0] + p1[i]);
}
if (inRel1[(i + 1) % 3] == INSIDE_TR && intr1[i].size() == 1)
{
stencil[tr2].push_back(p1[(i + 1) % 3]);
stencil[tr2].push_back((p1[(i + 1) % 3] - p1[i]) * intr1[i][0] + p1[i]);
}
if (intr1[i].size() == 2)
{
stencil[tr2].push_back((p1[(i + 1) % 3] - p1[i]) * intr1[i][0] + p1[i]);
stencil[tr2].push_back((p1[(i + 1) % 3] - p1[i]) * intr1[i][1] + p1[i]);
}
}
}
for (uint32_t i = 0; i < 3; ++i)
{
if (inRel2[i] == INSIDE_TR && inRel2[(i + 1) % 3] == INSIDE_TR)
{
stencil[tr1].push_back(p2[i]);
stencil[tr1].push_back(p2[(i + 1) % 3]);
}
else
{
if (inRel2[i] == INSIDE_TR && intr2[i].size() == 1)
{
stencil[tr1].push_back(p2[i]);
stencil[tr1].push_back((p2[(i + 1) % 3] - p2[i]) * intr2[i][0] + p2[i]);
}
if (inRel2[(i + 1) % 3] == INSIDE_TR && intr2[i].size() == 1)
{
stencil[tr1].push_back(p2[(i + 1) % 3]);
stencil[tr1].push_back((p2[(i + 1) % 3] - p2[i]) * intr2[i][0] + p2[i]);
}
if (intr2[i].size() == 2)
{
stencil[tr1].push_back((p2[(i + 1) % 3] - p2[i]) * intr2[i][0] + p2[i]);
stencil[tr1].push_back((p2[(i + 1) % 3] - p2[i]) * intr2[i][1] + p2[i]);
}
}
}
}
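/**
    Computes the intersection segment of two triangles with exact arithmetic. The vertices of each triangle
    are classified against the other triangle's plane; both triangles are then parameterized along the common
    line of the two planes and, if the parameter intervals overlap, the shared segment is appended to both
    stencils. Falls back to the coplanar routine when all signed distances are zero.
    Returns 1 if a segment was added, 0 otherwise.
*/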
int32_t
getTriangleIntersection3d(uint32_t tr1, uint32_t tr2, std::vector<std::vector<RVec3> >& stencil, ProjectionDirections dr)
{
RatPlane pl1(stencil[tr1][0], stencil[tr1][1], stencil[tr1][3]);
if (pl1.n.isZero())
{
std::swap(tr1, tr2);
pl1 = RatPlane(stencil[tr1][0], stencil[tr1][1], stencil[tr1][3]);
if (pl1.n.isZero())
return 0;
}
cpp_rational d1 = pl1.distance(stencil[tr2][0]);
cpp_rational d2 = pl1.distance(stencil[tr2][1]);
cpp_rational d3 = pl1.distance(stencil[tr2][3]);
int32_t sd1 = d1.sign();
int32_t sd2 = d2.sign();
int32_t sd3 = d3.sign();
if (sd1 == 0 && sd2 == 0 && sd3 == 0)
{
getTriangleIntersectionCoplanar(tr1, tr2, stencil, dr);
return 0;
}
    /**
        No intersection: all vertices of the second triangle lie strictly on one side of the first triangle's plane.
    */
if (sd1 < 0 && sd2 < 0 && sd3 < 0)
return 0;
if (sd1 > 0 && sd2 > 0 && sd3 > 0)
return 0;
RVec3 tb0 = stencil[tr2][0];
RVec3 tb1 = stencil[tr2][1];
RVec3 tb2 = stencil[tr2][3];
if (sd1 * sd3 > 0)
{
std::swap(tb1, tb2);
std::swap(d2, d3);
}
else
{
if (sd2 * sd3 > 0)
{
std::swap(tb0, tb2);
std::swap(d1, d3);
}
else
{
if (sd3 == 0 && sd1 * sd2 < 0)
{
std::swap(tb0, tb2);
std::swap(d1, d3);
}
}
}
RatPlane pl2(stencil[tr2][0], stencil[tr2][1], stencil[tr2][3]);
cpp_rational d21 = pl2.distance(stencil[tr1][0]);
cpp_rational d22 = pl2.distance(stencil[tr1][1]);
cpp_rational d23 = pl2.distance(stencil[tr1][3]);
int32_t sd21 = d21.sign();
int32_t sd22 = d22.sign();
int32_t sd23 = d23.sign();
if (sd21 < 0 && sd22 < 0 && sd23 < 0)
return 0;
if (sd21 > 0 && sd22 > 0 && sd23 > 0)
return 0;
RVec3 ta0 = stencil[tr1][0];
RVec3 ta1 = stencil[tr1][1];
RVec3 ta2 = stencil[tr1][3];
if (sd21 * sd23 > 0)
{
std::swap(ta1, ta2);
std::swap(d22, d23);
}
else
{
if (sd22 * sd23 > 0)
{
std::swap(ta0, ta2);
std::swap(d21, d23);
}
else
{
if (sd23 == 0 && sd21 * sd22 < 0)
{
std::swap(ta0, ta2);
std::swap(d21, d23);
}
}
}
//////////////////////////////////////////////////
RVec3 dir = ta2 - ta0;
cpp_rational dirPlaneDot = dir.dot(pl2.n);
RVec3 pointOnIntersectionLine;
if (dirPlaneDot != 0)
{
pointOnIntersectionLine = ta0 - dir * (d21 / dirPlaneDot);
}
else
{
pointOnIntersectionLine = ta0;
}
RVec3 interLineDir = pl1.n.cross(pl2.n);
cpp_rational sqd = interLineDir.dot(interLineDir);
if (sqd.is_zero())
return 0;
cpp_rational t1p2 = (ta1 - pointOnIntersectionLine).dot(interLineDir) / sqd;
cpp_rational t1p3 = (ta2 - pointOnIntersectionLine).dot(interLineDir) / sqd;
cpp_rational t1p2param = t1p2;
if (d22 != d23)
{
t1p2param = t1p2 + (t1p3 - t1p2) * (d22 / (d22 - d23));
}
t1p2 = (tb0 - pointOnIntersectionLine).dot(interLineDir) / sqd;
t1p3 = (tb2 - pointOnIntersectionLine).dot(interLineDir) / sqd;
cpp_rational t2p1param = t1p2;
if (d1 != d3)
{
t2p1param = t1p2 + (t1p3 - t1p2) * d1 / (d1 - d3);
}
t1p2 = (tb1 - pointOnIntersectionLine).dot(interLineDir) / sqd;
cpp_rational t2p2param = t1p2;
if (d2 != d3)
{
t2p2param = t1p2 + (t1p3 - t1p2) * d2 / (d2 - d3);
}
cpp_rational beg1 = 0;
if (t1p2param < 0)
{
std::swap(beg1, t1p2param);
}
if (t2p2param < t2p1param)
{
std::swap(t2p2param, t2p1param);
}
cpp_rational minEnd = std::min(t1p2param, t2p2param);
cpp_rational maxBeg = std::max(beg1, t2p1param);
if (minEnd > maxBeg)
{
RVec3 p1 = pointOnIntersectionLine + interLineDir * maxBeg;
RVec3 p2 = pointOnIntersectionLine + interLineDir * minEnd;
stencil[tr1].push_back(p1);
stencil[tr1].push_back(p2);
stencil[tr2].push_back(p1);
stencil[tr2].push_back(p2);
return 1;
}
return 0;
}
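/**
    Intersects two segments in the projection plane 'dir'. For non-parallel segments the intersection
    parameter along each segment is appended to t1v / t2v when it lies strictly inside that segment and on
    the other one; for collinear overlapping segments the parameters of the overlapping endpoints are
    appended instead.
*/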
int32_t intersectSegments(RVec3& s1, RVec3& e1, RVec3& s2, RVec3& e2, ProjectionDirections dir,
std::vector<cpp_rational>& t1v, std::vector<cpp_rational>& t2v)
{
RVec2 s1p = getProjectedPointWithWinding(s1, dir);
RVec2 e1p = getProjectedPointWithWinding(e1, dir);
RVec2 s2p = getProjectedPointWithWinding(s2, dir);
RVec2 e2p = getProjectedPointWithWinding(e2, dir);
RVec2 dir1 = e1p - s1p;
RVec2 dir2 = s2p - e2p;
cpp_rational crs = dir1.cross(dir2);
if (crs != 0)
{
cpp_rational c1 = s2p.x - s1p.x;
cpp_rational c2 = s2p.y - s1p.y;
cpp_rational det1 = c1 * dir2.y - c2 * dir2.x;
cpp_rational det2 = dir1.x * c2 - dir1.y * c1;
cpp_rational t1 = det1 / crs;
cpp_rational t2 = det2 / crs;
if (t1 > 0 && t1 < 1 && (t2 >= 0 && t2 <= 1))
{
t1v.push_back(t1);
}
if (t2 > 0 && t2 < 1 && (t1 >= 0 && t1 <= 1))
{
t2v.push_back(t2);
}
}
else
{
if (dir1.cross(s2p - s1p) == 0)
{
if (dir1.x != 0)
{
cpp_rational t1 = (s2p.x - s1p.x) / dir1.x;
cpp_rational t2 = (e2p.x - s1p.x) / dir1.x;
if (t1 > 0 && t1 < 1)
t1v.push_back(t1);
if (t2 > 0 && t2 < 1)
t1v.push_back(t2);
}
else
{
if (dir1.y != 0)
{
cpp_rational t1 = (s2p.y - s1p.y) / dir1.y;
cpp_rational t2 = (e2p.y - s1p.y) / dir1.y;
if (t1 > 0 && t1 < 1)
t1v.push_back(t1);
if (t2 > 0 && t2 < 1)
t1v.push_back(t2);
}
}
}
if (dir2.cross(s1p - s2p) == 0)
{
dir2 = e2p - s2p;
if (dir2.x != 0)
{
cpp_rational t1 = (s1p.x - s2p.x) / dir2.x;
cpp_rational t2 = (e1p.x - s2p.x) / dir2.x;
if (t1 > 0 && t1 < 1)
t2v.push_back(t1);
if (t2 > 0 && t2 < 1)
t2v.push_back(t2);
}
else
{
if (dir2.y != 0)
{
cpp_rational t1 = (s1p.y - s2p.y) / dir2.y;
cpp_rational t2 = (e1p.y - s2p.y) / dir2.y;
if (t1 > 0 && t1 < 1)
t2v.push_back(t1);
if (t2 > 0 && t2 < 1)
t2v.push_back(t2);
}
}
}
}
return 1;
}
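/**
    Lexicographic ordering of exact rational points, used as the std::map comparer when welding
    intersection points into a shared vertex list.
*/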
struct RVec3Comparer
{
bool operator()(const RVec3& a, const RVec3& b) const
{
if (a.x < b.x)
return true;
if (a.x > b.x)
return false;
if (a.y < b.y)
return true;
if (a.y > b.y)
return false;
if (a.z < b.z)
return true;
return false;
}
};
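/**
    Barycentric coordinates (u, v) of point p in triangle (a, b, c), so that p = a + u * (b - a) + v * (c - a).
*/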
void getBarycentricCoords(NvVec2& a, NvVec2& b, NvVec2& c, NvVec2& p, float& u, float& v)
{
NvVec3 v1(b.x - a.x, c.x - a.x, a.x - p.x);
NvVec3 v2(b.y - a.y, c.y - a.y, a.y - p.y);
NvVec3 resl = v1.cross(v2);
u = resl.x / resl.z;
v = resl.y / resl.z;
}
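/**
    Cleans the input mesh by resolving self-intersections and keeping only the exterior surface. High-level pipeline:
    1. Rescale vertices into the unit cube and snap them to a grid.
    2. Compute exact pairwise triangle-triangle intersections (per-triangle "stencils" of segments).
    3. Split mutually intersecting stencil segments and weld the resulting points.
    4. Re-triangulate every original triangle with a constrained Delaunay triangulation of its stencil.
    5. Keep only the exterior surface (flood fill across edges, seeded at an extremal triangle).
    6. Interpolate UVs and normals from the parent triangles and build the output mesh.
*/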
Mesh* MeshCleanerImpl::cleanMesh(const Mesh* mesh)
{
/**
======= Get mesh data ===========
*/
std::vector<Vertex> vertices;
std::vector<Edge> edges;
std::vector<Facet> facets;
vertices.resize(mesh->getVerticesCount());
edges.resize(mesh->getEdgesCount());
facets.resize(mesh->getFacetCount());
nvidia::NvBounds3 bnd;
bnd.setEmpty();
for (uint32_t i = 0; i < mesh->getVerticesCount(); ++i)
{
vertices[i] = mesh->getVertices()[i];
bnd.include(toNvShared(vertices[i].p));
}
for (uint32_t i = 0; i < mesh->getEdgesCount(); ++i)
{
edges[i] = mesh->getEdges()[i];
}
for (uint32_t i = 0; i < mesh->getFacetCount(); ++i)
{
facets[i] = mesh->getFacetsBuffer()[i];
}
//======================================
/**
Transform vertices to fit unit cube and snap them to grid.
**/
float scale = 1.0f / bnd.getExtents().abs().maxElement();
    int32_t gridSize = 10000; // Grid resolution to which vertex positions will be snapped.
for (uint32_t i = 0; i < mesh->getVerticesCount(); ++i)
{
vertices[i].p = (vertices[i].p - fromNvShared(bnd.minimum)) * scale;
vertices[i].p.x = std::floor(vertices[i].p.x * gridSize) / gridSize;
vertices[i].p.y = std::floor(vertices[i].p.y * gridSize) / gridSize;
vertices[i].p.z = std::floor(vertices[i].p.z * gridSize) / gridSize;
}
std::vector<std::vector<RVec3> > triangleStencil(facets.size());
std::vector<NvVec3> facetsNormals(facets.size());
std::vector<NvBounds3> facetBound(facets.size());
for (uint32_t tr1 = 0; tr1 < facets.size(); ++tr1)
{
if (facets[tr1].edgesCount != 3)
{
return nullptr;
}
int32_t fed = facets[tr1].firstEdgeNumber;
triangleStencil[tr1].push_back(vertices[edges[fed].s].p);
triangleStencil[tr1].push_back(vertices[edges[fed].e].p);
triangleStencil[tr1].push_back(vertices[edges[fed + 1].s].p);
triangleStencil[tr1].push_back(vertices[edges[fed + 1].e].p);
triangleStencil[tr1].push_back(vertices[edges[fed + 2].s].p);
triangleStencil[tr1].push_back(vertices[edges[fed + 2].e].p);
facetBound[tr1].setEmpty();
facetBound[tr1].include(toNvShared(vertices[edges[fed].s].p));
facetBound[tr1].include(toNvShared(vertices[edges[fed].e].p));
facetBound[tr1].include(toNvShared(vertices[edges[fed + 2].s].p));
facetBound[tr1].fattenFast(0.001f);
facetsNormals[tr1] = toNvShared(vertices[edges[fed + 1].s].p - vertices[edges[fed].s].p)
.cross(toNvShared(vertices[edges[fed + 2].s].p - vertices[edges[fed].s].p));
}
/**
Build intersections between all pairs of triangles.
*/
for (uint32_t tr1 = 0; tr1 < facets.size(); ++tr1)
{
if (triangleStencil[tr1].empty())
continue;
for (uint32_t tr2 = tr1 + 1; tr2 < facets.size(); ++tr2)
{
if (triangleStencil[tr2].empty())
continue;
if (facetBound[tr1].intersects(facetBound[tr2]) == false)
continue;
getTriangleIntersection3d(tr1, tr2, triangleStencil, getProjectionDirection(facetsNormals[tr1]));
}
}
    /**
        Re-intersect all stencil segments within each triangle and split them at their mutual intersection points.
    */
for (uint32_t tr = 0; tr < triangleStencil.size(); ++tr)
{
std::vector<RVec3>& ctr = triangleStencil[tr];
std::vector<std::vector<cpp_rational> > perSegmentInters(ctr.size() / 2);
for (uint32_t sg1 = 6; sg1 < ctr.size(); sg1 += 2)
{
for (uint32_t sg2 = sg1 + 2; sg2 < ctr.size(); sg2 += 2)
{
intersectSegments(ctr[sg1], ctr[sg1 + 1], ctr[sg2], ctr[sg2 + 1],
getProjectionDirection(facetsNormals[tr]), perSegmentInters[sg1 / 2],
perSegmentInters[sg2 / 2]);
}
}
std::vector<RVec3> newStencil;
newStencil.reserve(ctr.size());
for (uint32_t i = 0; i < ctr.size(); i += 2)
{
int32_t csm = i / 2;
if (perSegmentInters[csm].size() == 0)
{
newStencil.push_back(ctr[i]);
newStencil.push_back(ctr[i + 1]);
}
else
{
cpp_rational current = 0;
newStencil.push_back(ctr[i]);
std::sort(perSegmentInters[csm].begin(), perSegmentInters[csm].end());
for (size_t j = 0; j < perSegmentInters[csm].size(); ++j)
{
if (perSegmentInters[csm][j] > current)
{
current = perSegmentInters[csm][j];
RVec3 pnt = (ctr[i + 1] - ctr[i]) * current + ctr[i];
newStencil.push_back(pnt);
newStencil.push_back(pnt);
}
}
newStencil.push_back(ctr[i + 1]);
}
}
ctr = newStencil;
}
std::vector<RVec3> finalPoints;
std::vector<std::vector<Edge> > tsten(facets.size());
{
std::map<RVec3, uint32_t, RVec3Comparer> mapping;
for (uint32_t tr1 = 0; tr1 < triangleStencil.size(); ++tr1)
{
for (uint32_t j = 0; j < triangleStencil[tr1].size(); j += 2)
{
auto it = mapping.find(triangleStencil[tr1][j]);
int32_t pt = 0;
if (it == mapping.end())
{
mapping[triangleStencil[tr1][j]] = finalPoints.size();
pt = finalPoints.size();
finalPoints.push_back(triangleStencil[tr1][j]);
}
else
{
pt = it->second;
}
Edge newed;
newed.s = pt;
it = mapping.find(triangleStencil[tr1][j + 1]);
if (it == mapping.end())
{
mapping[triangleStencil[tr1][j + 1]] = finalPoints.size();
pt = finalPoints.size();
finalPoints.push_back(triangleStencil[tr1][j + 1]);
}
else
{
pt = it->second;
}
newed.e = pt;
bool hasNewEdge = false;
for (uint32_t e = 0; e < tsten[tr1].size(); ++e)
{
if (tsten[tr1][e].s == newed.s && tsten[tr1][e].e == newed.e)
{
hasNewEdge = true;
break;
}
if (tsten[tr1][e].e == newed.s && tsten[tr1][e].s == newed.e)
{
hasNewEdge = true;
break;
}
}
if (!hasNewEdge)
tsten[tr1].push_back(newed);
}
}
}
    /**
        Build a constrained Delaunay triangulation for each triangle's stencil.
    */
std::vector<DelTriangle> trs;
for (uint32_t i = 0; i < tsten.size(); ++i)
{
if (tsten[i].size() < 3)
continue;
if (tsten[i].size() > 3)
{
int32_t oldSize = trs.size();
buildCDT(finalPoints, tsten[i], trs, getProjectionDirection(facetsNormals[i]));
for (uint32_t k = oldSize; k < trs.size(); ++k)
trs[k].parentTriangle = i;
}
else
{
trs.push_back(DelTriangle());
trs.back().parentTriangle = i;
for (uint32_t v = 0; v < 3; ++v)
trs.back().p[v] = tsten[i][v].s;
}
}
/**
Remove 'deleted' triangles from array.
*/
{
std::vector<DelTriangle> trstemp;
trstemp.reserve(trs.size());
for (uint32_t i = 0; i < trs.size(); ++i)
{
if (trs[i].p[0] != -1)
trstemp.push_back(trs[i]);
}
trs = trstemp;
}
/**
Filter exterior surface
*/
std::vector<bool> fillingMask(trs.size(), false);
std::map<std::pair<int32_t, int32_t>, int32_t> edgeMap;
std::vector<std::vector<int32_t> > edgeToTriangleMapping;
for (uint32_t i = 0; i < trs.size(); ++i)
{
if (trs[i].p[0] == -1)
continue;
if (trs[i].p[0] == trs[i].p[1] || trs[i].p[2] == trs[i].p[1] || trs[i].p[2] == trs[i].p[0])
{
trs[i].p[0] = -1;
continue;
}
#if 0 // Filter null-area triangles.
if ((finalPoints[trs[i].p[1]] - finalPoints[trs[i].p[0]]).cross(finalPoints[trs[i].p[2]] - finalPoints[trs[i].p[0]]).isZero())
{
trs[i].p[0] = -1;
continue;
}
#endif
for (uint32_t k = 0; k < 3; ++k)
{
int32_t es = trs[i].p[k];
int32_t ee = trs[i].p[(k + 1) % 3];
if (es > ee)
{
std::swap(es, ee);
}
auto pr = std::make_pair(es, ee);
auto iter = edgeMap.find(pr);
if (iter == edgeMap.end())
{
edgeMap[pr] = edgeToTriangleMapping.size();
trs[i].n[k] = edgeToTriangleMapping.size();
edgeToTriangleMapping.resize(edgeToTriangleMapping.size() + 1);
edgeToTriangleMapping.back().push_back(i);
}
else
{
for (uint32_t j = 0; j < edgeToTriangleMapping[iter->second].size(); ++j)
{
if (trs[edgeToTriangleMapping[iter->second][j]].compare(trs[i]))
{
trs[i].p[0] = -1;
break;
}
}
if (trs[i].p[0] != -1)
{
trs[i].n[k] = iter->second;
edgeToTriangleMapping[iter->second].push_back(i);
}
}
}
}
std::queue<int32_t> trque;
float maxx = -1000;
int32_t best = 0;
for (uint32_t i = 0; i < trs.size(); ++i)
{
if (trs[i].p[0] == -1)
continue;
float m = std::max(
finalPoints[trs[i].p[0]].x.convert_to<float>(),
std::max(finalPoints[trs[i].p[1]].x.convert_to<float>(), finalPoints[trs[i].p[2]].x.convert_to<float>()));
if (m > maxx && facetsNormals[trs[i].parentTriangle].x > 0)
{
maxx = m;
best = i;
}
}
if (!trs.empty())
{
trque.push(best);
}
while (!trque.empty())
{
int32_t trid = trque.front();
fillingMask[trid] = true;
DelTriangle& tr = trs[trque.front()];
trque.pop();
for (uint32_t ed = 0; ed < 3; ++ed)
{
auto& tlist = edgeToTriangleMapping[tr.n[ed]];
if (tlist.size() == 2)
{
for (uint32_t k = 0; k < tlist.size(); ++k)
{
int32_t to = tlist[k];
if (to != trid && !fillingMask[to] && edgeToTriangleMapping[trs[to].n[0]].size() > 0 &&
edgeToTriangleMapping[trs[to].n[1]].size() > 0 && edgeToTriangleMapping[trs[to].n[2]].size() > 0)
{
trque.push(tlist[k]);
fillingMask[tlist[k]] = true;
}
}
}
if (tlist.size() > 2)
{
int32_t bestPath = (tlist[0] == trid) ? tlist[1] : tlist[0];
RVec3 start = finalPoints[trs[trid].p[ed]];
RVec3 axis = finalPoints[trs[trid].p[(ed + 1) % 3]] - start;
RVec3 nAxis = finalPoints[trs[trid].p[(ed + 2) % 3]] - start;
RVec3 normal = axis.cross(nAxis);
uint32_t op = trs[bestPath].getOppPoint(trs[trid].p[ed], trs[trid].p[(ed + 1) % 3]);
RVec3 dir2 = (finalPoints[op] - start);
RVec3 normal2 = dir2.cross(axis);
cpp_rational bestDir = normal.cross(normal2).dot(axis);
cpp_rational oldDist = normal2.dot(normal2);
for (uint32_t k = 0; k < tlist.size(); ++k)
{
if (tlist[k] == trid)
continue;
op = trs[tlist[k]].getOppPoint(trs[trid].p[ed], trs[trid].p[(ed + 1) % 3]);
dir2 = (finalPoints[op] - start);
normal2 = dir2.cross(axis);
cpp_rational newOne = normal.cross(normal2).dot(axis);
if (newOne * oldDist < bestDir * normal2.dot(normal2))
{
oldDist = normal2.dot(normal2);
bestPath = tlist[k];
bestDir = newOne;
}
}
if (!fillingMask[bestPath] && edgeToTriangleMapping[trs[bestPath].n[0]].size() > 0 &&
edgeToTriangleMapping[trs[bestPath].n[1]].size() > 0 &&
edgeToTriangleMapping[trs[bestPath].n[2]].size() > 0)
{
trque.push(bestPath);
fillingMask[bestPath] = true;
}
}
edgeToTriangleMapping[tr.n[ed]].clear();
}
}
for (uint32_t id = 0; id < trs.size(); ++id)
{
if (!fillingMask[id])
{
trs[id].p[0] = -1; // Remove triangle
}
}
/////////////////////////////////////////////////////////////////////////////////////////////
std::vector<NvVec3> newVertices;
newVertices.resize(finalPoints.size());
for (uint32_t i = 0; i < finalPoints.size(); ++i)
{
newVertices[i].x = finalPoints[i].x.convert_to<float>();
newVertices[i].y = finalPoints[i].y.convert_to<float>();
newVertices[i].z = finalPoints[i].z.convert_to<float>();
}
/**
Rescale mesh to initial coordinates.
*/
for (uint32_t i = 0; i < finalPoints.size(); ++i)
{
newVertices[i] = newVertices[i] * (1.0f / scale) + bnd.minimum;
}
for (uint32_t i = 0; i < vertices.size(); ++i)
{
vertices[i].p = vertices[i].p * (1.0f / scale) + fromNvShared(bnd.minimum);
}
std::vector<Triangle> result;
result.reserve(trs.size());
{
std::vector<NvVec2> projectedTriangles(facets.size() * 3);
std::vector<Vertex> normalTriangles(facets.size() * 3);
for (uint32_t i = 0; i < facets.size(); ++i)
{
for (uint32_t k = 0; k < 3; ++k)
{
normalTriangles[i * 3 + k] = vertices[edges[facets[i].firstEdgeNumber + k].s];
projectedTriangles[i * 3 + k] = getProjectedPointWithWinding(
vertices[edges[facets[i].firstEdgeNumber + k].s].p, getProjectionDirection(facetsNormals[i])).toVec2();
}
}
for (uint32_t i = 0; i < trs.size(); ++i)
{
if (trs[i].p[0] == -1)
continue;
int32_t id = 0;
int32_t parentTriangle = trs[i].parentTriangle;
float u = 0, v = 0;
result.resize(result.size() + 1);
result.back().materialId = facets[parentTriangle].materialId;
result.back().smoothingGroup = facets[parentTriangle].smoothingGroup;
for (auto vert : { &result.back().a, &result.back().b, &result.back().c })
{
toNvShared(vert->p) = newVertices[trs[i].p[id]];
NvVec2 p = getProjectedPointWithWinding(vert->p, getProjectionDirection(facetsNormals[parentTriangle])).toVec2();
getBarycentricCoords(projectedTriangles[parentTriangle * 3], projectedTriangles[parentTriangle * 3 + 1],
projectedTriangles[parentTriangle * 3 + 2], p, u, v);
vert->uv[0] = (1 - u - v) * normalTriangles[parentTriangle * 3].uv[0] +
u * normalTriangles[parentTriangle * 3 + 1].uv[0] +
v * normalTriangles[parentTriangle * 3 + 2].uv[0];
vert->n = (1 - u - v) * normalTriangles[parentTriangle * 3].n +
u * normalTriangles[parentTriangle * 3 + 1].n + v * normalTriangles[parentTriangle * 3 + 2].n;
++id;
}
}
}
    /**
        Serialize the result triangles into flat buffers and build the output Mesh.
    */
std::vector<NvcVec3> newMeshVertices(result.size() * 3);
std::vector<NvcVec3> newMeshNormals(result.size() * 3);
std::vector<NvcVec2> newMeshUvs(result.size() * 3);
std::vector<int32_t> newMaterialIds(result.size());
std::vector<int32_t> newSmoothingGroups(result.size());
for (uint32_t i = 0; i < result.size(); ++i)
{
Vertex* arr[3] = { &result[i].a, &result[i].b, &result[i].c };
for (uint32_t k = 0; k < 3; ++k)
{
newMeshVertices[i * 3 + k] = arr[k]->p;
newMeshNormals[i * 3 + k] = arr[k]->n;
newMeshUvs[i * 3 + k] = arr[k]->uv[0];
}
}
std::vector<uint32_t> serializedIndices;
serializedIndices.reserve(result.size() * 3);
int32_t cindex = 0;
for (uint32_t i = 0; i < result.size(); ++i)
{
newMaterialIds[i] = result[i].materialId;
newSmoothingGroups[i] = result[i].smoothingGroup;
for (uint32_t pi = 0; pi < 3; ++pi)
serializedIndices.push_back(cindex++);
}
MeshImpl* rMesh = new MeshImpl(newMeshVertices.data(), newMeshNormals.data(), newMeshUvs.data(),
static_cast<uint32_t>(newMeshVertices.size()), serializedIndices.data(),
static_cast<uint32_t>(serializedIndices.size()));
rMesh->setMaterialId(newMaterialIds.data());
rMesh->setSmoothingGroup(newSmoothingGroups.data());
return rMesh;
}
void MeshCleanerImpl::release()
{
delete this;
}
| 53,514 | C++ | 29.650057 | 134 | 0.461038 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringMeshNoiser.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTEXTAUTHORINGMESHNOISER_H
#define NVBLASTEXTAUTHORINGMESHNOISER_H
#include <vector>
#include <map>
#include "NvBlastExtAuthoringInternalCommon.h"
namespace Nv
{
namespace Blast
{
class SimplexNoise;
/**
Structure used on tesselation stage. Maps edge to two neighbor triangles
*/
struct EdgeToTriangles
{
int32_t tr[2];
int32_t c;
EdgeToTriangles()
{
c = 0;
}
/**
        Add a triangle to this edge. Must not be called more than twice for one edge.
*/
void add(int32_t t)
{
tr[c] = t;
++c;
}
/**
Replaces mapping from one triangle to another.
*/
void replace(int32_t from, int32_t to)
{
if (tr[0] == from)
{
tr[0] = to;
}
else
{
if (c == 2 && tr[1] == from)
{
tr[1] = to;
}
}
}
/**
        Get the triangle mapped by this edge whose index differs from the provided one.
*/
int32_t getNot(int32_t id)
{
if (tr[0] != id)
{
return tr[0];
}
if (c == 2 && tr[1] != id)
{
return tr[1];
}
return -1;
}
};
/**
Tool for graphics mesh tesselation and for adding noise to the internal surface. Each triangle must have its
Triangle::userInfo field initialized (0 for external surface triangles, != 0 for internal ones)
*/
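/**
Illustrative usage sketch (not taken from the SDK samples; 'noise' stands for an already configured
SimplexNoise instance and the numeric arguments are arbitrary example values):

    MeshNoiser noiser;
    noiser.setMesh(triangles);               // std::vector<Triangle> with Triangle::userInfo set per triangle
    noiser.tesselateInternalSurface(0.2f);   // limit internal-surface edge length (maxLen)
    noiser.applyNoise(noise, 0.5f, 3, 0.3f); // displace internal vertices with damped noise
    std::vector<Triangle> result = noiser.getMesh();
*/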
class MeshNoiser
{
public:
MeshNoiser()
{
reset();
}
void reset();
/**
Edge flags
*/
enum EdgeFlag { INTERNAL_EDGE, EXTERNAL_BORDER_EDGE, INTERNAL_BORDER_EDGE, EXTERNAL_EDGE, NONE };
/**
Set mesh to tesselate and apply noise
*/
void setMesh(const std::vector<Triangle>& mesh);
/**
Tesselate internal surface.
\param[in] maxLen - maximal length of edge on internal surface.
*/
void tesselateInternalSurface(float maxLen);
/**
        Apply noise to the internal surface. Must be called only after tesselation!
        \param[in] noise - noise generator
        \param[in] falloff - damping of noise near the external surface
        \param[in] relaxIterations - number of smoothing iterations before applying noise
        \param[in] relaxFactor - amount of smoothing before applying noise.
*/
void applyNoise(SimplexNoise& noise, float falloff, int32_t relaxIterations, float relaxFactor);
std::vector<Triangle> getMesh();
private:
nvidia::NvVec3 mOffset;
float mScale;
bool isTesselated;
/**
Mesh data
*/
std::vector<Vertex> mVertices;
std::vector<TriangleIndexed> mTriangles;
std::vector<Edge> mEdges;
std::map<Vertex, int32_t, VrtComp> mVertMap;
std::map<Edge, int32_t> mEdgeMap;
/**
Final triangles.
*/
std::vector<Triangle> mResultTriangles;
int32_t addVerticeIfNotExist(const Vertex& p);
int32_t addEdge(const Edge& e);
int32_t findEdge(const Edge& e);
void collapseEdge(int32_t id);
void divideEdge(int32_t id);
void updateVertEdgeInfo();
void updateEdgeTriangleInfo();
void relax(int32_t iterations, float factor, std::vector<Vertex>& vertices);
void recalcNoiseDirs();
std::vector<bool> mRestrictionFlag;
std::vector<EdgeFlag> mEdgeFlag;
std::vector<EdgeToTriangles> mTrMeshEdToTr;
std::vector<int32_t> mVertexValence;
std::vector<std::vector<int32_t> > mVertexToTriangleMap;
std::vector<float> mVerticesDistances;
std::vector<nvidia::NvVec3> mVerticesNormalsSmoothed;
std::vector<uint32_t> mPositionMappedVrt;
std::vector<std::vector<int32_t> > mGeometryGraph;
void prebuildEdgeFlagArray();
void computePositionedMapping();
void computeFalloffAndNormals();
void prebuildTesselatedTriangles();
};
} // namespace Blast
} // namespace Nv
#endif // ! NVBLASTEXTAUTHORINGMESHNOISER_H
| 7,224 | C | 36.435233 | 135 | 0.500415 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringTriangulator.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
// This warning arises when using some stl containers with older versions of VC
// c:\program files (x86)\microsoft visual studio 12.0\vc\include\xtree(1826): warning C4702: unreachable code
#include "NvPreprocessor.h"
#if NV_VC && NV_VC < 14
#pragma warning(disable : 4702)
#endif
#include "NvBlastExtAuthoringTriangulator.h"
#include "NvBlastExtAuthoringMesh.h"
#include "NvBlastExtAuthoringTypes.h"
#include "NvPreprocessor.h"
#include "NvBlastExtAuthoringBooleanToolImpl.h"
#include <NvBlastAssert.h>
#include <NvBlastNvSharedHelpers.h>
#include <math.h>
#include <algorithm>
#include <list>
#include <queue>
#include <set>
#include <vector>
using nvidia::NvVec2;
using nvidia::NvVec3;
namespace Nv
{
namespace Blast
{
// used with ear clipping algorithm to deal with floating point precision artifacts for nearly co-linear points
#define MIN_ANGLE (0.0001f)
// helper for ear clipping algorithm
// holds the vertex indices for the previous and next vertex in the facet
// along with the scaled area of the triangle defined by the 3 vertices
struct AdjVertInfo
{
uint32_t prev;
uint32_t next;
float scaledArea;
};
NV_FORCE_INLINE bool compareTwoFloats(float a, float b)
{
return std::abs(b - a) <= FLT_EPSILON * std::abs(b + a);
}
NV_FORCE_INLINE bool compareTwoVertices(const NvVec3& a, const NvVec3& b)
{
return compareTwoFloats(a.x, b.x) && compareTwoFloats(a.y, b.y) && compareTwoFloats(a.z, b.z);
}
NV_FORCE_INLINE bool compareTwoVertices(const NvVec2& a, const NvVec2& b)
{
return compareTwoFloats(a.x, b.x) && compareTwoFloats(a.y, b.y);
}
NV_FORCE_INLINE float getRotation(const NvVec2& a, const NvVec2& b)
{
return a.x * b.y - a.y * b.x;
}
NV_FORCE_INLINE bool pointInside(
const NvVec2& ba, const NvVec2& cb, const NvVec2& ac,
const NvVec2& a, const NvVec2& b, const NvVec2& c,
const NvVec2& pnt
) {
// Co-positional verts are not considered inside because that would break the exterior of the facet
if (compareTwoVertices(a, pnt) || compareTwoVertices(b, pnt) || compareTwoVertices(c, pnt))
{
return false;
}
const float v1 = getRotation(ba, (pnt - a).getNormalized());
const float v2 = getRotation(cb, (pnt - b).getNormalized());
const float v3 = getRotation(ac, (pnt - c).getNormalized());
// If the sign of all angles match, then the point is inside
// A 0 angle is considered inside because otherwise verts would get dropped during triangulation
return (v1 >= -MIN_ANGLE && v2 >= -MIN_ANGLE && v3 >= -MIN_ANGLE) ||
(v1 <= MIN_ANGLE && v2 <= MIN_ANGLE && v3 <= MIN_ANGLE);
}
static void updatePotentialEar(
uint32_t curr,
const Vertex* vert,
const ProjectionDirections& dir,
const std::map<uint32_t, AdjVertInfo>& adjVertInfoMap,
const std::list<uint32_t>& reflexVerts,
std::list<uint32_t>& potentialEars
) {
// remove from potential list if it exists already
// it will be added back if it is still a valid potential ear
const auto itr = std::find(potentialEars.begin(), potentialEars.end(), curr);
if (itr != potentialEars.end())
{
potentialEars.erase(itr);
}
// doing it this way so the map can be passed as a const reference, but it should always be fully populated
const auto mapItr = adjVertInfoMap.find(curr);
if (mapItr == adjVertInfoMap.end())
{
NVBLAST_ASSERT_WITH_MESSAGE(false, "this should never happen");
return;
}
// only convex verts need to be considered for potential ears
const AdjVertInfo& adjVertInfo = mapItr->second;
if (adjVertInfo.scaledArea <= 0.0f)
{
return;
}
// only need to check against reflex verts to see if they are inside potential ears
// convex verts can't be inside potential ears
if (reflexVerts.size())
{
const Vertex cV = vert[curr];
const Vertex pV = vert[adjVertInfo.prev];
const Vertex nV = vert[adjVertInfo.next];
const NvVec2 cVp = getProjectedPoint(cV.p, dir);
const NvVec2 pVp = getProjectedPoint(pV.p, dir);
const NvVec2 nVp = getProjectedPoint(nV.p, dir);
// if there are no other verts inside, then it is a potential ear
const NvVec2 ba = (nVp - cVp).getNormalized();
const NvVec2 cb = (pVp - nVp).getNormalized();
const NvVec2 ac = (cVp - pVp).getNormalized();
for (uint32_t vrt : reflexVerts)
{
// ignore reflex verts that are part of the tri being tested
if (vrt == adjVertInfo.prev || vrt == adjVertInfo.next)
{
continue;
}
const NvVec2 pnt = getProjectedPoint(vert[vrt].p, dir);
if (pointInside(ba, cb, ac, cVp, nVp, pVp, pnt))
{
return;
}
}
}
potentialEars.push_back(curr);
}
static void updateVertData(
uint32_t curr,
uint32_t prev,
uint32_t next,
const Vertex* vert,
const ProjectionDirections& dir,
std::map<uint32_t, AdjVertInfo>& adjVertInfoMap,
std::list<uint32_t>& reflexVerts
) {
// remove the index from the reflex list if there is already an entry for it
// it will be added back if it is still a reflex vertex
const auto reflexItr = std::find(reflexVerts.begin(), reflexVerts.end(), curr);
if (reflexItr != reflexVerts.end())
{
reflexVerts.erase(reflexItr);
}
// if next == prev it isn't a valid triangle
// this will happen when the facet has less than 3 verts in it
// no need to add them as reflex verts at that point, the algorithm is finishing up the final pass
float scaledArea = 0.0f;
if (prev != next)
{
const Vertex cV = vert[curr];
const Vertex pV = vert[prev];
const Vertex nV = vert[next];
const NvVec2 cVp = getProjectedPoint(cV.p, dir);
const NvVec2 pVp = getProjectedPoint(pV.p, dir);
const NvVec2 nVp = getProjectedPoint(nV.p, dir);
const NvVec2 prevEdge = (cVp - pVp);
const NvVec2 nextEdge = (nVp - cVp);
// use normalized vectors to get a better calc for the angle between them
float rot = getRotation(prevEdge.getNormalized(), nextEdge.getNormalized());
if (dir & OPPOSITE_WINDING)
rot = -rot;
if (rot > MIN_ANGLE)
{
// this is a valid convex vertex, calculate 2 * area (used for sorting later)
// actual area isn't needed because it is only used to compare with other ears, so relative numbers are fine
scaledArea = getRotation(prevEdge, nextEdge);
if (dir & OPPOSITE_WINDING)
scaledArea = -scaledArea;
}
else
{
// the angle is roughly 180 or greater, consider it a reflex vertex
reflexVerts.push_back(curr);
}
}
// the scaled area will be used to sort potential ears later
adjVertInfoMap[curr] = {prev, next, scaledArea};
}
void Triangulator::triangulatePolygonWithEarClipping(const std::vector<uint32_t>& inputPolygon, const Vertex* vert,
const ProjectionDirections& dir)
{
uint32_t vCount = static_cast<uint32_t>(inputPolygon.size());
if (vCount < 3)
{
return;
}
// High level of ear clipping algorithm:
//
// - find potential ears (3 consecutive verts that form a triangle fully inside the facet with no other points from the facet inside or on an edge)
// while (potential ears)
// - sort the potential ears by area
// - add tri formed by largest ear to output and remove vert from the tip of the ear from the facet
// - update potential ears for remaining 2 verts in the tri
//
// This will ensure that no sliver triangles are created
// start by building up vertex data and a list of reflex (interior angle >= 180) verts
std::list<uint32_t> reflexVerts;
std::list<uint32_t> potentialEars;
std::map<uint32_t, AdjVertInfo> adjVertInfoMap;
for (uint32_t curr = 0; curr < vCount; curr++)
{
const uint32_t prev = (curr == 0) ? vCount - 1 : curr - 1;
const uint32_t next = (curr == vCount - 1) ? 0 : curr + 1;
const uint32_t currIdx = inputPolygon[curr];
const uint32_t prevIdx = inputPolygon[prev];
const uint32_t nextIdx = inputPolygon[next];
updateVertData(currIdx, prevIdx, nextIdx, vert, dir, adjVertInfoMap, reflexVerts);
}
// build the list of potential ears defined by convex verts by checking any reflex vert is inside
for (auto pair : adjVertInfoMap)
{
// if a vert is not a reflex, it must be convex and should be considered as an ear
const uint32_t currIdx = pair.first;
if (std::find(reflexVerts.begin(), reflexVerts.end(), currIdx) == reflexVerts.end())
{
updatePotentialEar(currIdx, vert, dir, adjVertInfoMap, reflexVerts, potentialEars);
}
}
// descending sort by scaled area
auto compArea = [&adjVertInfoMap](const uint32_t& a, const uint32_t& b) -> bool
{
return (adjVertInfoMap[a].scaledArea > adjVertInfoMap[b].scaledArea);
};
while (potentialEars.size())
{
// sort the potential ear list based on the area of the triangles they form
potentialEars.sort(compArea);
// add the largest triangle to the output
const uint32_t curr = potentialEars.front();
const AdjVertInfo& adjVertInfo = adjVertInfoMap[curr];
mBaseMeshTriangles.push_back(TriangleIndexed(curr, adjVertInfo.prev, adjVertInfo.next));
// remove the ear tip from the potential ear list
potentialEars.pop_front();
// update data for the other 2 verts involved
const uint32_t prevPrev = adjVertInfoMap[adjVertInfo.prev].prev;
const uint32_t nextNext = adjVertInfoMap[adjVertInfo.next].next;
// vert data must be updated first for both
updateVertData(adjVertInfo.prev, prevPrev, adjVertInfo.next, vert, dir, adjVertInfoMap, reflexVerts);
updateVertData(adjVertInfo.next, adjVertInfo.prev, nextNext, vert, dir, adjVertInfoMap, reflexVerts);
// then potential ear list
updatePotentialEar(adjVertInfo.prev, vert, dir, adjVertInfoMap, reflexVerts, potentialEars);
updatePotentialEar(adjVertInfo.next, vert, dir, adjVertInfoMap, reflexVerts, potentialEars);
}
}
struct LoopInfo
{
LoopInfo()
{
used = false;
}
NvVec3 normal;
float area;
int32_t index;
bool used;
bool operator<(const LoopInfo& b) const
{
return area < b.area;
}
};
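// Merges a hole loop into its enclosing external loop by inserting a bridge edge: the hole vertex with the
// largest projected x is connected to the closest visible point of the external loop (standard hole bridging
// for ear clipping). On success the hole is spliced into 'externalLoop' and 0 is returned; 1 indicates failure.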
int32_t unitePolygons(std::vector<uint32_t>& externalLoop, std::vector<uint32_t>& internalLoop, Vertex* vrx,
const ProjectionDirections& dir)
{
if (externalLoop.size() < 3 || internalLoop.size() < 3)
return 1;
/**
Find point with maximum x-coordinate
*/
float x_max = -MAXIMUM_EXTENT;
int32_t mIndex = -1;
for (uint32_t i = 0; i < internalLoop.size(); ++i)
{
float nx = getProjectedPoint(vrx[internalLoop[i]].p, dir).x;
if (nx > x_max)
{
mIndex = i;
x_max = nx;
}
}
if (mIndex == -1)
{
return 1;
}
/**
Search for base point on external loop
*/
float minX = MAXIMUM_EXTENT;
int32_t vrtIndex = -1;
bool isFromBuffer = 0;
NvVec2 holePoint = getProjectedPoint(vrx[internalLoop[mIndex]].p, dir);
NvVec2 computedPoint;
for (uint32_t i = 0; i < externalLoop.size(); ++i)
{
int32_t nx = (i + 1) % externalLoop.size();
NvVec2 pnt1 = getProjectedPoint(vrx[externalLoop[i]].p, dir);
NvVec2 pnt2 = getProjectedPoint(vrx[externalLoop[nx]].p, dir);
if (pnt1.x < x_max && pnt2.x < x_max)
{
continue;
}
NvVec2 vc = pnt2 - pnt1;
if (vc.y == 0 && pnt1.y == holePoint.y)
{
if (pnt1.x < minX && pnt1.x < pnt2.x && pnt1.x > x_max)
{
minX = pnt1.x;
vrtIndex = i;
isFromBuffer = true;
}
if (pnt2.x < minX && pnt2.x < pnt1.x && pnt2.x > x_max)
{
minX = pnt2.x;
vrtIndex = nx;
isFromBuffer = true;
}
}
else
{
float t = (holePoint.y - pnt1.y) / vc.y;
if (t <= 1 && t >= 0)
{
NvVec2 tempPoint = vc * t + pnt1;
if (tempPoint.x < minX && tempPoint.x > x_max)
{
minX = tempPoint.x;
vrtIndex = i;
isFromBuffer = false;
computedPoint = tempPoint;
}
}
}
}
if (vrtIndex == -1)
{
// std::cout << "Triangulation: base vertex for inner loop is not found..." << std::endl;
return 1;
}
int32_t bridgePoint = -1;
float bestAngle = 100;
if (!isFromBuffer)
{
NvVec2 ex1 = getProjectedPoint(vrx[externalLoop[vrtIndex]].p, dir);
NvVec2 ex2 = getProjectedPoint(vrx[externalLoop[(vrtIndex + 1) % externalLoop.size()]].p, dir);
if (ex1.x > ex2.x)
{
vrtIndex = (vrtIndex + 1) % externalLoop.size();
ex1 = ex2;
}
/* Check if some point is inside triangle */
bool notFound = true;
const NvVec2 ba = (ex1 - holePoint).getNormalized();
const NvVec2 cb = (computedPoint - ex1).getNormalized();
const NvVec2 ac = (holePoint - computedPoint).getNormalized();
for (int32_t i = 0; i < (int32_t)externalLoop.size(); ++i)
{
const NvVec2 tempPoint = getProjectedPoint(vrx[externalLoop[i]].p, dir);
if (pointInside(ba, cb, ac, holePoint, ex1, computedPoint, tempPoint))
{
notFound = false;
const NvVec2 cVp = getProjectedPoint(vrx[externalLoop[i]].p, dir);
const NvVec2 pVp =
getProjectedPoint(vrx[externalLoop[(i - 1 + externalLoop.size()) % externalLoop.size()]].p, dir);
const NvVec2 nVp = getProjectedPoint(vrx[externalLoop[(i + 1) % externalLoop.size()]].p, dir);
float rt = getRotation((cVp - pVp).getNormalized(), (nVp - pVp).getNormalized());
if (dir & OPPOSITE_WINDING)
rt = -rt;
if (rt < MIN_ANGLE)
continue;
const float tempAngle = NvVec2(1, 0).dot((tempPoint - holePoint).getNormalized());
if (bestAngle < tempAngle)
{
bestAngle = tempAngle;
bridgePoint = i;
}
}
}
if (notFound)
{
bridgePoint = vrtIndex;
}
if (bridgePoint == -1)
{
// std::cout << "Triangulation: bridge vertex for inner loop is not found..." << std::endl;
return 1;
}
}
else
{
bridgePoint = vrtIndex;
}
std::vector<uint32_t> temporal;
for (int32_t i = 0; i <= bridgePoint; ++i)
{
temporal.push_back(externalLoop[i]);
}
temporal.push_back(internalLoop[mIndex]);
for (int32_t i = (mIndex + 1) % internalLoop.size(); i != mIndex; i = (i + 1) % internalLoop.size())
{
temporal.push_back(internalLoop[i]);
}
temporal.push_back(internalLoop[mIndex]);
for (uint32_t i = bridgePoint; i < externalLoop.size(); ++i)
{
temporal.push_back(externalLoop[i]);
}
externalLoop = temporal;
return 0;
}
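// Assembles the facet's edges into closed loops, computes each loop's area and orientation relative to the
// whole-facet normal, bridges hole loops (negative area) into their enclosing outer loops, ear-clips every
// outer loop and stamps the produced triangles with the facet's userData, materialId and smoothingGroup.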
void Triangulator::buildPolygonAndTriangulate(std::vector<Edge>& edges, Vertex* vertices, int32_t userData,
int32_t materialId, int32_t smoothingGroup)
{
std::vector<std::vector<uint32_t> > serializedLoops;
std::set<int> visitedVertices;
std::vector<int> used(edges.size(), 0);
uint32_t collected = 0;
std::vector<int> edgesIds;
/**
Add first edge to polygon
*/
edgesIds.push_back(0);
visitedVertices.insert(edges[0].s);
visitedVertices.insert(edges[0].e);
used[0] = true;
collected = 1;
uint32_t lastEdge = 0;
bool successfullPass = false;
for (; collected < edges.size();)
{
successfullPass = false;
for (uint32_t p = 0; p < edges.size(); ++p)
{
if (used[p] == 0 && edges[p].s == edges[lastEdge].e)
{
successfullPass = true;
collected++;
used[p] = true;
edgesIds.push_back(p);
lastEdge = p;
if (visitedVertices.find(edges[p].e) != visitedVertices.end()) // if we formed loop, detach it and
// triangulate
{
serializedLoops.push_back(std::vector<uint32_t>());
std::vector<uint32_t>& serializedPositions = serializedLoops.back();
while (edgesIds.size() > 0)
{
serializedPositions.push_back(edges[edgesIds.back()].s);
visitedVertices.erase(edges[edgesIds.back()].s);
if (edges[edgesIds.back()].s == edges[p].e)
{
edgesIds.pop_back();
break;
}
edgesIds.pop_back();
}
if (edgesIds.size() > 0)
{
lastEdge = edgesIds.back();
}
else
{
for (uint32_t t = 0; t < edges.size(); ++t)
{
if (used[t] == 0)
{
edgesIds.push_back(t);
visitedVertices.insert(edges[t].s);
visitedVertices.insert(edges[t].e);
used[t] = true;
collected++;
lastEdge = t;
break;
}
}
}
}
else
{
visitedVertices.insert(edges[p].e);
}
}
}
if (!successfullPass)
{
break;
}
}
std::vector<LoopInfo> loopsInfo(serializedLoops.size());
// Compute normal to whole polygon, and areas of loops
NvVec3 wholeFacetNormal(0, 0, 0);
for (uint32_t loop = 0; loop < serializedLoops.size(); ++loop)
{
NvVec3 loopNormal(0, 0, 0);
const std::vector<uint32_t>& pos = serializedLoops[loop];
for (uint32_t vrt = 1; vrt + 1 < serializedLoops[loop].size(); ++vrt)
{
loopNormal += toNvShared(vertices[pos[vrt]].p - vertices[pos[0]].p)
.cross(toNvShared(vertices[pos[vrt + 1]].p - vertices[pos[0]].p));
}
loopsInfo[loop].area = loopNormal.magnitude();
loopsInfo[loop].normal = loopNormal;
loopsInfo[loop].index = loop;
wholeFacetNormal += loopNormal;
}
// Change areas signs according to winding direction
for (uint32_t loop = 0; loop < serializedLoops.size(); ++loop)
{
if (wholeFacetNormal.dot(loopsInfo[loop].normal) < 0)
{
loopsInfo[loop].area = -loopsInfo[loop].area;
}
}
const ProjectionDirections dir = getProjectionDirection(wholeFacetNormal);
std::sort(loopsInfo.begin(), loopsInfo.end());
std::vector<NvVec3> tempPositions;
int32_t oldSize = static_cast<int32_t>(mBaseMeshTriangles.size());
for (uint32_t extPoly = 0; extPoly < loopsInfo.size(); ++extPoly)
{
if (loopsInfo[extPoly].area < 0)
{
continue; // Polygon with negative area is hole
}
int32_t baseLoop = loopsInfo[extPoly].index;
for (uint32_t intPoly = 0; intPoly < loopsInfo.size(); ++intPoly)
{
if (loopsInfo[intPoly].area > 0 || loopsInfo[intPoly].used ||
std::abs(loopsInfo[intPoly].area) > loopsInfo[extPoly].area)
{
continue;
}
int32_t holeLoop = loopsInfo[intPoly].index;
if (!unitePolygons(serializedLoops[baseLoop], serializedLoops[holeLoop], vertices, dir))
{
loopsInfo[intPoly].used = true;
}
}
triangulatePolygonWithEarClipping(serializedLoops[baseLoop], vertices, dir);
}
for (uint32_t i = oldSize; i < mBaseMeshTriangles.size(); ++i)
{
mBaseMeshTriangles[i].userData = userData;
mBaseMeshTriangles[i].materialId = materialId;
mBaseMeshTriangles[i].smoothingGroup = smoothingGroup;
}
}
NV_FORCE_INLINE int32_t Triangulator::addVerticeIfNotExist(const Vertex& p)
{
auto it = mVertMap.find(p);
if (it == mVertMap.end())
{
mVertMap[p] = static_cast<int32_t>(mVertices.size());
mVertices.push_back(p);
return static_cast<int32_t>(mVertices.size()) - 1;
}
else
{
return it->second;
}
}
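// Registers an edge unless it is degenerate. If the reversed edge with the same parent facet
// was already registered, the pair cancels out (the stored edge is marked with
// kNotValidVertexIndex); a further occurrence re-validates it. Invalidated edges are
// filtered out in prepare().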
NV_FORCE_INLINE void Triangulator::addEdgeIfValid(EdgeWithParent& ed)
{
if (ed.s == ed.e)
return;
EdgeWithParent opposite(ed.e, ed.s, ed.parent);
auto it = mEdgeMap.find(opposite);
if (it == mEdgeMap.end())
{
mEdgeMap[ed] = static_cast<int32_t>(mBaseMeshEdges.size());
mBaseMeshEdges.push_back(ed);
}
else
{
if (mBaseMeshEdges[it->second].s == kNotValidVertexIndex)
{
mBaseMeshEdges[it->second].s = ed.s;
mBaseMeshEdges[it->second].e = ed.e;
}
else
{
mBaseMeshEdges[it->second].s = kNotValidVertexIndex;
}
}
}
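// Builds the working vertex and edge lists from the input mesh: vertices are de-duplicated
// through addVerticeIfNotExist (mBaseMapping keeps the old-to-new index mapping), every facet
// edge is tagged with its parent facet, and edges cancelled by their opposites are dropped.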
void Triangulator::prepare(const Mesh* mesh)
{
const Edge* ed = mesh->getEdges();
const Vertex* vr = mesh->getVertices();
mBaseMapping.resize(mesh->getVerticesCount());
for (uint32_t i = 0; i < mesh->getFacetCount(); ++i)
{
const Facet* fc = mesh->getFacet(i);
for (uint32_t j = fc->firstEdgeNumber; j < fc->firstEdgeNumber + fc->edgesCount; ++j)
{
int32_t a = addVerticeIfNotExist(vr[ed[j].s]);
int32_t b = addVerticeIfNotExist(vr[ed[j].e]);
mBaseMapping[ed[j].s] = a;
mBaseMapping[ed[j].e] = b;
EdgeWithParent e(a, b, i);
addEdgeIfValid(e);
}
}
std::vector<EdgeWithParent> temp;
temp.reserve(mBaseMeshEdges.size());
for (uint32_t i = 0; i < mBaseMeshEdges.size(); ++i)
{
if (mBaseMeshEdges[i].s != kNotValidVertexIndex)
{
temp.push_back(mBaseMeshEdges[i]);
}
}
mBaseMeshEdges = temp;
}
void Triangulator::reset()
{
mVertices.clear();
mBaseMeshEdges.clear();
mVertMap.clear();
mEdgeMap.clear();
mBaseMeshTriangles.clear();
mBaseMeshResultTriangles.clear();
}
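// Entry point: prepares the edge/vertex lists, groups the base-mesh edges by parent facet,
// triangulates each facet's polygon, and collects the valid triangles into
// mBaseMeshResultTriangles before computing the position-based vertex mapping.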
void Triangulator::triangulate(const Mesh* mesh)
{
reset();
if (mesh == nullptr || !mesh->isValid())
{
return;
}
prepare(mesh);
if (mBaseMeshEdges.empty())
{
return;
}
std::vector<Edge> temp;
uint32_t fP = mBaseMeshEdges[0].parent;
for (uint32_t i = 0; i < mBaseMeshEdges.size(); ++i)
{
if (fP != mBaseMeshEdges[i].parent)
{
if (temp.empty() == false)
{
buildPolygonAndTriangulate(temp, mVertices.data(), mesh->getFacet(fP)->userData,
mesh->getFacet(fP)->materialId, mesh->getFacet(fP)->smoothingGroup);
}
temp.clear();
fP = mBaseMeshEdges[i].parent;
}
temp.push_back({ mBaseMeshEdges[i].s, mBaseMeshEdges[i].e });
}
buildPolygonAndTriangulate(temp, mVertices.data(), mesh->getFacet(fP)->userData, mesh->getFacet(fP)->materialId,
mesh->getFacet(fP)->smoothingGroup);
/* Build final triangles */
mBaseMeshResultTriangles.clear();
for (uint32_t i = 0; i < mBaseMeshTriangles.size(); ++i)
{
if (mBaseMeshTriangles[i].ea == kNotValidVertexIndex)
{
continue;
}
mBaseMeshResultTriangles.push_back({ mVertices[mBaseMeshTriangles[i].ea], mVertices[mBaseMeshTriangles[i].eb],
mVertices[mBaseMeshTriangles[i].ec], mBaseMeshTriangles[i].userData,
mBaseMeshTriangles[i].materialId, mBaseMeshTriangles[i].smoothingGroup });
}
mBaseMeshUVFittedTriangles = mBaseMeshResultTriangles; // Uvs will be fitted later, in FractureTool.
computePositionedMapping();
}
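// Maps every vertex index to the first vertex sharing its position (normals and UVs are
// ignored), so that geometrically coincident vertices can be treated as a single point.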
void Triangulator::computePositionedMapping()
{
std::map<NvcVec3, int32_t, VrtPositionComparator> mPosMap;
mPositionMappedVrt.clear();
mPositionMappedVrt.resize(mVertices.size());
for (uint32_t i = 0; i < mVertices.size(); ++i)
{
auto it = mPosMap.find(mVertices[i].p);
if (it == mPosMap.end())
{
mPosMap[mVertices[i].p] = i;
mPositionMappedVrt[i] = i;
}
else
{
mPositionMappedVrt[i] = it->second;
}
}
}
} // namespace Blast
} // namespace Nv | 27,321 | C++ | 34.391192 | 151 | 0.576882 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringBooleanToolImpl.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include "NvBlastGlobals.h"
#include "NvBlastExtAuthoringBooleanToolImpl.h"
#include "NvBlastExtAuthoringBooleanTool.h"
#include "NvBlastExtAuthoringMeshImpl.h"
#include "NvBlastExtAuthoringAcceleratorImpl.h"
#include <NvBlastNvSharedHelpers.h>
#include <math.h>
#include <set>
#include <algorithm>
using nvidia::NvBounds3;
namespace Nv
{
namespace Blast
{
/* Linear interpolation of vectors */
NV_FORCE_INLINE void vec3Lerp(const NvcVec3& a, const NvcVec3& b, NvcVec3& out, float t)
{
out.x = (b.x - a.x) * t + a.x;
out.y = (b.y - a.y) * t + a.y;
out.z = (b.z - a.z) * t + a.z;
}
NV_FORCE_INLINE void vec2Lerp(const NvcVec2& a, const NvcVec2& b, NvcVec2& out, float t)
{
out.x = (b.x - a.x) * t + a.x;
out.y = (b.y - a.y) * t + a.y;
}
NV_FORCE_INLINE int32_t BooleanEvaluator::addIfNotExist(const Vertex& p)
{
mVerticesAggregate.push_back(p);
return static_cast<int32_t>(mVerticesAggregate.size()) - 1;
}
NV_FORCE_INLINE void BooleanEvaluator::addEdgeIfValid(const EdgeWithParent& ed)
{
mEdgeAggregate.push_back(ed);
}
/**
Vertex level shadowing functions
*/
NV_FORCE_INLINE int32_t vertexShadowing(const NvcVec3& a, const NvcVec3& b)
{
return (b.x >= a.x) ? 1 : 0;
}
/**
Vertex-edge status functions
*/
NV_FORCE_INLINE int32_t veStatus01(const NvcVec3& sEdge, const NvcVec3& eEdge, const NvcVec3& p)
{
return vertexShadowing(p, eEdge) - vertexShadowing(p, sEdge);
}
NV_FORCE_INLINE int32_t veStatus10(const NvcVec3& sEdge, const NvcVec3& eEdge, const NvcVec3& p)
{
return -vertexShadowing(eEdge, p) + vertexShadowing(sEdge, p);
}
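// Lexicographic comparison of two points (x, then y, then z). It gives every edge a canonical
// direction so that the shadowing tests below are evaluated consistently regardless of the
// stored edge orientation; callers swap the endpoints and negate the result when needed.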
bool shouldSwap(const NvcVec3& a, const NvcVec3& b)
{
if (a.x < b.x) return false;
if (a.x > b.x) return true;
if (a.y < b.y) return false;
if (a.y > b.y) return true;
if (a.z < b.z) return false;
if (a.z > b.z) return true;
return false;
}
/**
Vertex-edge shadowing functions
*/
int32_t shadowing01(Vertex sEdge, Vertex eEdge, const NvcVec3& p, Vertex& onEdgePoint, bool& hasOnEdge)
{
int32_t winding = veStatus01(sEdge.p, eEdge.p, p);
if (sEdge.p.x > eEdge.p.x)
{
std::swap(sEdge, eEdge);
}
if (winding != 0)
{
float t = (p.x - sEdge.p.x) / (eEdge.p.x - sEdge.p.x);
if (t >= 1)
{
onEdgePoint = eEdge;
}
else if (t <= 0)
{
onEdgePoint = sEdge;
}
else
{
vec3Lerp(sEdge.p, eEdge.p, onEdgePoint.p, t);
vec3Lerp(sEdge.n, eEdge.n, onEdgePoint.n, t);
vec2Lerp(sEdge.uv[0], eEdge.uv[0], onEdgePoint.uv[0], t);
}
hasOnEdge = true;
if (onEdgePoint.p.y >= p.y)
{
return winding;
}
}
else
{
hasOnEdge = false;
}
return 0;
}
int32_t shadowing10(Vertex sEdge, Vertex eEdge, const NvcVec3& p, Vertex& onEdgePoint, bool& hasOnEdge)
{
int32_t winding = veStatus10(sEdge.p, eEdge.p, p);
if (sEdge.p.x > eEdge.p.x)
{
std::swap(sEdge, eEdge);
}
if (winding != 0)
{
float t = (p.x - sEdge.p.x) / (eEdge.p.x - sEdge.p.x);
if (t >= 1)
{
onEdgePoint = eEdge;
}
else if (t <= 0)
{
onEdgePoint = sEdge;
}
else
{
vec3Lerp(sEdge.p, eEdge.p, onEdgePoint.p, t);
vec3Lerp(sEdge.n, eEdge.n, onEdgePoint.n, t);
vec2Lerp(sEdge.uv[0], eEdge.uv[0], onEdgePoint.uv[0], t);
}
hasOnEdge = true;
if (onEdgePoint.p.y < p.y)
{
return winding;
}
}
else
{
hasOnEdge = false;
}
return 0;
}
int32_t shadowing01(NvcVec3 sEdge, NvcVec3 eEdge, const NvcVec3& p)
{
int32_t winding = veStatus01(sEdge, eEdge, p);
if (winding != 0)
{
if (sEdge.x > eEdge.x)
{
std::swap(sEdge, eEdge);
}
float t = ((p.x - sEdge.x) / (eEdge.x - sEdge.x));
NvcVec3 onEdgePoint;
if (t >= 1)
onEdgePoint = eEdge;
else if (t <= 0)
onEdgePoint = sEdge;
else
vec3Lerp(sEdge, eEdge, onEdgePoint, t);
if (onEdgePoint.y >= p.y)
{
return winding;
}
}
return 0;
}
int32_t shadowing10(NvcVec3 sEdge, NvcVec3 eEdge, const NvcVec3& p)
{
int32_t winding = veStatus10(sEdge, eEdge, p);
if (winding != 0)
{
if (sEdge.x > eEdge.x)
{
std::swap(sEdge, eEdge);
}
float t = ((p.x - sEdge.x) / (eEdge.x - sEdge.x));
NvcVec3 onEdgePoint;
if (t >= 1)
onEdgePoint = eEdge;
else if (t <= 0)
onEdgePoint = sEdge;
else
vec3Lerp(sEdge, eEdge, onEdgePoint, t);
if (onEdgePoint.y < p.y)
{
return winding;
}
}
return 0;
}
/**
Vertex-facet shadowing functions
*/
int32_t vfStatus02(const NvcVec3& p, const Vertex* points, const Edge* edges, int32_t edgesCount, Vertex* out)
{
int32_t val = 0;
Vertex pnt;
bool hasOnEdge = false;
out[0].p.y = -MAXIMUM_EXTENT;
out[1].p.y = MAXIMUM_EXTENT;
for (int32_t i = 0; i < edgesCount; ++i)
{
val -= shadowing01(points[edges->s], points[edges->e], p, pnt, hasOnEdge);
if (hasOnEdge != 0)
{
if (p.y > pnt.p.y && pnt.p.y > out[0].p.y)
{
out[0] = pnt;
}
if (p.y <= pnt.p.y && pnt.p.y < out[1].p.y)
{
out[1] = pnt;
}
}
++edges;
}
return val;
}
int32_t shadowing02(const NvcVec3& p, const Vertex* points, const Edge* edges, int edgesCount, bool& hasOnFacetPoint, Vertex& onFacetPoint)
{
Vertex outp[2];
int32_t stat = vfStatus02(p, points, edges, edgesCount, outp);
float z = 0;
hasOnFacetPoint = false;
if (stat != 0)
{
Vertex& p1 = outp[0];
Vertex& p2 = outp[1];
NvcVec3 vc = p2.p - p1.p;
float t = 0;
t = (std::abs(vc.x) > std::abs(vc.y)) ? (p.x - p1.p.x) / vc.x : (p.y - p1.p.y) / vc.y;
t = nvidia::NvClamp(t, 0.0f, 1.0f);
z = t * vc.z + p1.p.z;
hasOnFacetPoint = true;
onFacetPoint.p.x = p.x;
onFacetPoint.p.y = p.y;
onFacetPoint.p.z = z;
vec2Lerp(p1.uv[0], p2.uv[0], onFacetPoint.uv[0], t);
vec3Lerp(p1.n, p2.n, onFacetPoint.n, t);
if (z >= p.z)
{
return stat;
}
}
return 0;
}
int32_t vfStatus20(const NvcVec3& p, const Vertex* points, const Edge* edges, int32_t edgesCount, Vertex* out)
{
int32_t val = 0;
Vertex pnt;
bool hasOnEdge = false;
out[0].p.y = -MAXIMUM_EXTENT;
out[1].p.y = MAXIMUM_EXTENT;
for (int32_t i = 0; i < edgesCount; ++i)
{
val += shadowing10(points[edges->s], points[edges->e], p, pnt, hasOnEdge);
if (hasOnEdge != 0)
{
if (p.y > pnt.p.y && pnt.p.y > out[0].p.y)
{
out[0] = pnt;
}
if (p.y <= pnt.p.y && pnt.p.y < out[1].p.y)
{
out[1] = pnt;
}
}
++edges;
}
return val;
}
int32_t shadowing20(const NvcVec3& p, const Vertex* points, const Edge* edges, int edgesCount, bool& hasOnFacetPoint, Vertex& onFacetPoint)
{
Vertex outp[2];
int32_t stat = vfStatus20(p, points, edges, edgesCount, outp);
hasOnFacetPoint = false;
if (stat != 0)
{
Vertex& p1 = outp[0];
Vertex& p2 = outp[1];
NvcVec3 vc = p2.p - p1.p;
float t = 0;
t = (std::abs(vc.x) > std::abs(vc.y)) ? (p.x - p1.p.x) / vc.x : (p.y - p1.p.y) / vc.y;
t = nvidia::NvClamp(t, 0.0f, 1.0f);
hasOnFacetPoint = true;
onFacetPoint.p.x = p.x;
onFacetPoint.p.y = p.y;
onFacetPoint.p.z = t * vc.z + p1.p.z;
vec2Lerp(p1.uv[0], p2.uv[0], onFacetPoint.uv[0], t);
vec3Lerp(p1.n, p2.n, onFacetPoint.n, t);
if (onFacetPoint.p.z < p.z)
{
return stat;
}
}
return 0;
}
NV_FORCE_INLINE int32_t edgesCrossCheck(const NvcVec3& eAs, const NvcVec3& eAe, const NvcVec3& eBs, const NvcVec3& eBe)
{
return shadowing01(eBs, eBe, eAe) - shadowing01(eBs, eBe, eAs) + shadowing10(eAs, eAe, eBe) - shadowing10(eAs, eAe, eBs);
}
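// Computes the crossing status of edges A and B in the XY shadowing sense and, when they cross,
// the two corresponding points: intersectionA lies on edge A and intersectionB on edge B (same
// x/y, possibly different z). hasPoints reports whether the points were actually built.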
int32_t edgesIntersection(const Vertex& eAs, const Vertex& eAe, const Vertex& eBs, const Vertex& eBe, Vertex& intersectionA, Vertex& intersectionB, bool& hasPoints)
{
int32_t status = edgesCrossCheck(eAs.p, eAe.p, eBs.p, eBe.p);
hasPoints = false;
if (status == 0)
{
return 0;
}
Vertex tempPoint;
Vertex bShadowingPair[2];
Vertex aShadowingPair[2];
bool hasOnEdge = false;
bool aShadowing = false;
bool bShadowing = false;
/**
Search for two pairs: one where part of edge A shadows B, and one where B shadows A.
These are needed to find the intersection point.
*/
for (auto p : { &eBs, &eBe })
{
int32_t shadowingType = shadowing10(eAs, eAe, p->p, tempPoint, hasOnEdge);
if (shadowingType == 0 && !aShadowing && hasOnEdge)
{
aShadowing = true;
aShadowingPair[0] = *p;
aShadowingPair[1] = tempPoint;
}
else
{
if ((shadowingType == 1 || shadowingType == -1) && !bShadowing)
{
bShadowing = true;
bShadowingPair[0] = *p;
bShadowingPair[1] = tempPoint;
}
}
}
if (!aShadowing || !bShadowing)
{
for (auto p : { &eAs, &eAe })
{
int32_t shadowingType = shadowing01(eBs, eBe, p->p, tempPoint, hasOnEdge);
if (shadowingType == 0 && !aShadowing && hasOnEdge)
{
aShadowing = true;
aShadowingPair[1] = *p;
aShadowingPair[0] = tempPoint;
}
else
{
if ((shadowingType == 1 || shadowingType == -1) && !bShadowing)
{
bShadowing = true;
bShadowingPair[1] = *p;
bShadowingPair[0] = tempPoint;
}
}
}
}
float deltaPlus = bShadowingPair[0].p.y - bShadowingPair[1].p.y;
float deltaMinus = aShadowingPair[0].p.y - aShadowingPair[1].p.y;
float div = 0;
if (deltaPlus > 0)
div = deltaPlus / (deltaPlus - deltaMinus);
else
div = 0;
intersectionA.p = bShadowingPair[1].p - div * (bShadowingPair[1].p - aShadowingPair[1].p);
intersectionA.n = bShadowingPair[1].n - div * (bShadowingPair[1].n - aShadowingPair[1].n);
intersectionA.uv[0] = bShadowingPair[1].uv[0] - (bShadowingPair[1].uv[0] - aShadowingPair[1].uv[0]) * div;
intersectionB.p = intersectionA.p;
intersectionB.p.z = bShadowingPair[0].p.z - div * (bShadowingPair[0].p.z - aShadowingPair[0].p.z);
intersectionB.n = bShadowingPair[0].n - div * (bShadowingPair[0].n - aShadowingPair[0].n);
intersectionB.uv[0] = bShadowingPair[0].uv[0] - (bShadowingPair[0].uv[0] - aShadowingPair[0].uv[0]) * div;
hasPoints = true;
return status;
}
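// Edge-edge shadowing: the crossing found by edgesIntersection only counts when the point on
// edge B is not below the point on edge A in z.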
NV_FORCE_INLINE int32_t edgeEdgeShadowing(const Vertex& eAs, const Vertex& eAe, const Vertex& eBs, const Vertex& eBe, Vertex& intersectionA, Vertex& intersectionB, bool& hasPoints)
{
int32_t status = edgesIntersection(eAs, eAe, eBs, eBe, intersectionA, intersectionB, hasPoints);
if (intersectionB.p.z >= intersectionA.p.z)
{
return status;
}
return 0;
}
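// Intersects an edge of mesh A (edSt-edEnd) with a facet of mesh B given by its edge list.
// The shadowing of the edge endpoints against the facet and of the edge against every facet
// edge is accumulated into a status value; when an intersection exists, intersectionA and
// intersectionB share the same position but carry the interpolated attributes (normal, UV)
// of the mesh-A edge and the mesh-B facet respectively.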
int32_t edgeFacetIntersection12(const Vertex& edSt, const Vertex& edEnd, const Vertex* points, const Edge* edges, int edgesCount, Vertex& intersectionA, Vertex& intersectionB)
{
int32_t status = 0;
Vertex p1, p2;
Vertex bShadowingPair[2];
Vertex aShadowingPair[2];
bool hasPoint = false;
bool aShadowing = false;
bool bShadowing = false;
int32_t mlt = -1;
int32_t shadowingType;
for (auto p : { &edEnd, &edSt })
{
shadowingType = shadowing02(p->p, points, edges, edgesCount, hasPoint, p1);
status += mlt * shadowingType;
if (shadowingType == 0 && !aShadowing && hasPoint)
{
aShadowing = true;
aShadowingPair[0] = p1;
aShadowingPair[1] = *p;
}
else if ((shadowingType == 1 || shadowingType == -1) && !bShadowing)
{
bShadowing = true;
bShadowingPair[0] = p1;
bShadowingPair[1] = *p;
}
mlt = 1;
}
for (int32_t ed = 0; ed < edgesCount; ++ed)
{
if (shouldSwap(points[edges[ed].s].p, points[edges[ed].e].p))
{
shadowingType = -edgeEdgeShadowing(edSt, edEnd, points[edges[ed].e], points[edges[ed].s], p1, p2, hasPoint);
}
else
{
shadowingType = edgeEdgeShadowing(edSt, edEnd, points[edges[ed].s], points[edges[ed].e], p1, p2, hasPoint);
}
status -= shadowingType;
if (shadowingType == 0 && !aShadowing && hasPoint)
{
aShadowing = true;
aShadowingPair[0] = p2;
aShadowingPair[1] = p1;
}
else if ((shadowingType == 1 || shadowingType == -1) && !bShadowing)
{
bShadowing = true;
bShadowingPair[0] = p2;
bShadowingPair[1] = p1;
}
}
if (!status || !bShadowing || !aShadowing)
{
return 0;
}
float deltaPlus = bShadowingPair[0].p.z - bShadowingPair[1].p.z;
float div = 0;
if (deltaPlus != 0)
{
float deltaMinus = aShadowingPair[0].p.z - aShadowingPair[1].p.z;
div = deltaPlus / (deltaPlus - deltaMinus);
}
intersectionA.p = bShadowingPair[1].p - div * (bShadowingPair[1].p - aShadowingPair[1].p);
intersectionA.n = bShadowingPair[1].n - div * (bShadowingPair[1].n - aShadowingPair[1].n);
intersectionA.uv[0] = bShadowingPair[1].uv[0] - (bShadowingPair[1].uv[0] - aShadowingPair[1].uv[0]) * div;
intersectionB.p = intersectionA.p;
intersectionB.n = bShadowingPair[0].n - div * (bShadowingPair[0].n - aShadowingPair[0].n);
intersectionB.uv[0] = bShadowingPair[0].uv[0] - (bShadowingPair[0].uv[0] - aShadowingPair[0].uv[0]) * div;
return status;
}
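// Mirror of edgeFacetIntersection12 with the meshes' roles exchanged: the edge comes from
// mesh B and the facet from mesh A, using the complementary shadowing convention.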
int32_t edgeFacetIntersection21(const Vertex& edSt, const Vertex& edEnd, const Vertex* points, const Edge* edges, int edgesCount, Vertex& intersectionA, Vertex& intersectionB)
{
int32_t status = 0;
Vertex p1, p2;
Vertex bShadowingPair[2];
Vertex aShadowingPair[2];
bool hasPoint = false;
bool aShadowing = false;
bool bShadowing = false;
int32_t shadowingType;
int32_t mlt = 1;
for (auto p : { &edEnd, &edSt })
{
shadowingType = shadowing20(p->p, points, edges, edgesCount, hasPoint, p1);
status += mlt * shadowingType;
if (shadowingType == 0 && !aShadowing && hasPoint)
{
aShadowing = true;
aShadowingPair[0] = *p;
aShadowingPair[1] = p1;
}
else if ((shadowingType == 1 || shadowingType == -1) && !bShadowing)
{
bShadowing = true;
bShadowingPair[0] = *p;
bShadowingPair[1] = p1;
}
mlt = -1;
}
for (int32_t ed = 0; ed < edgesCount; ++ed)
{
if (shouldSwap(points[edges[ed].s].p, points[edges[ed].e].p))
{
shadowingType = -edgeEdgeShadowing(points[edges[ed].e], points[edges[ed].s], edSt, edEnd, p1, p2, hasPoint);
}
else
{
shadowingType = edgeEdgeShadowing(points[edges[ed].s], points[edges[ed].e], edSt, edEnd, p1, p2, hasPoint);
}
status -= shadowingType;
if (shadowingType == 0)
{
if (!aShadowing && hasPoint)
{
aShadowing = true;
aShadowingPair[0] = p2;
aShadowingPair[1] = p1;
}
}
else
{
if ((shadowingType == 1 || shadowingType == -1) && !bShadowing)
{
bShadowing = true;
bShadowingPair[0] = p2;
bShadowingPair[1] = p1;
}
}
}
if (!status || !bShadowing || !aShadowing)
{
return 0;
}
float deltaPlus = bShadowingPair[0].p.z - bShadowingPair[1].p.z;
float div = 0;
if (deltaPlus != 0)
{
float deltaMinus = aShadowingPair[0].p.z - aShadowingPair[1].p.z;
div = deltaPlus / (deltaPlus - deltaMinus);
}
intersectionA.p = bShadowingPair[1].p - div * (bShadowingPair[1].p - aShadowingPair[1].p);
intersectionA.n = bShadowingPair[1].n - div * (bShadowingPair[1].n - aShadowingPair[1].n);
intersectionA.uv[0] = bShadowingPair[1].uv[0] - (bShadowingPair[1].uv[0] - aShadowingPair[1].uv[0]) * div;
intersectionB.p = intersectionA.p;
intersectionB.n = bShadowingPair[0].n - div * (bShadowingPair[0].n - aShadowingPair[0].n);
intersectionB.uv[0] = bShadowingPair[0].uv[0] - (bShadowingPair[0].uv[0] - aShadowingPair[0].uv[0]) * div;
return status;
}
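// Containment status of point p with respect to mesh B: sums the point-facet shadowing over
// every facet returned by the accelerator, effectively a winding-number style count.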
int32_t BooleanEvaluator::vertexMeshStatus03(const NvcVec3& p, const Mesh* mesh)
{
int32_t status = 0;
Vertex pnt;
bool hasPoint = false;
mAcceleratorB->setState(p);
int32_t facet = mAcceleratorB->getNextFacet();
while (facet != -1)
{
const Edge* ed = mesh->getEdges() + mesh->getFacet(facet)->firstEdgeNumber;
status += shadowing02(p, mesh->getVertices(), ed, mesh->getFacet(facet)->edgesCount, hasPoint, pnt);
facet = mAcceleratorB->getNextFacet();
}
return status;
}
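// Same containment test against mesh A, using the complementary shadowing convention.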
int32_t BooleanEvaluator::vertexMeshStatus30(const NvcVec3& p, const Mesh* mesh)
{
int32_t status = 0;
bool hasPoints = false;
Vertex point;
mAcceleratorA->setState(p);
int32_t facet = mAcceleratorA->getNextFacet();
while ( facet != -1)
{
const Edge* ed = mesh->getEdges() + mesh->getFacet(facet)->firstEdgeNumber;
status -= shadowing20(p, mesh->getVertices(), ed, mesh->getFacet(facet)->edgesCount, hasPoints, point);
facet = mAcceleratorA->getNextFacet();
}
return status;
}
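// The BooleanConf coefficients (ca, cb, ci) translate a containment status into how many copies
// of a vertex, edge piece or intersection point are retained; the particular combination of
// coefficients is what selects union, intersection or difference.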
NV_FORCE_INLINE int32_t inclusionValue03(const BooleanConf& conf, int32_t xValue)
{
return conf.ca + conf.ci * xValue;
}
NV_FORCE_INLINE int32_t inclusionValueEdgeFace(const BooleanConf& conf, int32_t xValue)
{
return conf.ci * xValue;
}
NV_FORCE_INLINE int32_t inclusionValue30(const BooleanConf& conf, int32_t xValue)
{
return conf.cb + conf.ci * xValue;
}
struct VertexComparator
{
VertexComparator(NvcVec3 base = NvcVec3()) : basePoint(base) {};
NvcVec3 basePoint;
bool operator()(const Vertex& a, const Vertex& b)
{
return ((b.p - a.p) | basePoint) > 0.0;
}
};
struct VertexPairComparator
{
VertexPairComparator(NvcVec3 base = NvcVec3()) : basePoint(base) {};
NvcVec3 basePoint;
bool operator()(const std::pair<Vertex, Vertex>& a, const std::pair<Vertex, Vertex>& b)
{
return ((b.first.p - a.first.p) | basePoint) > 0.0;
}
};
int32_t BooleanEvaluator::isPointContainedInMesh(const Mesh* msh, const NvcVec3& point)
{
if (msh == nullptr)
{
return 0;
}
DummyAccelerator dmAccel(msh->getFacetCount());
mAcceleratorA = &dmAccel;
return vertexMeshStatus30(point, msh);
}
int32_t BooleanEvaluator::isPointContainedInMesh(const Mesh* msh, SpatialAccelerator* spAccel, const NvcVec3& point)
{
if (msh == nullptr)
{
return 0;
}
mAcceleratorA = spAccel;
return vertexMeshStatus30(point, msh);
}
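// For every facet pair reported by the accelerator, intersects the edges of each facet with the
// other facet and turns matched start/end point pairs into new edges: one copy parented to the
// mesh-A facet and a reversed copy parented to the mesh-B facet (offset by mesh A's facet
// count). The per-facet intersection records are kept for reuse in collectRetainedPartsFromA/B.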
void BooleanEvaluator::buildFaceFaceIntersections(const BooleanConf& mode)
{
int32_t statusValue = 0;
int32_t inclusionValue = 0;
std::vector<std::pair<Vertex, Vertex> > retainedStarts;
std::vector<std::pair<Vertex, Vertex>> retainedEnds;
VertexPairComparator comp;
Vertex newPointA;
Vertex newPointB;
const Vertex* meshAPoints = mMeshA->getVertices();
const Vertex* meshBPoints = mMeshB->getVertices();
EdgeWithParent newEdge;
mEdgeFacetIntersectionData12.clear();
mEdgeFacetIntersectionData21.clear();
mEdgeFacetIntersectionData12.resize(mMeshA->getFacetCount());
mEdgeFacetIntersectionData21.resize(mMeshB->getFacetCount());
for (uint32_t facetB = 0; facetB < mMeshB->getFacetCount(); ++facetB)
{
mAcceleratorA->setState(meshBPoints, mMeshB->getEdges(), *mMeshB->getFacet(facetB));
int32_t facetA = mAcceleratorA->getNextFacet();
while (facetA != -1)
{
const Edge* facetBEdges = mMeshB->getEdges() + mMeshB->getFacet(facetB)->firstEdgeNumber;
const Edge* facetAEdges = mMeshA->getEdges() + mMeshA->getFacet(facetA)->firstEdgeNumber;
const Edge* fbe = facetBEdges;
const Edge* fae = facetAEdges;
retainedStarts.clear();
retainedEnds.clear();
NvcVec3 compositeEndPoint = {0, 0, 0};
NvcVec3 compositeStartPoint = {0, 0, 0};
uint32_t facetAEdgeCount = mMeshA->getFacet(facetA)->edgesCount;
uint32_t facetBEdgeCount = mMeshB->getFacet(facetB)->edgesCount;
int32_t ic = 0;
for (uint32_t i = 0; i < facetAEdgeCount; ++i)
{
if (shouldSwap(meshAPoints[fae->e].p, meshAPoints[fae->s].p))
{
statusValue = -edgeFacetIntersection12(meshAPoints[fae->e], meshAPoints[fae->s], meshBPoints, facetBEdges, facetBEdgeCount, newPointA, newPointB);
}
else
{
statusValue = edgeFacetIntersection12(meshAPoints[fae->s], meshAPoints[fae->e], meshBPoints, facetBEdges, facetBEdgeCount, newPointA, newPointB);
}
inclusionValue = -inclusionValueEdgeFace(mode, statusValue);
if (inclusionValue > 0)
{
for (ic = 0; ic < inclusionValue; ++ic)
{
retainedEnds.push_back(std::make_pair(newPointA, newPointB));
compositeEndPoint = compositeEndPoint + newPointA.p;
}
mEdgeFacetIntersectionData12[facetA].push_back(EdgeFacetIntersectionData(i, statusValue, newPointA));
}
if (inclusionValue < 0)
{
for (ic = 0; ic < -inclusionValue; ++ic)
{
retainedStarts.push_back(std::make_pair(newPointA, newPointB));
compositeStartPoint = compositeStartPoint + newPointA.p;
}
mEdgeFacetIntersectionData12[facetA].push_back(EdgeFacetIntersectionData(i, statusValue, newPointA));
}
fae++;
}
for (uint32_t i = 0; i < facetBEdgeCount; ++i)
{
if (shouldSwap(meshBPoints[fbe->e].p, meshBPoints[fbe->s].p))
{
statusValue = -edgeFacetIntersection21(meshBPoints[fbe->e], meshBPoints[fbe->s], meshAPoints, facetAEdges, facetAEdgeCount, newPointA, newPointB);
}
else
{
statusValue = edgeFacetIntersection21(meshBPoints[fbe->s], meshBPoints[fbe->e], meshAPoints, facetAEdges, facetAEdgeCount, newPointA, newPointB);
}
inclusionValue = inclusionValueEdgeFace(mode, statusValue);
if (inclusionValue > 0)
{
for (ic = 0; ic < inclusionValue; ++ic)
{
retainedEnds.push_back(std::make_pair(newPointA, newPointB));
compositeEndPoint = compositeEndPoint + newPointB.p;
}
mEdgeFacetIntersectionData21[facetB].push_back(EdgeFacetIntersectionData( i, statusValue, newPointB));
}
if (inclusionValue < 0)
{
for (ic = 0; ic < -inclusionValue; ++ic)
{
retainedStarts.push_back(std::make_pair(newPointA, newPointB));
compositeStartPoint = compositeStartPoint + newPointB.p;
}
mEdgeFacetIntersectionData21[facetB].push_back(EdgeFacetIntersectionData(i, statusValue, newPointB));
}
fbe++;
}
if (retainedStarts.size() != retainedEnds.size())
{
NVBLAST_LOG_ERROR("Not equal number of starting and ending vertices! Probably input mesh has open edges.");
return;
}
for (uint32_t rv = 0; rv < retainedStarts.size(); ++rv)
{
newEdge.s = addIfNotExist(retainedStarts[rv].first);
newEdge.e = addIfNotExist(retainedEnds[rv].first);
newEdge.parent = facetA;
addEdgeIfValid(newEdge);
newEdge.parent = facetB + mMeshA->getFacetCount();
newEdge.e = addIfNotExist(retainedStarts[rv].second);
newEdge.s = addIfNotExist(retainedEnds[rv].second);
addEdgeIfValid(newEdge);
}
facetA = mAcceleratorA->getNextFacet();
} // while (facetA != -1)
} // for (uint32_t facetB = 0; facetB < mMeshB->getFacetCount(); ++facetB)
}
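// Faster variant used by performFastCutting(): only facet 0 of mesh B is considered, so the
// edges of each mesh-A facet are tested against that single facet and no accelerator query
// is needed.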
void BooleanEvaluator::buildFastFaceFaceIntersection(const BooleanConf& mode)
{
int32_t statusValue = 0;
int32_t inclusionValue = 0;
std::vector<std::pair<Vertex, Vertex> > retainedStarts;
std::vector<std::pair<Vertex, Vertex>> retainedEnds;
VertexPairComparator comp;
Vertex newPointA;
Vertex newPointB;
const Vertex* meshAPoints = mMeshA->getVertices();
const Vertex* meshBPoints = mMeshB->getVertices();
EdgeWithParent newEdge;
mEdgeFacetIntersectionData12.clear();
mEdgeFacetIntersectionData21.clear();
mEdgeFacetIntersectionData12.resize(mMeshA->getFacetCount());
mEdgeFacetIntersectionData21.resize(mMeshB->getFacetCount());
for (uint32_t facetA = 0; facetA < mMeshA->getFacetCount(); ++facetA)
{
const Edge* facetAEdges = mMeshA->getEdges() + mMeshA->getFacet(facetA)->firstEdgeNumber;
int32_t facetB = 0;
const Edge* facetBEdges = mMeshB->getEdges() + mMeshB->getFacet(facetB)->firstEdgeNumber;
const Edge* fae = facetAEdges;
retainedStarts.clear();
retainedEnds.clear();
NvcVec3 compositeEndPoint = {0, 0, 0};
NvcVec3 compositeStartPoint = {0, 0, 0};
uint32_t facetAEdgeCount = mMeshA->getFacet(facetA)->edgesCount;
uint32_t facetBEdgeCount = mMeshB->getFacet(facetB)->edgesCount;
int32_t ic = 0;
for (uint32_t i = 0; i < facetAEdgeCount; ++i)
{
if (shouldSwap(meshAPoints[fae->e].p, meshAPoints[fae->s].p))
{
statusValue = -edgeFacetIntersection12(meshAPoints[fae->e], meshAPoints[fae->s], meshBPoints, facetBEdges, facetBEdgeCount, newPointA, newPointB);
}
else
{
statusValue = edgeFacetIntersection12(meshAPoints[fae->s], meshAPoints[fae->e], meshBPoints, facetBEdges, facetBEdgeCount, newPointA, newPointB);
}
inclusionValue = -inclusionValueEdgeFace(mode, statusValue);
if (inclusionValue > 0)
{
for (ic = 0; ic < inclusionValue; ++ic)
{
retainedEnds.push_back(std::make_pair(newPointA, newPointB));
compositeEndPoint = compositeEndPoint + newPointA.p;
}
mEdgeFacetIntersectionData12[facetA].push_back(EdgeFacetIntersectionData(i, statusValue, newPointA));
}
if (inclusionValue < 0)
{
for (ic = 0; ic < -inclusionValue; ++ic)
{
retainedStarts.push_back(std::make_pair(newPointA, newPointB));
compositeStartPoint = compositeStartPoint + newPointA.p;
}
mEdgeFacetIntersectionData12[facetA].push_back(EdgeFacetIntersectionData(i, statusValue, newPointA));
}
fae++;
}
if (retainedStarts.size() != retainedEnds.size())
{
NVBLAST_LOG_ERROR("Not equal number of starting and ending vertices! Probably input mesh has open edges.");
return;
}
if (retainedStarts.size() > 1)
{
comp.basePoint = compositeEndPoint - compositeStartPoint;
std::sort(retainedStarts.begin(), retainedStarts.end(), comp);
std::sort(retainedEnds.begin(), retainedEnds.end(), comp);
}
for (uint32_t rv = 0; rv < retainedStarts.size(); ++rv)
{
newEdge.s = addIfNotExist(retainedStarts[rv].first);
newEdge.e = addIfNotExist(retainedEnds[rv].first);
newEdge.parent = facetA;
addEdgeIfValid(newEdge);
newEdge.parent = facetB + mMeshA->getFacetCount();
newEdge.e = addIfNotExist(retainedStarts[rv].second);
newEdge.s = addIfNotExist(retainedEnds[rv].second);
addEdgeIfValid(newEdge);
}
}
}
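// Walks every edge of mesh A, classifies its endpoints against mesh B (the test is skipped when
// a point lies outside mesh B's bounds), adds the intersection points recorded earlier, and
// sorts the retained start/end points along the edge so they pair up into output edges parented
// to the mesh-A facet.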
void BooleanEvaluator::collectRetainedPartsFromA(const BooleanConf& mode)
{
int32_t statusValue = 0;
int32_t inclusionValue = 0;
const Vertex* vertices = mMeshA->getVertices();
VertexComparator comp;
const NvBounds3& bMeshBoudning = toNvShared(mMeshB->getBoundingBox());
const Edge* facetEdges = mMeshA->getEdges();
std::vector<Vertex> retainedStartVertices;
std::vector<Vertex> retainedEndVertices;
retainedStartVertices.reserve(255);
retainedEndVertices.reserve(255);
int32_t ic = 0;
for (uint32_t facetId = 0; facetId < mMeshA->getFacetCount(); ++facetId)
{
retainedStartVertices.clear();
retainedEndVertices.clear();
for (uint32_t i = 0; i < mMeshA->getFacet(facetId)->edgesCount; ++i)
{
NvcVec3 compositeEndPoint = {0, 0, 0};
NvcVec3 compositeStartPoint = {0, 0, 0};
int32_t lastPos = static_cast<int32_t>(retainedEndVertices.size());
/* Test start and end point of edge against mesh */
if (bMeshBoudning.contains(toNvShared(vertices[facetEdges->s].p)))
{
statusValue = vertexMeshStatus03(vertices[facetEdges->s].p, mMeshB);
}
else
{
statusValue = 0;
}
inclusionValue = -inclusionValue03(mode, statusValue);
if (inclusionValue > 0)
{
for (ic = 0; ic < inclusionValue; ++ic)
{
retainedEndVertices.push_back(vertices[facetEdges->s]);
compositeEndPoint = compositeEndPoint + vertices[facetEdges->s].p;
}
}
else if (inclusionValue < 0)
{
for (ic = 0; ic < -inclusionValue; ++ic)
{
retainedStartVertices.push_back(vertices[facetEdges->s]);
compositeStartPoint = compositeStartPoint + vertices[facetEdges->s].p;
}
}
if (bMeshBoudning.contains(toNvShared(vertices[facetEdges->e].p)))
{
statusValue = vertexMeshStatus03(vertices[facetEdges->e].p, mMeshB);
}
else
{
statusValue = 0;
}
inclusionValue = inclusionValue03(mode, statusValue);
if (inclusionValue > 0)
{
for (ic = 0; ic < inclusionValue; ++ic)
{
retainedEndVertices.push_back(vertices[facetEdges->e]);
compositeEndPoint = compositeEndPoint + vertices[facetEdges->e].p;
}
}
else if (inclusionValue < 0)
{
for (ic = 0; ic < -inclusionValue; ++ic)
{
retainedStartVertices.push_back(vertices[facetEdges->e]);
compositeStartPoint = compositeStartPoint + vertices[facetEdges->e].p;
}
}
/* Test edge intersection with mesh */
for (uint32_t intrs = 0; intrs < mEdgeFacetIntersectionData12[facetId].size(); ++intrs)
{
const EdgeFacetIntersectionData& intr = mEdgeFacetIntersectionData12[facetId][intrs];
if (intr.edId != (int32_t)i)
continue;
inclusionValue = inclusionValueEdgeFace(mode, intr.intersectionType);
if (inclusionValue > 0)
{
for (ic = 0; ic < inclusionValue; ++ic)
{
retainedEndVertices.push_back(intr.intersectionPoint);
compositeEndPoint = compositeEndPoint + intr.intersectionPoint.p;
}
}
else if (inclusionValue < 0)
{
for (ic = 0; ic < -inclusionValue; ++ic)
{
retainedStartVertices.push_back(intr.intersectionPoint);
compositeStartPoint = compositeStartPoint + intr.intersectionPoint.p;
}
}
}
facetEdges++;
if (retainedStartVertices.size() != retainedEndVertices.size())
{
NVBLAST_LOG_ERROR("Not equal number of starting and ending vertices! Probably input mesh has open edges.");
return;
}
if (retainedEndVertices.size() - lastPos > 1)
{
comp.basePoint = compositeEndPoint - compositeStartPoint;
std::sort(retainedStartVertices.begin() + lastPos, retainedStartVertices.end(), comp);
std::sort(retainedEndVertices.begin() + lastPos, retainedEndVertices.end(), comp);
}
}
EdgeWithParent newEdge;
for (uint32_t rv = 0; rv < retainedStartVertices.size(); ++rv)
{
newEdge.s = addIfNotExist(retainedStartVertices[rv]);
newEdge.e = addIfNotExist(retainedEndVertices[rv]);
newEdge.parent = facetId;
addEdgeIfValid(newEdge);
}
}
return;
}
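// Counterpart of collectRetainedPartsFromA: the edges of mesh B are classified against mesh A,
// and the resulting edges are parented to the mesh-B facets, offset by mesh A's facet count.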
void BooleanEvaluator::collectRetainedPartsFromB(const BooleanConf& mode)
{
int32_t statusValue = 0;
int32_t inclusionValue = 0;
const Vertex* vertices = mMeshB->getVertices();
VertexComparator comp;
const NvBounds3& aMeshBoudning = toNvShared(mMeshA->getBoundingBox());
const Edge* facetEdges = mMeshB->getEdges();
std::vector<Vertex> retainedStartVertices;
std::vector<Vertex> retainedEndVertices;
retainedStartVertices.reserve(255);
retainedEndVertices.reserve(255);
int32_t ic = 0;
for (uint32_t facetId = 0; facetId < mMeshB->getFacetCount(); ++facetId)
{
retainedStartVertices.clear();
retainedEndVertices.clear();
for (uint32_t i = 0; i < mMeshB->getFacet(facetId)->edgesCount; ++i)
{
NvcVec3 compositeEndPoint = {0, 0, 0};
NvcVec3 compositeStartPoint = {0, 0, 0};
int32_t lastPos = static_cast<int32_t>(retainedEndVertices.size());
if (aMeshBoudning.contains(toNvShared(vertices[facetEdges->s].p)))
{
statusValue = vertexMeshStatus30(vertices[facetEdges->s].p, mMeshA);
}
else
{
statusValue = 0;
}
inclusionValue = -inclusionValue30(mode, statusValue);
if (inclusionValue > 0)
{
for (ic = 0; ic < inclusionValue; ++ic)
{
retainedEndVertices.push_back(vertices[facetEdges->s]);
compositeEndPoint = compositeEndPoint + vertices[facetEdges->s].p;
}
}
else if (inclusionValue < 0)
{
for (ic = 0; ic < -inclusionValue; ++ic)
{
retainedStartVertices.push_back(vertices[facetEdges->s]);
compositeStartPoint = compositeStartPoint + vertices[facetEdges->s].p;
}
}
if (aMeshBoudning.contains(toNvShared(vertices[facetEdges->e].p)))
{
statusValue = vertexMeshStatus30(vertices[facetEdges->e].p, mMeshA);
}
else
{
statusValue = 0;
}
inclusionValue = inclusionValue30(mode, statusValue);
if (inclusionValue > 0)
{
for (ic = 0; ic < inclusionValue; ++ic)
{
retainedEndVertices.push_back(vertices[facetEdges->e]);
compositeEndPoint = compositeEndPoint + vertices[facetEdges->e].p;
}
}
else if (inclusionValue < 0)
{
for (ic = 0; ic < -inclusionValue; ++ic)
{
retainedStartVertices.push_back(vertices[facetEdges->e]);
compositeStartPoint = compositeStartPoint + vertices[facetEdges->e].p;
}
}
for (uint32_t intrs = 0; intrs < mEdgeFacetIntersectionData21[facetId].size(); ++intrs)
{
const EdgeFacetIntersectionData& intr = mEdgeFacetIntersectionData21[facetId][intrs];
if (intr.edId != (int32_t)i)
continue;
inclusionValue = inclusionValueEdgeFace(mode, intr.intersectionType);
if (inclusionValue > 0)
{
for (ic = 0; ic < inclusionValue; ++ic)
{
retainedEndVertices.push_back(intr.intersectionPoint);
compositeEndPoint = compositeEndPoint + intr.intersectionPoint.p;
}
}
else if (inclusionValue < 0)
{
for (ic = 0; ic < -inclusionValue; ++ic)
{
retainedStartVertices.push_back(intr.intersectionPoint);
compositeStartPoint = compositeStartPoint + intr.intersectionPoint.p;
}
}
}
facetEdges++;
if (retainedStartVertices.size() != retainedEndVertices.size())
{
NVBLAST_LOG_ERROR("Not equal number of starting and ending vertices! Probably input mesh has open edges.");
return;
}
if (retainedEndVertices.size() - lastPos > 1)
{
comp.basePoint = compositeEndPoint - compositeStartPoint;
std::sort(retainedStartVertices.begin() + lastPos, retainedStartVertices.end(), comp);
std::sort(retainedEndVertices.begin() + lastPos, retainedEndVertices.end(), comp);
}
}
EdgeWithParent newEdge;
for (uint32_t rv = 0; rv < retainedStartVertices.size(); ++rv)
{
newEdge.s = addIfNotExist(retainedStartVertices[rv]);
newEdge.e = addIfNotExist(retainedEndVertices[rv]);
newEdge.parent = facetId + mMeshA->getFacetCount();
addEdgeIfValid(newEdge);
}
}
return;
}
bool EdgeWithParentSortComp(const EdgeWithParent& a, const EdgeWithParent& b)
{
return a.parent < b.parent;
}
void BooleanEvaluator::performBoolean(const Mesh* meshA, const Mesh* meshB, SpatialAccelerator* spAccelA, SpatialAccelerator* spAccelB, const BooleanConf& mode)
{
reset();
mMeshA = meshA;
mMeshB = meshB;
mAcceleratorA = spAccelA;
mAcceleratorB = spAccelB;
buildFaceFaceIntersections(mode);
collectRetainedPartsFromA(mode);
collectRetainedPartsFromB(mode);
mAcceleratorA = nullptr;
mAcceleratorB = nullptr;
}
void BooleanEvaluator::performBoolean(const Mesh* meshA, const Mesh* meshB, const BooleanConf& mode)
{
reset();
mMeshA = meshA;
mMeshB = meshB;
DummyAccelerator ac = DummyAccelerator(mMeshA->getFacetCount());
DummyAccelerator bc = DummyAccelerator(mMeshB->getFacetCount());
performBoolean(meshA, meshB, &ac, &bc, mode);
}
void BooleanEvaluator::performFastCutting(const Mesh* meshA, const Mesh* meshB, SpatialAccelerator* spAccelA, SpatialAccelerator* spAccelB, const BooleanConf& mode)
{
reset();
mMeshA = meshA;
mMeshB = meshB;
mAcceleratorA = spAccelA;
mAcceleratorB = spAccelB;
buildFastFaceFaceIntersection(mode);
collectRetainedPartsFromA(mode);
mAcceleratorA = nullptr;
mAcceleratorB = nullptr;
}
void BooleanEvaluator::performFastCutting(const Mesh* meshA, const Mesh* meshB, const BooleanConf& mode)
{
reset();
mMeshA = meshA;
mMeshB = meshB;
DummyAccelerator ac = DummyAccelerator(mMeshA->getFacetCount());
DummyAccelerator bc = DummyAccelerator(mMeshB->getFacetCount());
performFastCutting(meshA, meshB, &ac, &bc, mode);
}
BooleanEvaluator::BooleanEvaluator()
{
mMeshA = nullptr;
mMeshB = nullptr;
mAcceleratorA = nullptr;
mAcceleratorB = nullptr;
}
BooleanEvaluator::~BooleanEvaluator()
{
reset();
}
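// Sorts the accumulated edges by parent facet and packs them into a new mesh, copying userData,
// materialId and smoothingGroup from the source facet (mesh A facets first, then mesh B facets
// offset by mesh A's facet count). Returns a newly allocated MeshImpl, or nullptr when no edges
// were produced.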
Mesh* BooleanEvaluator::createNewMesh()
{
if (mEdgeAggregate.size() == 0)
{
return nullptr;
}
std::sort(mEdgeAggregate.begin(), mEdgeAggregate.end(), EdgeWithParentSortComp);
std::vector<Facet> newFacets;
std::vector<Edge> newEdges(mEdgeAggregate.size());
int32_t lastPos = 0;
uint32_t lastParent = mEdgeAggregate[0].parent;
uint32_t collected = 0;
int64_t userData = 0;
int32_t materialId = 0;
int32_t smoothingGroup = 0;
for (uint32_t i = 0; i < mEdgeAggregate.size(); ++i)
{
if (mEdgeAggregate[i].parent != lastParent)
{
if (lastParent < mMeshA->getFacetCount())
{
userData = mMeshA->getFacet(lastParent)->userData;
materialId = mMeshA->getFacet(lastParent)->materialId;
smoothingGroup = mMeshA->getFacet(lastParent)->smoothingGroup;
}
else
{
userData = mMeshB->getFacet(lastParent - mMeshA->getFacetCount())->userData;
materialId = mMeshB->getFacet(lastParent - mMeshA->getFacetCount())->materialId;
smoothingGroup = mMeshB->getFacet(lastParent - mMeshA->getFacetCount())->smoothingGroup;
}
newFacets.push_back({ lastPos, collected, userData, materialId, smoothingGroup });
lastPos = i;
lastParent = mEdgeAggregate[i].parent;
collected = 0;
}
collected++;
newEdges[i].s = mEdgeAggregate[i].s;
newEdges[i].e = mEdgeAggregate[i].e;
}
if (lastParent < mMeshA->getFacetCount())
{
userData = mMeshA->getFacet(lastParent)->userData;
materialId = mMeshA->getFacet(lastParent)->materialId;
smoothingGroup = mMeshA->getFacet(lastParent)->smoothingGroup;
}
else
{
uint32_t pr = lastParent - mMeshA->getFacetCount();
userData = mMeshB->getFacet(pr)->userData;
materialId = mMeshB->getFacet(pr)->materialId;
smoothingGroup = mMeshB->getFacet(pr)->smoothingGroup;
}
newFacets.push_back({ lastPos, collected, userData, materialId, smoothingGroup });
return new MeshImpl(mVerticesAggregate.data(), newEdges.data(), newFacets.data(), static_cast<uint32_t>(mVerticesAggregate.size()), static_cast<uint32_t>(mEdgeAggregate.size()), static_cast<uint32_t>(newFacets.size()));
}
void BooleanEvaluator::reset()
{
mMeshA = nullptr;
mMeshB = nullptr;
mAcceleratorA = nullptr;
mAcceleratorB = nullptr;
mEdgeAggregate.clear();
mVerticesAggregate.clear();
mEdgeFacetIntersectionData12.clear();
mEdgeFacetIntersectionData21.clear();
}
/// BooleanTool
void BooleanToolImpl::release()
{
delete this;
}
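// Maps the BooleanTool::Op value onto the matching BooleanConf (the modes table lists
// intersection, union and difference in that order), substitutes dummy accelerators when none
// are supplied, runs the evaluator and returns the newly built result mesh (nullptr on invalid
// arguments).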
Mesh* BooleanToolImpl::performBoolean(const Mesh* meshA, SpatialAccelerator* accelA, const Mesh* meshB, SpatialAccelerator* accelB, BooleanTool::Op op)
{
const BooleanConf modes[] =
{
BooleanConfigurations::BOOLEAN_INTERSECTION(),
BooleanConfigurations::BOOLEAN_UNION(),
BooleanConfigurations::BOOLEAN_DIFFERENCE(),
};
constexpr size_t modeCount = sizeof(modes)/sizeof(modes[0]);
if (op < 0 || op >= modeCount)
{
NVBLAST_LOG_ERROR("Illegal mode passed into BooleanToolImpl::performBoolean.");
return nullptr;
}
if (!meshA || !meshB)
{
NVBLAST_LOG_ERROR("Null mesh pointer passed into BooleanToolImpl::performBoolean.");
return nullptr;
}
DummyAccelerator dmAccelA(meshA->getFacetCount());
DummyAccelerator dmAccelB(meshB->getFacetCount());
m_evaluator.performBoolean(meshA, meshB, accelA ? accelA : &dmAccelA, accelB ? accelB : &dmAccelB, modes[op]);
return m_evaluator.createNewMesh();
}
bool BooleanToolImpl::pointInMesh(const Mesh* mesh, SpatialAccelerator* accel, const NvcVec3& point)
{
if (!mesh)
{
NVBLAST_LOG_ERROR("Null mesh pointer passed into BooleanToolImpl::pointInMesh.");
return false;
}
DummyAccelerator dmAccel(mesh->getFacetCount());
return m_evaluator.isPointContainedInMesh(mesh, accel ? accel : &dmAccel, point);
}
} // namespace Blast
} // namespace Nv
| 47,338 | C++ | 33.105908 | 223 | 0.575647 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringVSA.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTEXTAUTHORINGVSA_H
#define NVBLASTEXTAUTHORINGVSA_H
namespace Nv
{
namespace Blast
{
/*
This code is copied from APEX GSA
*/
namespace VSA
{
typedef float real;
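// Interface supplying the half-space set to the solver: farthest_halfspace is expected to write
// into 'plane' the plane equation of the half-space from whose boundary 'point' is farthest
// (on the outside), and to return that signed distance.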
struct VS3D_Halfspace_Set
{
virtual real farthest_halfspace(real plane[4], const real point[4]) = 0;
};
// Simple types and operations for internal calculations
struct Vec3 { real x, y, z; }; // 3-vector
inline Vec3 vec3(real x, real y, real z) { Vec3 r; r.x = x; r.y = y; r.z = z; return r; } // vector builder
inline Vec3 operator + (const Vec3& a, const Vec3& b) { return vec3(a.x + b.x, a.y + b.y, a.z + b.z); } // vector addition
inline Vec3 operator * (real s, const Vec3& v) { return vec3(s*v.x, s*v.y, s*v.z); } // scalar multiplication
inline real operator | (const Vec3& a, const Vec3& b) { return a.x*b.x + a.y*b.y + a.z*b.z; } // dot product
inline Vec3 operator ^ (const Vec3& a, const Vec3& b) { return vec3(a.y*b.z - b.y*a.z, a.z*b.x - b.z*a.x, a.x*b.y - b.x*a.y); } // cross product
struct Vec4 { Vec3 v; real w; }; // 4-vector split into 3-vector and scalar parts
inline Vec4 vec4(const Vec3& v, real w) { Vec4 r; r.v = v; r.w = w; return r; } // vector builder
inline real operator | (const Vec4& a, const Vec4& b) { return (a.v | b.v) + a.w*b.w; } // dot product
// More accurate perpendicular
inline Vec3 perp(const Vec3& a, const Vec3& b)
{
Vec3 c = a^b; // Cross-product gives perpendicular
#if VS3D_HIGH_ACCURACY || REAL_DOUBLE
const real c2 = c | c;
if (c2 != 0) c = c + (1 / c2)*((a | c)*(c^b) + (b | c)*(a^c)); // Improvement to (a b)^T(c) = (0)
#endif
return c;
}
// Square
inline real sq(real x) { return x*x; }
// Returns index of the extremal element in a three-element set {e0, e1, e2} based upon comparisons c_ij. The extremal index m is such that c_mn is true, or e_m == e_n, for all n.
inline int ext_index(int c_10, int c_21, int c_20) { return c_10 << c_21 | (c_21&c_20) << 1; }
// Returns index (0, 1, or 2) of minimum argument
inline int index_of_min(real x0, real x1, real x2) { return ext_index((int)(x1 < x0), (int)(x2 < x1), (int)(x2 < x0)); }
// Compare fractions with positive denominators. Returns a_num*sqrt(a_rden2) > b_num*sqrt(b_rden2)
inline bool frac_gt(real a_num, real a_rden2, real b_num, real b_rden2)
{
const bool a_num_neg = a_num < 0;
const bool b_num_neg = b_num < 0;
return a_num_neg != b_num_neg ? b_num_neg : ((a_num*a_num*a_rden2 > b_num*b_num*b_rden2) != a_num_neg);
}
// Returns index (0, 1, or 2) of maximum fraction with positive denominators
inline int index_of_max_frac(real x0_num, real x0_rden2, real x1_num, real x1_rden2, real x2_num, real x2_rden2)
{
return ext_index((int)frac_gt(x1_num, x1_rden2, x0_num, x0_rden2), (int)frac_gt(x2_num, x2_rden2, x1_num, x1_rden2), (int)frac_gt(x2_num, x2_rden2, x0_num, x0_rden2));
}
// Compare values given their signs and squares. Returns a > b. a2 and b2 may have any constant offset applied to them.
inline bool sgn_sq_gt(real sgn_a, real a2, real sgn_b, real b2) { return sgn_a*sgn_b < 0 ? (sgn_b < 0) : ((a2 > b2) != (sgn_a < 0)); }
// Returns index (0, 1, or 2) of maximum value given their signs and squares. sq_x0, sq_x1, and sq_x2 may have any constant offset applied to them.
inline int index_of_max_sgn_sq(real sgn_x0, real sq_x0, real sgn_x1, real sq_x1, real sgn_x2, real sq_x2)
{
return ext_index((int)sgn_sq_gt(sgn_x1, sq_x1, sgn_x0, sq_x0), (int)sgn_sq_gt(sgn_x2, sq_x2, sgn_x1, sq_x1), (int)sgn_sq_gt(sgn_x2, sq_x2, sgn_x0, sq_x0));
}
// Project 2D (homogeneous) vector onto 2D half-space boundary
inline void project2D(Vec3& r, const Vec3& plane, real delta, real recip_n2, real eps2)
{
r = r + (-delta*recip_n2)*vec3(plane.x, plane.y, 0);
r = r + (-(r | plane)*recip_n2)*vec3(plane.x, plane.y, 0); // Second projection for increased accuracy
if ((r | r) > eps2) return;
r = (-plane.z*recip_n2)*vec3(plane.x, plane.y, 0);
r.z = 1;
}
// Update function for vs3d_test
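// Reduces the current simplex S against the most recently added plane h (the last entry): the
// problem is projected into the h plane, redundant planes are discarded, and the new test point
// p is computed. Returns false when a void (provably empty intersection) is detected.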
static bool vs3d_update(Vec4& p, Vec4 S[4], int& plane_count, const Vec4& q, real eps2)
{
// h plane is the last plane
const Vec4& h = S[plane_count - 1];
// Handle plane_count == 1 specially (optimization; this could be commented out)
if (plane_count == 1)
{
// Solution is objective projected onto h plane
p = q;
p.v = p.v + -(p | h)*h.v;
if ((p | p) <= eps2) p = vec4(-h.w*h.v, 1); // If p == 0 then q is a direction vector, any point in h is a support point
return true;
}
// Create basis in the h plane
const int min_i = index_of_min(h.v.x*h.v.x, h.v.y*h.v.y, h.v.z*h.v.z);
const Vec3 y = h.v^vec3((real)(min_i == 0), (real)(min_i == 1), (real)(min_i == 2));
const Vec3 x = y^h.v;
// Use reduced vector r instead of p
Vec3 r = { x | q.v, y | q.v, q.w*(y | y) }; // (x|x) = (y|y) = square of plane basis scale
// If r == 0 (within epsilon), then it is a direction vector, and we have a bounded solution
if ((r | r) <= eps2) r.z = 1;
// Create plane equations in the h plane. These will not be normalized in general.
int N = 0; // Plane count in h subspace
Vec3 R[3]; // Planes in h subspace
real recip_n2[3]; // Plane normal vector reciprocal lengths squared
real delta[3]; // Signed distance of objective to the planes
int index[3]; // Keep track of original plane indices
for (int i = 0; i < plane_count - 1; ++i)
{
const Vec3& vi = S[i].v;
const real cos_theta = h.v | vi;
R[N] = vec3(x | vi, y | vi, S[i].w - h.w*cos_theta);
index[N] = i;
const real n2 = R[N].x*R[N].x + R[N].y*R[N].y;
if (n2 >= eps2)
{
const real lin_norm = (real)1.5 - (real)0.5*n2; // 1st-order approximation to 1/sqrt(n2) expanded about n2 = 1
R[N] = lin_norm*R[N]; // We don't need normalized plane equations, but rescaling (even with an approximate normalization) gives better numerical behavior
recip_n2[N] = 1 / (R[N].x*R[N].x + R[N].y*R[N].y);
delta[N] = r | R[N];
++N; // Keep this plane
}
else if (cos_theta < 0) return false; // Parallel cases are redundant and rejected, anti-parallel cases are 1D voids
}
// Now work with the N-sized R array of half-spaces in the h plane
switch (N)
{
case 1: one_plane :
if (delta[0] < 0) N = 0; // S[0] is redundant, eliminate it
else project2D(r, R[0], delta[0], recip_n2[0], eps2);
break;
case 2: two_planes :
if (delta[0] < 0 && delta[1] < 0) N = 0; // S[0] and S[1] are redundant, eliminate them
else
{
const int max_d_index = (int)frac_gt(delta[1], recip_n2[1], delta[0], recip_n2[0]);
project2D(r, R[max_d_index], delta[max_d_index], recip_n2[max_d_index], eps2);
const int min_d_index = max_d_index ^ 1;
const real new_delta_min = r | R[min_d_index];
if (new_delta_min < 0)
{
index[0] = index[max_d_index];
N = 1; // S[min_d_index] is redundant, eliminate it
}
else
{
// Set r to the intersection of R[0] and R[1] and keep both
r = perp(R[0], R[1]);
if (r.z*r.z*recip_n2[0] * recip_n2[1] < eps2)
{
if (R[0].x*R[1].x + R[0].y*R[1].y < 0) return false; // 2D void found
goto one_plane;
}
r = (1 / r.z)*r; // We could just as well multiply r by sgn(r.z); we just need to ensure r.z > 0
}
}
break;
case 3:
if (delta[0] < 0 && delta[1] < 0 && delta[2] < 0) N = 0; // S[0], S[1], and S[2] are redundant, eliminate them
else
{
const Vec3 row_x = { R[0].x, R[1].x, R[2].x };
const Vec3 row_y = { R[0].y, R[1].y, R[2].y };
const Vec3 row_w = { R[0].z, R[1].z, R[2].z };
const Vec3 cof_w = perp(row_x, row_y);
const bool detR_pos = (row_w | cof_w) > 0;
const int nrw_sgn0 = cof_w.x*cof_w.x*recip_n2[1] * recip_n2[2] < eps2 ? 0 : (((int)((cof_w.x > 0) == detR_pos) << 1) - 1);
const int nrw_sgn1 = cof_w.y*cof_w.y*recip_n2[2] * recip_n2[0] < eps2 ? 0 : (((int)((cof_w.y > 0) == detR_pos) << 1) - 1);
const int nrw_sgn2 = cof_w.z*cof_w.z*recip_n2[0] * recip_n2[1] < eps2 ? 0 : (((int)((cof_w.z > 0) == detR_pos) << 1) - 1);
if ((nrw_sgn0 | nrw_sgn1 | nrw_sgn2) >= 0) return false; // 3D void found
const int positive_width_count = ((nrw_sgn0 >> 1) & 1) + ((nrw_sgn1 >> 1) & 1) + ((nrw_sgn2 >> 1) & 1);
if (positive_width_count == 1)
{
// A single positive width results from a redundant plane. Eliminate it and perform the N = 2 calculation.
const int pos_width_index = ((nrw_sgn1 >> 1) & 1) | (nrw_sgn2 & 2); // Calculates which index corresponds to the positive-width side
R[pos_width_index] = R[2];
recip_n2[pos_width_index] = recip_n2[2];
delta[pos_width_index] = delta[2];
index[pos_width_index] = index[2];
N = 2;
goto two_planes;
}
// Find the max dot product of r and R[i]/|R_normal[i]|. For numerical accuracy when the angle between r and the i^{th} plane normal is small, we take some care below:
const int max_d_index = r.z != 0
? index_of_max_frac(delta[0], recip_n2[0], delta[1], recip_n2[1], delta[2], recip_n2[2]) // displacement term resolves small-angle ambiguity, just use dot product
: index_of_max_sgn_sq(delta[0], -sq(r.x*R[0].y - r.y*R[0].x)*recip_n2[0], delta[1], -sq(r.x*R[1].y - r.y*R[1].x)*recip_n2[1], delta[2], -sq(r.x*R[2].y - r.y*R[2].x)*recip_n2[2]); // No displacement term. Use wedge product to find the sine of the angle.
// Project r onto max-d plane
project2D(r, R[max_d_index], delta[max_d_index], recip_n2[max_d_index], eps2);
N = 1; // Unless we use a vertex in the loop below
const int index_max = index[max_d_index];
// The number of finite widths should be >= 2. If not, it should be 0, but in any case it implies three parallel lines in the plane, which we should not have here.
// If we do have three parallel lines (# of finite widths < 2), we've picked the line corresponding to the half-plane farthest from r, which is correct.
const int finite_width_count = (nrw_sgn0 & 1) + (nrw_sgn1 & 1) + (nrw_sgn2 & 1);
if (finite_width_count >= 2)
{
const int i_remaining[2] = { (1 << max_d_index) & 3, (3 >> max_d_index) ^ 1 }; // = {(max_d_index+1)%3, (max_d_index+2)%3}
const int i_select = (int)frac_gt(delta[i_remaining[1]], recip_n2[i_remaining[1]], delta[i_remaining[0]], recip_n2[i_remaining[0]]); // Select the greater of the remaining dot products
for (int i = 0; i < 2; ++i)
{
const int j = i_remaining[i_select^i]; // i = 0 => the next-greatest, i = 1 => the least
if ((r | R[j]) >= 0)
{
r = perp(R[max_d_index], R[j]);
r = (1 / r.z)*r; // We could just as well multiply r by sgn(r.z); we just need to ensure r.z > 0
index[1] = index[j];
N = 2;
break;
}
}
}
index[0] = index_max;
}
break;
}
// Transform r back to 3D space
p = vec4(r.x*x + r.y*y + (-r.z*h.w)*h.v, r.z);
// Pack S array with kept planes
if (N < 2 || index[1] != 0) { for (int i = 0; i < N; ++i) S[i] = S[index[i]]; } // Safe to copy columns in order
else { const Vec4 temp = S[0]; S[0] = S[index[0]]; S[1] = temp; } // Otherwise use temp storage to avoid overwrite
S[N] = h;
plane_count = N + 1;
return true;
}
// Performs the VS algorithm for D = 3
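// Returns 1 if the half-space set has a non-empty intersection, 0 if a void simplex proves the
// intersection empty, and -1 if the iteration limit is reached without a decision. When q is
// supplied it is used as the objective and receives the resulting point in homogeneous
// coordinates.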
inline int vs3d_test(VS3D_Halfspace_Set& halfspace_set, real* q = nullptr)
{
// Objective = q if it is not NULL, otherwise it is the origin represented in homogeneous coordinates
const Vec4 objective = q ? (q[3] != 0 ? vec4((1 / q[3])*vec3(q[0], q[1], q[2]), 1) : *(Vec4*)q) : vec4(vec3(0, 0, 0), 1);
// Tolerance for 3D void simplex algorithm
const real eps_f = (real)1 / (sizeof(real) == 4 ? (1L << 23) : (1LL << 52)); // Floating-point epsilon
#if VS3D_HIGH_ACCURACY || REAL_DOUBLE
const real eps = 8 * eps_f;
#else
const real eps = 80 * eps_f;
#endif
const real eps2 = eps*eps; // Using epsilon squared
// Maximum allowed iterations of main loop. If exceeded, error code is returned
const int max_iteration_count = 50;
// State
Vec4 S[4]; // Up to 4 planes
int plane_count = 0; // Number of valid planes
Vec4 p = objective; // Test point, initialized to objective
// Default result, changed to valid result if found in loop below
int result = -1;
// Iterate until a stopping condition is met or the maximum number of iterations is reached
for (int i = 0; result < 0 && i < max_iteration_count; ++i)
{
Vec4& plane = S[plane_count++];
real delta = halfspace_set.farthest_halfspace(&plane.v.x, &p.v.x);
#if VS3D_UNNORMALIZED_PLANE_HANDLING != 0
const real recip_norm = vs3d_recip_sqrt(plane.v | plane.v);
plane = vec4(recip_norm*plane.v, recip_norm*plane.w);
delta *= recip_norm;
#endif
if (delta <= 0 || delta*delta <= eps2*(p | p)) result = 1; // Intersection found
else if (!vs3d_update(p, S, plane_count, objective, eps2)) result = 0; // Void simplex found
}
// If q is given, fill it with the solution (normalize p.w if it is not zero)
if (q) *(Vec4*)q = (p.w != 0) ? vec4((1 / p.w)*p.v, 1) : p;
return result;
}
} // namespace VSA
} // namespace Blast
} // namespace Nv
#endif // ifndef NVBLASTEXTAUTHORINGVSA_H
| 15,829 | C | 46.969697 | 270 | 0.5766 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtApexSharedParts.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include "NvBlastExtApexSharedParts.h"
#include "NvBlastGlobals.h"
#include "NvBlastMemory.h"
#include "NvBlastAssert.h"
#include "NsVecMath.h"
#include "NvMat44.h"
#include "NvBounds3.h"
#include "NsVecMath.h"
#include <vector>
using namespace nvidia;
using namespace nvidia::shdfnd::aos;
namespace Nv
{
namespace Blast
{
NV_NOALIAS NV_FORCE_INLINE BoolV PointOutsideOfPlane4(const Vec3VArg _a, const Vec3VArg _b, const Vec3VArg _c, const Vec3VArg _d)
{
// this is not 0 because of the following scenario:
// All the points lie on the same plane and the plane goes through the origin (0,0,0).
// On the Wii U, the math below has the problem that when point A gets projected on the
// plane computed by A, B, C, the distance to the plane might not be 0 for the mentioned
// scenario but a small positive or negative value. This can lead to the wrong boolean
// results. Using a small negative value as threshold is more conservative but safer.
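// Each lane of the returned mask corresponds to one tetrahedron face (abc, acd, adb, bdc); the caller
// (closestPtPointTetrahedronTesselation) treats a set lane as 'origin outside that face' and an
// all-clear mask as 'origin inside the tetrahedron'.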
const Vec4V zero = V4Load(-1e-6f);
const Vec3V ab = V3Sub(_b, _a);
const Vec3V ac = V3Sub(_c, _a);
const Vec3V ad = V3Sub(_d, _a);
const Vec3V bd = V3Sub(_d, _b);
const Vec3V bc = V3Sub(_c, _b);
const Vec3V v0 = V3Cross(ab, ac);
const Vec3V v1 = V3Cross(ac, ad);
const Vec3V v2 = V3Cross(ad, ab);
const Vec3V v3 = V3Cross(bd, bc);
const FloatV signa0 = V3Dot(v0, _a);
const FloatV signa1 = V3Dot(v1, _a);
const FloatV signa2 = V3Dot(v2, _a);
const FloatV signd3 = V3Dot(v3, _a);
const FloatV signd0 = V3Dot(v0, _d);
const FloatV signd1 = V3Dot(v1, _b);
const FloatV signd2 = V3Dot(v2, _c);
const FloatV signa3 = V3Dot(v3, _b);
const Vec4V signa = V4Merge(signa0, signa1, signa2, signa3);
const Vec4V signd = V4Merge(signd0, signd1, signd2, signd3);
return V4IsGrtrOrEq(V4Mul(signa, signd), zero);//same side, outside of the plane
}
NV_NOALIAS NV_FORCE_INLINE Vec3V closestPtPointSegment(const Vec3VArg a, const Vec3VArg b)
{
const FloatV zero = FZero();
const FloatV one = FOne();
//Test degenerated case
const Vec3V ab = V3Sub(b, a);
const FloatV denom = V3Dot(ab, ab);
const Vec3V ap = V3Neg(a);//V3Sub(origin, a);
const FloatV nom = V3Dot(ap, ab);
const BoolV con = FIsEq(denom, zero);
const FloatV tValue = FClamp(FDiv(nom, denom), zero, one);
const FloatV t = FSel(con, zero, tValue);
return V3Sel(con, a, V3ScaleAdd(ab, t, a));
}
NV_NOALIAS NV_FORCE_INLINE Vec3V closestPtPointSegment(const Vec3VArg Q0, const Vec3VArg Q1, const Vec3VArg A0, const Vec3VArg A1,
const Vec3VArg B0, const Vec3VArg B1, uint32_t& size, Vec3V& closestA, Vec3V& closestB)
{
const Vec3V a = Q0;
const Vec3V b = Q1;
const BoolV bTrue = BTTTT();
const FloatV zero = FZero();
const FloatV one = FOne();
//Test degenerated case
const Vec3V ab = V3Sub(b, a);
const FloatV denom = V3Dot(ab, ab);
const Vec3V ap = V3Neg(a);//V3Sub(origin, a);
const FloatV nom = V3Dot(ap, ab);
const BoolV con = FIsEq(denom, zero);
if (BAllEq(con, bTrue))
{
size = 1;
closestA = A0;
closestB = B0;
return Q0;
}
const Vec3V v = V3Sub(A1, A0);
const Vec3V w = V3Sub(B1, B0);
const FloatV tValue = FClamp(FDiv(nom, denom), zero, one);
const FloatV t = FSel(con, zero, tValue);
const Vec3V tempClosestA = V3ScaleAdd(v, t, A0);
const Vec3V tempClosestB = V3ScaleAdd(w, t, B0);
closestA = tempClosestA;
closestB = tempClosestB;
return V3Sub(tempClosestA, tempClosestB);
}
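// Closest point to the origin on segment [Q0, Q1] of the Minkowski difference. Segments longer than
// ~100 units (targetSegmentLengthSq) are iteratively bisected, keeping the half closer to the origin,
// to limit floating-point error; the corresponding points on hulls A and B are interpolated alongside.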
NV_NOALIAS Vec3V closestPtPointSegmentTesselation(const Vec3VArg Q0, const Vec3VArg Q1, const Vec3VArg A0, const Vec3VArg A1,
const Vec3VArg B0, const Vec3VArg B1, uint32_t& size, Vec3V& closestA, Vec3V& closestB)
{
const FloatV half = FHalf();
const FloatV targetSegmentLengthSq = FLoad(10000.f);//100 unit
Vec3V q0 = Q0;
Vec3V q1 = Q1;
Vec3V a0 = A0;
Vec3V a1 = A1;
Vec3V b0 = B0;
Vec3V b1 = B1;
for (;;)
{
const Vec3V midPoint = V3Scale(V3Add(q0, q1), half);
const Vec3V midA = V3Scale(V3Add(a0, a1), half);
const Vec3V midB = V3Scale(V3Add(b0, b1), half);
const Vec3V v = V3Sub(midPoint, q0);
const FloatV sqV = V3Dot(v, v);
if (FAllGrtr(targetSegmentLengthSq, sqV))
break;
//split the segment into half
const Vec3V tClos0 = closestPtPointSegment(q0, midPoint);
const FloatV sqDist0 = V3Dot(tClos0, tClos0);
const Vec3V tClos1 = closestPtPointSegment(q1, midPoint);
const FloatV sqDist1 = V3Dot(tClos1, tClos1);
//const BoolV con = FIsGrtr(sqDist0, sqDist1);
if (FAllGrtr(sqDist0, sqDist1))
{
//segment [m, q1]
q0 = midPoint;
a0 = midA;
b0 = midB;
}
else
{
//segment [q0, m]
q1 = midPoint;
a1 = midA;
b1 = midB;
}
}
return closestPtPointSegment(q0, q1, a0, a1, b0, b1, size, closestA, closestB);
}
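// Closest point to the origin on the simplex triangle Q[indices], returning the matching points on
// hulls A and B. Degenerate triangles and vertex/edge Voronoi regions are handled first; otherwise the
// triangle is iteratively refined (splitting the longest edge and keeping the sub-triangle on the
// origin's side) until its edge-length ratio and area are small enough, and the result is interpolated
// barycentrically.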
NV_NOALIAS Vec3V closestPtPointTriangleTesselation(const Vec3V* NV_RESTRICT Q, const Vec3V* NV_RESTRICT A, const Vec3V* NV_RESTRICT B, const uint32_t* NV_RESTRICT indices, uint32_t& size, Vec3V& closestA, Vec3V& closestB)
{
size = 3;
const FloatV zero = FZero();
const FloatV eps = FEps();
const FloatV half = FHalf();
const BoolV bTrue = BTTTT();
const FloatV four = FLoad(4.f);
const FloatV sixty = FLoad(100.f);
const uint32_t ind0 = indices[0];
const uint32_t ind1 = indices[1];
const uint32_t ind2 = indices[2];
const Vec3V a = Q[ind0];
const Vec3V b = Q[ind1];
const Vec3V c = Q[ind2];
Vec3V ab_ = V3Sub(b, a);
Vec3V ac_ = V3Sub(c, a);
Vec3V bc_ = V3Sub(b, c);
const FloatV dac_ = V3Dot(ac_, ac_);
const FloatV dbc_ = V3Dot(bc_, bc_);
if (FAllGrtrOrEq(eps, FMin(dac_, dbc_)))
{
//degenerate
size = 2;
return closestPtPointSegment(Q[ind0], Q[ind1], A[ind0], A[ind1], B[ind0], B[ind1], size, closestA, closestB);
}
Vec3V ap = V3Neg(a);
Vec3V bp = V3Neg(b);
Vec3V cp = V3Neg(c);
FloatV d1 = V3Dot(ab_, ap); // snom
FloatV d2 = V3Dot(ac_, ap); // tnom
FloatV d3 = V3Dot(ab_, bp); // -sdenom
FloatV d4 = V3Dot(ac_, bp); // unom = d4 - d3
FloatV d5 = V3Dot(ab_, cp); // udenom = d5 - d6
FloatV d6 = V3Dot(ac_, cp); // -tdenom
/* FloatV unom = FSub(d4, d3);
FloatV udenom = FSub(d5, d6);*/
FloatV va = FNegScaleSub(d5, d4, FMul(d3, d6));//edge region of BC
FloatV vb = FNegScaleSub(d1, d6, FMul(d5, d2));//edge region of AC
FloatV vc = FNegScaleSub(d3, d2, FMul(d1, d4));//edge region of AB
//check if p in vertex region outside a
const BoolV con00 = FIsGrtrOrEq(zero, d1); // snom <= 0
const BoolV con01 = FIsGrtrOrEq(zero, d2); // tnom <= 0
const BoolV con0 = BAnd(con00, con01); // vertex region a
if (BAllEq(con0, bTrue))
{
//size = 1;
closestA = A[ind0];
closestB = B[ind0];
return Q[ind0];
}
//check if p in vertex region outside b
const BoolV con10 = FIsGrtrOrEq(d3, zero);
const BoolV con11 = FIsGrtrOrEq(d3, d4);
const BoolV con1 = BAnd(con10, con11); // vertex region b
if (BAllEq(con1, bTrue))
{
/*size = 1;
indices[0] = ind1;*/
closestA = A[ind1];
closestB = B[ind1];
return Q[ind1];
}
//check if p in vertex region outside of c
const BoolV con20 = FIsGrtrOrEq(d6, zero);
const BoolV con21 = FIsGrtrOrEq(d6, d5);
const BoolV con2 = BAnd(con20, con21); // vertex region c
if (BAllEq(con2, bTrue))
{
closestA = A[ind2];
closestB = B[ind2];
return Q[ind2];
}
//check if p in edge region of AB
const BoolV con30 = FIsGrtrOrEq(zero, vc);
const BoolV con31 = FIsGrtrOrEq(d1, zero);
const BoolV con32 = FIsGrtrOrEq(zero, d3);
const BoolV con3 = BAnd(con30, BAnd(con31, con32));
if (BAllEq(con3, bTrue))
{
//size = 2;
//p in edge region of AB, split AB
return closestPtPointSegmentTesselation(Q[ind0], Q[ind1], A[ind0], A[ind1], B[ind0], B[ind1], size, closestA, closestB);
}
//check if p in edge region of BC
const BoolV con40 = FIsGrtrOrEq(zero, va);
const BoolV con41 = FIsGrtrOrEq(d4, d3);
const BoolV con42 = FIsGrtrOrEq(d5, d6);
const BoolV con4 = BAnd(con40, BAnd(con41, con42));
if (BAllEq(con4, bTrue))
{
//p in edge region of BC, split BC
return closestPtPointSegmentTesselation(Q[ind1], Q[ind2], A[ind1], A[ind2], B[ind1], B[ind2], size, closestA, closestB);
}
//check if p in edge region of AC
const BoolV con50 = FIsGrtrOrEq(zero, vb);
const BoolV con51 = FIsGrtrOrEq(d2, zero);
const BoolV con52 = FIsGrtrOrEq(zero, d6);
const BoolV con5 = BAnd(con50, BAnd(con51, con52));
if (BAllEq(con5, bTrue))
{
//p in edge region of AC, split AC
return closestPtPointSegmentTesselation(Q[ind0], Q[ind2], A[ind0], A[ind2], B[ind0], B[ind2], size, closestA, closestB);
}
size = 3;
Vec3V q0 = Q[ind0];
Vec3V q1 = Q[ind1];
Vec3V q2 = Q[ind2];
Vec3V a0 = A[ind0];
Vec3V a1 = A[ind1];
Vec3V a2 = A[ind2];
Vec3V b0 = B[ind0];
Vec3V b1 = B[ind1];
Vec3V b2 = B[ind2];
for (;;)
{
const Vec3V ab = V3Sub(q1, q0);
const Vec3V ac = V3Sub(q2, q0);
const Vec3V bc = V3Sub(q2, q1);
const FloatV dab = V3Dot(ab, ab);
const FloatV dac = V3Dot(ac, ac);
const FloatV dbc = V3Dot(bc, bc);
const FloatV fMax = FMax(dab, FMax(dac, dbc));
const FloatV fMin = FMin(dab, FMin(dac, dbc));
const Vec3V w = V3Cross(ab, ac);
const FloatV area = V3Length(w);
const FloatV ratio = FDiv(FSqrt(fMax), FSqrt(fMin));
if (FAllGrtr(four, ratio) && FAllGrtr(sixty, area))
break;
//calculate the triangle normal
const Vec3V triNormal = V3Normalize(w);
NVBLAST_ASSERT(V3AllEq(triNormal, V3Zero()) == 0);
//split the longest edge
if (FAllGrtrOrEq(dab, dac) && FAllGrtrOrEq(dab, dbc))
{
//split edge q0q1
const Vec3V midPoint = V3Scale(V3Add(q0, q1), half);
const Vec3V midA = V3Scale(V3Add(a0, a1), half);
const Vec3V midB = V3Scale(V3Add(b0, b1), half);
const Vec3V v = V3Sub(midPoint, q2);
const Vec3V n = V3Normalize(V3Cross(v, triNormal));
const FloatV d = FNeg(V3Dot(n, midPoint));
const FloatV dp = FAdd(V3Dot(n, q0), d);
const FloatV sum = FMul(d, dp);
if (FAllGrtr(sum, zero))
{
//q0 and origin at the same side, split triangle[q0, m, q2]
q1 = midPoint;
a1 = midA;
b1 = midB;
}
else
{
//q1 and origin at the same side, split triangle[m, q1, q2]
q0 = midPoint;
a0 = midA;
b0 = midB;
}
}
else if (FAllGrtrOrEq(dac, dbc))
{
//split edge q0q2
const Vec3V midPoint = V3Scale(V3Add(q0, q2), half);
const Vec3V midA = V3Scale(V3Add(a0, a2), half);
const Vec3V midB = V3Scale(V3Add(b0, b2), half);
const Vec3V v = V3Sub(midPoint, q1);
const Vec3V n = V3Normalize(V3Cross(v, triNormal));
const FloatV d = FNeg(V3Dot(n, midPoint));
const FloatV dp = FAdd(V3Dot(n, q0), d);
const FloatV sum = FMul(d, dp);
if (FAllGrtr(sum, zero))
{
//q0 and origin at the same side, split triangle[q0, q1, m]
q2 = midPoint;
a2 = midA;
b2 = midB;
}
else
{
//q2 and origin at the same side, split triangle[m, q1, q2]
q0 = midPoint;
a0 = midA;
b0 = midB;
}
}
else
{
//split edge q1q2
const Vec3V midPoint = V3Scale(V3Add(q1, q2), half);
const Vec3V midA = V3Scale(V3Add(a1, a2), half);
const Vec3V midB = V3Scale(V3Add(b1, b2), half);
const Vec3V v = V3Sub(midPoint, q0);
const Vec3V n = V3Normalize(V3Cross(v, triNormal));
const FloatV d = FNeg(V3Dot(n, midPoint));
const FloatV dp = FAdd(V3Dot(n, q1), d);
const FloatV sum = FMul(d, dp);
if (FAllGrtr(sum, zero))
{
//q1 and origin at the same side, split triangle[q0, q1, m]
q2 = midPoint;
a2 = midA;
b2 = midB;
}
else
{
//q2 and origin at the same side, split triangle[q0, m, q2]
q1 = midPoint;
a1 = midA;
b1 = midB;
}
}
}
//P must project inside face region. Compute Q using Barycentric coordinates
ab_ = V3Sub(q1, q0);
ac_ = V3Sub(q2, q0);
ap = V3Neg(q0);
bp = V3Neg(q1);
cp = V3Neg(q2);
d1 = V3Dot(ab_, ap); // snom
d2 = V3Dot(ac_, ap); // tnom
d3 = V3Dot(ab_, bp); // -sdenom
d4 = V3Dot(ac_, bp); // unom = d4 - d3
d5 = V3Dot(ab_, cp); // udenom = d5 - d6
d6 = V3Dot(ac_, cp); // -tdenom
va = FNegScaleSub(d5, d4, FMul(d3, d6));//edge region of BC
vb = FNegScaleSub(d1, d6, FMul(d5, d2));//edge region of AC
vc = FNegScaleSub(d3, d2, FMul(d1, d4));//edge region of AB
const FloatV toRecipD = FAdd(va, FAdd(vb, vc));
const FloatV denom = FRecip(toRecipD);//V4GetW(recipTmp);
const Vec3V v0 = V3Sub(a1, a0);
const Vec3V v1 = V3Sub(a2, a0);
const Vec3V w0 = V3Sub(b1, b0);
const Vec3V w1 = V3Sub(b2, b0);
const FloatV t = FMul(vb, denom);
const FloatV w = FMul(vc, denom);
const Vec3V vA1 = V3Scale(v1, w);
const Vec3V vB1 = V3Scale(w1, w);
const Vec3V tempClosestA = V3Add(a0, V3ScaleAdd(v0, t, vA1));
const Vec3V tempClosestB = V3Add(b0, V3ScaleAdd(w0, t, vB1));
closestA = tempClosestA;
closestB = tempClosestB;
return V3Sub(tempClosestA, tempClosestB);
}
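// Closest point to the origin for a tetrahedral simplex: returns the zero vector if the origin is inside
// the tetrahedron; otherwise every face the origin lies outside of is evaluated with the triangle routine
// above and the closest result (together with its reduced simplex) is kept.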
NV_NOALIAS Vec3V closestPtPointTetrahedronTesselation(Vec3V* NV_RESTRICT Q, Vec3V* NV_RESTRICT A, Vec3V* NV_RESTRICT B, uint32_t& size, Vec3V& closestA, Vec3V& closestB)
{
const FloatV eps = FEps();
const Vec3V zeroV = V3Zero();
uint32_t tempSize = size;
FloatV bestSqDist = FLoad(NV_MAX_F32);
const Vec3V a = Q[0];
const Vec3V b = Q[1];
const Vec3V c = Q[2];
const Vec3V d = Q[3];
const BoolV bTrue = BTTTT();
const BoolV bFalse = BFFFF();
//degenerated
const Vec3V ad = V3Sub(d, a);
const Vec3V bd = V3Sub(d, b);
const Vec3V cd = V3Sub(d, c);
const FloatV dad = V3Dot(ad, ad);
const FloatV dbd = V3Dot(bd, bd);
const FloatV dcd = V3Dot(cd, cd);
const FloatV fMin = FMin(dad, FMin(dbd, dcd));
if (FAllGrtr(eps, fMin))
{
size = 3;
uint32_t tempIndices[] = { 0, 1, 2 };
return closestPtPointTriangleTesselation(Q, A, B, tempIndices, size, closestA, closestB);
}
Vec3V _Q[] = { Q[0], Q[1], Q[2], Q[3] };
Vec3V _A[] = { A[0], A[1], A[2], A[3] };
Vec3V _B[] = { B[0], B[1], B[2], B[3] };
uint32_t indices[3] = { 0, 1, 2 };
const BoolV bIsOutside4 = PointOutsideOfPlane4(a, b, c, d);
if (BAllEq(bIsOutside4, bFalse))
{
//origin is inside the tetrahedron, we are done
return zeroV;
}
Vec3V result = zeroV;
Vec3V tempClosestA, tempClosestB;
if (BAllEq(BGetX(bIsOutside4), bTrue))
{
uint32_t tempIndices[] = { 0, 1, 2 };
uint32_t _size = 3;
result = closestPtPointTriangleTesselation(_Q, _A, _B, tempIndices, _size, tempClosestA, tempClosestB);
const FloatV sqDist = V3Dot(result, result);
bestSqDist = sqDist;
indices[0] = tempIndices[0];
indices[1] = tempIndices[1];
indices[2] = tempIndices[2];
tempSize = _size;
closestA = tempClosestA;
closestB = tempClosestB;
}
if (BAllEq(BGetY(bIsOutside4), bTrue))
{
uint32_t tempIndices[] = { 0, 2, 3 };
uint32_t _size = 3;
const Vec3V q = closestPtPointTriangleTesselation(_Q, _A, _B, tempIndices, _size, tempClosestA, tempClosestB);
const FloatV sqDist = V3Dot(q, q);
const BoolV con = FIsGrtr(bestSqDist, sqDist);
if (BAllEq(con, bTrue))
{
result = q;
bestSqDist = sqDist;
indices[0] = tempIndices[0];
indices[1] = tempIndices[1];
indices[2] = tempIndices[2];
tempSize = _size;
closestA = tempClosestA;
closestB = tempClosestB;
}
}
if (BAllEq(BGetZ(bIsOutside4), bTrue))
{
uint32_t tempIndices[] = { 0, 3, 1 };
uint32_t _size = 3;
const Vec3V q = closestPtPointTriangleTesselation(_Q, _A, _B, tempIndices, _size, tempClosestA, tempClosestB);
const FloatV sqDist = V3Dot(q, q);
const BoolV con = FIsGrtr(bestSqDist, sqDist);
if (BAllEq(con, bTrue))
{
result = q;
bestSqDist = sqDist;
indices[0] = tempIndices[0];
indices[1] = tempIndices[1];
indices[2] = tempIndices[2];
tempSize = _size;
closestA = tempClosestA;
closestB = tempClosestB;
}
}
if (BAllEq(BGetW(bIsOutside4), bTrue))
{
uint32_t tempIndices[] = { 1, 3, 2 };
uint32_t _size = 3;
const Vec3V q = closestPtPointTriangleTesselation(_Q, _A, _B, tempIndices, _size, tempClosestA, tempClosestB);
const FloatV sqDist = V3Dot(q, q);
const BoolV con = FIsGrtr(bestSqDist, sqDist);
if (BAllEq(con, bTrue))
{
result = q;
bestSqDist = sqDist;
indices[0] = tempIndices[0];
indices[1] = tempIndices[1];
indices[2] = tempIndices[2];
tempSize = _size;
closestA = tempClosestA;
closestB = tempClosestB;
}
}
A[0] = _A[indices[0]]; A[1] = _A[indices[1]]; A[2] = _A[indices[2]];
B[0] = _B[indices[0]]; B[1] = _B[indices[1]]; B[2] = _B[indices[2]];
Q[0] = _Q[indices[0]]; Q[1] = _Q[indices[1]]; Q[2] = _Q[indices[2]];
size = tempSize;
return result;
}
NV_NOALIAS NV_FORCE_INLINE Vec3V doTesselation(Vec3V* NV_RESTRICT Q, Vec3V* NV_RESTRICT A, Vec3V* NV_RESTRICT B,
const Vec3VArg support, const Vec3VArg supportA, const Vec3VArg supportB, uint32_t& size, Vec3V& closestA, Vec3V& closestB)
{
switch (size)
{
case 1:
{
closestA = supportA;
closestB = supportB;
return support;
}
case 2:
{
return closestPtPointSegmentTesselation(Q[0], support, A[0], supportA, B[0], supportB, size, closestA, closestB);
}
case 3:
{
uint32_t tempIndices[3] = { 0, 1, 2 };
return closestPtPointTriangleTesselation(Q, A, B, tempIndices, size, closestA, closestB);
}
case 4:
{
return closestPtPointTetrahedronTesselation(Q, A, B, size, closestA, closestB);
}
default:
NVBLAST_ASSERT(0);
}
return support;
}
enum Status
{
STATUS_NON_INTERSECT,
STATUS_CONTACT,
STATUS_DEGENERATE,
};
struct Output
{
/// Get the normal to push apart in direction from A to B
NV_FORCE_INLINE Vec3V getNormal() const { return V3Normalize(V3Sub(mClosestB, mClosestA)); }
Vec3V mClosestA; ///< Closest point on A
Vec3V mClosestB; ///< Closest point on B
FloatV mDistSq;
};
struct ConvexV
{
void calcExtent(const Vec3V& dir, float& minOut, float& maxOut) const
{
// Expand
const Vec4V x = Vec4V_From_FloatV(V3GetX(dir));
const Vec4V y = Vec4V_From_FloatV(V3GetY(dir));
const Vec4V z = Vec4V_From_FloatV(V3GetZ(dir));
const Vec4V* src = mAovVertices;
const Vec4V* end = src + mNumAovVertices * 3;
// Do first step
Vec4V max = V4MulAdd(x, src[0], V4MulAdd(y, src[1], V4Mul(z, src[2])));
Vec4V min = max;
src += 3;
// Do the rest
for (; src < end; src += 3)
{
const Vec4V dot = V4MulAdd(x, src[0], V4MulAdd(y, src[1], V4Mul(z, src[2])));
max = V4Max(dot, max);
min = V4Min(dot, min);
}
FStore(V4ExtractMax(max), &maxOut);
FStore(V4ExtractMin(min), &minOut);
}
Vec3V calcSupport(const Vec3V& dir) const
{
// Expand
const Vec4V x = Vec4V_From_FloatV(V3GetX(dir));
const Vec4V y = Vec4V_From_FloatV(V3GetY(dir));
const Vec4V z = Vec4V_From_FloatV(V3GetZ(dir));
NV_ALIGN(16, static const float index4const[]) = { 0.0f, 1.0f, 2.0f, 3.0f };
Vec4V index4 = *(const Vec4V*)index4const;
NV_ALIGN(16, static const float delta4const[]) = { 4.0f, 4.0f, 4.0f, 4.0f };
const Vec4V delta4 = *(const Vec4V*)delta4const;
const Vec4V* src = mAovVertices;
const Vec4V* end = src + mNumAovVertices * 3;
// Do first step
Vec4V max = V4MulAdd(x, src[0], V4MulAdd(y, src[1], V4Mul(z, src[2])));
Vec4V maxIndex = index4;
index4 = V4Add(index4, delta4);
src += 3;
// Do the rest
for (; src < end; src += 3)
{
const Vec4V dot = V4MulAdd(x, src[0], V4MulAdd(y, src[1], V4Mul(z, src[2])));
const BoolV cmp = V4IsGrtr(dot, max);
max = V4Max(dot, max);
maxIndex = V4Sel(cmp, index4, maxIndex);
index4 = V4Add(index4, delta4);
}
Vec4V horiMax = Vec4V_From_FloatV(V4ExtractMax(max));
uint32_t mask = BGetBitMask(V4IsEq(horiMax, max));
const uint32_t simdIndex = (0x12131210 >> (mask + mask)) & uint32_t(3);
/// NOTE! Could be load hit store
/// Would be better to have all simd.
NV_ALIGN(16, float f[4]);
V4StoreA(maxIndex, f);
uint32_t index = uint32_t(uint32_t(f[simdIndex]));
const Vec4V* aovIndex = (mAovVertices + (index >> 2) * 3);
const float* aovOffset = ((const float*)aovIndex) + (index & 3);
return Vec3V_From_Vec4V(V4LoadXYZW(aovOffset[0], aovOffset[4], aovOffset[8], 1.0f));
}
const Vec4V* mAovVertices; ///< Vertices stored as x,x,x,x, y,y,y,y, z,z,z,z
uint32_t mNumAovVertices; ///< Number of groups of 4 vertices
};
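// GJK-style closest-point/overlap query between convexA and convexB, performed in A's space (bToA maps
// B into A's space). A simplex of Minkowski-difference support points (Q, with witness points A and B)
// is refined with the tessellation helpers above until the distance converges (relative tolerance
// epsRel), drops below eps2 (contact), or degenerates.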
Status Collide(const Vec3V& initialDir, const ConvexV& convexA, const Mat34V& bToA, const ConvexV& convexB, Output& out)
{
Vec3V Q[4];
Vec3V A[4];
Vec3V B[4];
Mat33V aToB = M34Trnsps33(bToA);
uint32_t size = 0;
const Vec3V zeroV = V3Zero();
const BoolV bTrue = BTTTT();
//Vec3V v = V3UnitX();
Vec3V v = V3Sel(FIsGrtr(V3Dot(initialDir, initialDir), FZero()), initialDir, V3UnitX());
//const FloatV minMargin = zero;
//const FloatV eps2 = FMul(minMargin, FLoad(0.01f));
//FloatV eps2 = zero;
FloatV eps2 = FLoad(1e-6f);
const FloatV epsRel = FLoad(0.000225f);
Vec3V closA(zeroV), closB(zeroV);
FloatV sDist = FMax();
FloatV minDist = sDist;
Vec3V closAA = zeroV;
Vec3V closBB = zeroV;
BoolV bNotTerminated = bTrue;
BoolV bCon = bTrue;
do
{
minDist = sDist;
closAA = closA;
closBB = closB;
uint32_t index = size++;
NVBLAST_ASSERT(index < 4);
const Vec3V supportA = convexA.calcSupport(V3Neg(v));
const Vec3V supportB = M34MulV3(bToA, convexB.calcSupport(M33MulV3(aToB, v)));
const Vec3V support = Vec3V_From_Vec4V(Vec4V_From_Vec3V(V3Sub(supportA, supportB)));
A[index] = supportA;
B[index] = supportB;
Q[index] = support;
const FloatV signDist = V3Dot(v, support);
const FloatV tmp0 = FSub(sDist, signDist);
if (FAllGrtr(FMul(epsRel, sDist), tmp0))
{
out.mClosestA = closA;
out.mClosestB = closB;
out.mDistSq = sDist;
return STATUS_NON_INTERSECT;
}
//calculate the closest point between two convex hull
v = doTesselation(Q, A, B, support, supportA, supportB, size, closA, closB);
sDist = V3Dot(v, v);
bCon = FIsGrtr(minDist, sDist);
bNotTerminated = BAnd(FIsGrtr(sDist, eps2), bCon);
} while (BAllEq(bNotTerminated, bTrue));
out.mClosestA = V3Sel(bCon, closA, closAA);
out.mClosestB = V3Sel(bCon, closB, closBB);
out.mDistSq = FSel(bCon, sDist, minDist);
return Status(BAllEq(bCon, bTrue) == 1 ? STATUS_CONTACT : STATUS_DEGENERATE);
}
static void _calcSeparation(const ConvexV& convexA, const nvidia::NvTransform& aToWorldIn, const Mat34V& bToA, ConvexV& convexB, const Vec3V& centroidAToB, Output& out, Separation& sep)
{
Mat33V aToB = M34Trnsps33(bToA);
Vec3V normalA = out.getNormal();
FloatV vEpsilon = FLoad(1e-6f);
if (BAllEqFFFF(FIsGrtr(out.mDistSq, vEpsilon)))
{
if (BAllEqTTTT(FIsGrtr(V3Dot(centroidAToB, centroidAToB), vEpsilon)))
{
normalA = V3Normalize(centroidAToB);
}
else
{
normalA = V3UnitX();
}
}
convexA.calcExtent(normalA, sep.min0, sep.max0);
Vec3V normalB = M33MulV3(aToB, normalA);
convexB.calcExtent(normalB, sep.min1, sep.max1);
{
// Offset the min max taking into account transform
// The offset is the distance of B's origin (expressed in A's space) along the normal in A's space
float fix;
FStore(V3Dot(bToA.col3, normalA), &fix);
sep.min1 += fix;
sep.max1 += fix;
}
// Looks like it's the plane at the midpoint
Vec3V center = V3Scale(V3Add(out.mClosestA, out.mClosestB), FLoad(0.5f));
// Transform to world space
Mat34V aToWorld;
*(NvMat44*)&aToWorld = aToWorldIn;
// Put the normal in world space
Vec3V worldCenter = M34MulV3(aToWorld, center);
Vec3V worldNormal = M34Mul33V3(aToWorld, normalA);
FloatV dist = V3Dot(worldNormal, worldCenter);
V3StoreU(worldNormal, sep.plane.n);
FStore(dist, &sep.plane.d);
sep.plane.d = -sep.plane.d;
}
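// Repacks an array of NvVec3 (AoS) into groups of four vertices in SoA form -- x,x,x,x / y,y,y,y / z,z,z,z --
// as expected by ConvexV::mAovVertices; a trailing partial group is padded by repeating the last vertex.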
static void _arrayVec3ToVec4(const NvVec3* src, Vec4V* dst, uint32_t num)
{
const uint32_t num4 = num >> 2;
for (uint32_t i = 0; i < num4; i++, dst += 3, src += 4)
{
Vec3V v0 = V3LoadU(&src[0].x);
Vec3V v1 = V3LoadU(&src[1].x);
Vec3V v2 = V3LoadU(&src[2].x);
Vec3V v3 = V3LoadU(&src[3].x);
// Transpose
V4Transpose(v0, v1, v2, v3);
// Save
dst[0] = v0;
dst[1] = v1;
dst[2] = v2;
}
const uint32_t remain = num & 3;
if (remain)
{
Vec3V work[4];
uint32_t i = 0;
for (; i < remain; i++) work[i] = V3LoadU(&src[i].x);
for (; i < 4; i++) work[i] = work[remain - 1];
V4Transpose(work[0], work[1], work[2], work[3]);
dst[0] = work[0];
dst[1] = work[1];
dst[2] = work[2];
}
}
static void _arrayVec3ToVec4(const NvVec3* src, const Vec3V& scale, Vec4V* dst, uint32_t num)
{
// If no scale - use the faster version
if (V3AllEq(scale, V3One()))
{
return _arrayVec3ToVec4(src, dst, num);
}
const uint32_t num4 = num >> 2;
for (uint32_t i = 0; i < num4; i++, dst += 3, src += 4)
{
Vec3V v0 = V3Mul(scale, V3LoadU(&src[0].x));
Vec3V v1 = V3Mul(scale, V3LoadU(&src[1].x));
Vec3V v2 = V3Mul(scale, V3LoadU(&src[2].x));
Vec3V v3 = V3Mul(scale, V3LoadU(&src[3].x));
// Transpose
V4Transpose(v0, v1, v2, v3);
// Save
dst[0] = v0;
dst[1] = v1;
dst[2] = v2;
}
const uint32_t remain = num & 3;
if (remain)
{
Vec3V work[4];
uint32_t i = 0;
for (; i < remain; i++) work[i] = V3Mul(scale, V3LoadU(&src[i].x));
for (; i < 4; i++) work[i] = work[remain - 1];
V4Transpose(work[0], work[1], work[2], work[3]);
dst[0] = work[0];
dst[1] = work[1];
dst[2] = work[2];
}
}
// TODO: move this to a better long term home
// scope based helper struct to pick between stack and heap alloc based on the size of the request
struct ScopeMemoryAllocator {
public:
ScopeMemoryAllocator() : mAlloc(nullptr) {};
~ScopeMemoryAllocator()
{
this->free();
}
void* alloc(size_t buffSize)
{
if (mAlloc == nullptr)
{
mAlloc = NVBLAST_ALLOC(buffSize);
return mAlloc;
}
return nullptr;
}
void free()
{
if (mAlloc != nullptr)
{
NVBLAST_FREE(mAlloc);
mAlloc = nullptr;
}
}
private:
void* mAlloc;
};
#define STACK_ALLOC_LIMIT (100 * 1024)
#define ALLOCATE_TEMP_MEMORY(_out, buffSize) \
ScopeMemoryAllocator _out##Allocator; \
_out = (buffSize < STACK_ALLOC_LIMIT ? NvBlastAlloca(buffSize) : _out##Allocator.alloc(buffSize))
bool importerHullsInProximityApexFree(uint32_t hull0Count, const NvVec3* hull0, NvBounds3& hull0Bounds, const nvidia::NvTransform& localToWorldRT0In, const nvidia::NvVec3& scale0In,
uint32_t hull1Count, const NvVec3* hull1, NvBounds3& hull1Bounds, const nvidia::NvTransform& localToWorldRT1In, const nvidia::NvVec3& scale1In,
float maxDistance, Separation* separation)
{
const uint32_t numVerts0 = static_cast<uint32_t>(hull0Count);
const uint32_t numVerts1 = static_cast<uint32_t>(hull1Count);
const uint32_t numAov0 = (numVerts0 + 3) >> 2;
const uint32_t numAov1 = (numVerts1 + 3) >> 2;
const uint32_t buffSize = (numAov0 + numAov1) * sizeof(Vec4V) * 3;
void* buff = nullptr;
ALLOCATE_TEMP_MEMORY(buff, buffSize);
Vec4V* verts0 = (Vec4V*)buff;
// Make sure it's aligned
NVBLAST_ASSERT((size_t(verts0) & 0xf) == 0);
Vec4V* verts1 = verts0 + (numAov0 * 3);
const Vec3V scale0 = V3LoadU(&scale0In.x);
const Vec3V scale1 = V3LoadU(&scale1In.x);
std::vector<NvVec3> vert0(numVerts0);
for (uint32_t i = 0; i < numVerts0; ++i)
{
vert0[i] = hull0[i];
}
std::vector<NvVec3> vert1(numVerts1);
for (uint32_t i = 0; i < numVerts1; ++i)
{
vert1[i] = hull1[i];
}
_arrayVec3ToVec4(vert0.data(), scale0, verts0, numVerts0);
_arrayVec3ToVec4(vert1.data(), scale1, verts1, numVerts1);
const NvTransform trans1To0 = localToWorldRT0In.transformInv(localToWorldRT1In);
// Load into simd mat
Mat34V bToA;
*(NvMat44*)&bToA = trans1To0;
(*(NvMat44*)&bToA).column3.w = 0.0f; // AOS wants the 4th component of Vec3V to be 0 to work properly
ConvexV convexA;
ConvexV convexB;
convexA.mNumAovVertices = numAov0;
convexA.mAovVertices = verts0;
convexB.mNumAovVertices = numAov1;
convexB.mAovVertices = verts1;
const nvidia::NvVec3 hullACenter = hull0Bounds.getCenter();
const nvidia::NvVec3 hullBCenter = hull1Bounds.getCenter();
const Vec3V centroidA = V3LoadU(&hullACenter.x);
const Vec3V centroidB = M34MulV3(bToA, V3LoadU(&hullBCenter.x));
// Take the origin of B in A's space as the initial direction as it is 'the difference in transform origins B-A in A's space'
// Should be a good first guess
// Use centroid information
const Vec3V initialDir = V3Sub(centroidB, centroidA);
Output output;
Status status = Collide(initialDir, convexA, bToA, convexB, output);
if (status == STATUS_DEGENERATE)
{
// Calculate the tolerance from the extents
const NvVec3 extents0 = hull0Bounds.getExtents();
const NvVec3 extents1 = hull1Bounds.getExtents();
const FloatV tolerance0 = V3ExtractMin(V3Mul(V3LoadU(&extents0.x), scale0));
const FloatV tolerance1 = V3ExtractMin(V3Mul(V3LoadU(&extents1.x), scale1));
const FloatV tolerance = FMul(FAdd(tolerance0, tolerance1), FLoad(0.01f));
const FloatV sqTolerance = FMul(tolerance, tolerance);
status = FAllGrtr(sqTolerance, output.mDistSq) ? STATUS_CONTACT : STATUS_NON_INTERSECT;
}
switch (status)
{
case STATUS_CONTACT:
{
if (separation)
{
_calcSeparation(convexA, localToWorldRT0In, bToA, convexB, initialDir, output, *separation);
}
return true;
}
default:
case STATUS_NON_INTERSECT:
{
if (separation)
{
_calcSeparation(convexA, localToWorldRT0In, bToA, convexB, initialDir, output, *separation);
}
float val;
FStore(output.mDistSq, &val);
return val < (maxDistance * maxDistance);
}
}
}
} // namespace Blast
} // namespace Nv
| 34,605 | C++ | 30.806985 | 221 | 0.588412 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtTriangleProcessor.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTEXTTRIANGLEPROCESSOR_H
#define NVBLASTEXTTRIANGLEPROCESSOR_H
#include "NvVec2.h"
#include "NvVec3.h"
#include <vector>
#include <algorithm>
using namespace nvidia;
namespace Nv
{
namespace Blast
{
/**
Triangle processor internal triangle representation. Contains only vertex positions.
*/
struct TrPrcTriangle
{
NvVec3 points[3];
TrPrcTriangle(NvVec3 a = NvVec3(0.0f), NvVec3 b = NvVec3(0.0f), NvVec3 c = NvVec3(0.0f))
{
points[0] = a;
points[1] = b;
points[2] = c;
}
TrPrcTriangle& operator=(const TrPrcTriangle& b)
{
points[0] = b.points[0];
points[1] = b.points[1];
points[2] = b.points[2];
return *this;
}
TrPrcTriangle(const TrPrcTriangle& b)
{
points[0] = b.points[0];
points[1] = b.points[1];
points[2] = b.points[2];
}
NvVec3 getNormal() const
{
return (points[1] - points[0]).cross(points[2] - points[0]);
}
};
/**
Triangle processor internal 2D triangle representation. Contains only vertex positions.
*/
struct TrPrcTriangle2d
{
NvVec2 points[3];
TrPrcTriangle2d(NvVec2 a = NvVec2(0.0f), NvVec2 b = NvVec2(0.0f), NvVec2 c = NvVec2(0.0f))
{
points[0] = a;
points[1] = b;
points[2] = c;
}
TrPrcTriangle2d operator=(const TrPrcTriangle2d& b)
{
points[0] = b.points[0];
points[1] = b.points[1];
points[2] = b.points[2];
return *this;
}
TrPrcTriangle2d(const TrPrcTriangle2d& b)
{
points[0] = b.points[0];
points[1] = b.points[1];
points[2] = b.points[2];
}
};
class TriangleProcessor
{
public:
TriangleProcessor(){};
~TriangleProcessor() {}
/**
Build intersection between two triangles
\param[in] a First triangle (A)
\param[in] aProjected Projected triangle A
\param[in] b Second triangle (B)
\param[in] centroid Centroid of first triangle (A)
\param[out] intersectionBuffer Result intersection polygon
\param[in] normal Normal vector to triangle (Common for both A and B).
\return 1 if an intersection is found.
*/
uint32_t getTriangleIntersection(TrPrcTriangle& a, TrPrcTriangle2d& aProjected, TrPrcTriangle& b, NvVec3& centroid,
std::vector<NvVec3>& intersectionBuffer, NvVec3 normal);
/**
Test whether the bounding boxes of the triangles intersect.
\param[in] a First triangle (A)
\param[in] b Second triangle (B)
\return true if the bounding boxes intersect
*/
bool triangleBoundingBoxIntersection(TrPrcTriangle2d& a, TrPrcTriangle2d& b);
/**
Test whether a point lies inside a triangle.
\param[in] point Point coordinates in 2d space.
\param[in] triangle Triangle in 2d space.
\return 1 if inside, 2 if on an edge, 0 otherwise.
*/
uint32_t isPointInside(const NvVec2& point, const TrPrcTriangle2d& triangle);
/**
Segment intersection point
\param[in] s1 Segment-1 start point
\param[in] e1 Segment-1 end point
\param[in] s2 Segment-2 start point
\param[in] e2 Segment-2 end point
\param[out] t1 Intersection point parameter relative to Segment-1, lies in [0.0, 1.0] range.
\return 0 if there is no intersection, 1 if an intersection is found.
*/
uint32_t getSegmentIntersection(const NvVec2& s1, const NvVec2& e1, const NvVec2& s2, const NvVec2& e2, float& t1);
/**
Sort vertices of polygon in CCW-order
*/
void sortToCCW(std::vector<NvVec3>& points, NvVec3& normal);
/**
Builds convex polygon for given set of points. Points should be coplanar.
\param[in] points Input array of points
\param[out] convexHull Output polygon
\param[in] normal Normal vector to polygon.
*/
void buildConvexHull(std::vector<NvVec3>& points, std::vector<NvVec3>& convexHull, const NvVec3& normal);
};
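/*
Illustrative usage sketch (the values are hypothetical, not part of this header):

    TriangleProcessor prc;
    TrPrcTriangle2d tri(NvVec2(0.0f, 0.0f), NvVec2(1.0f, 0.0f), NvVec2(0.0f, 1.0f));
    uint32_t r = prc.isPointInside(NvVec2(0.25f, 0.25f), tri);  // 1 = inside, 2 = on edge, 0 = outside
*/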
} // namespace Blast
} // namespace Nv
#endif // NVBLASTEXTTRIANGLEPROCESSOR_H
| 5,735 | C | 32.156069 | 119 | 0.6551 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringTriangulator.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTEXTAUTHORINGTRIANGULATOR_H
#define NVBLASTEXTAUTHORINGTRIANGULATOR_H
#include <vector>
#include <map>
#include "NvBlastExtAuthoringTypes.h"
#include "NvBlastExtAuthoringMesh.h"
#include "NvBlastExtAuthoringInternalCommon.h"
namespace Nv
{
namespace Blast
{
/**
Tool for doing all post processing steps of authoring.
*/
class Triangulator
{
public:
/**
Triangulates provided mesh and saves result internally. Uses Ear-clipping algorithm.
\param[in] mesh Mesh for triangulation
*/
void triangulate(const Mesh* mesh);
/**
\return Return array of triangles of base mesh.
*/
std::vector<Triangle>& getBaseMesh()
{
return mBaseMeshUVFittedTriangles;
}
std::vector<Triangle>& getBaseMeshNotFitted()
{
return mBaseMeshResultTriangles;
}
/**
\return Return array of TriangleIndexed of base mesh. Each TriangleIndexed contains the indices of its vertices in the internal vertex buffer.
*/
std::vector<TriangleIndexed>& getBaseMeshIndexed()
{
return mBaseMeshTriangles;
}
/**
\return Return mapping from vertices of input Mesh to internal vertices buffer. Used for island detection.
*/
std::vector<uint32_t>& getBaseMapping()
{
return mBaseMapping;
};
/**
\return Return mapping from vertices of input Mesh to internal vertices buffer, where only positions are taken into account. Used for island detection.
*/
std::vector<int32_t>& getPositionedMapping()
{
return mPositionMappedVrt;
};
/**
\return Return internal vertex buffer size. Vertices are welded internally using a distance threshold.
*/
uint32_t getWeldedVerticesCount()
{
return static_cast<uint32_t>(mVertices.size());
}
/**
Removes all information about mesh triangulation.
*/
void reset();
int32_t& getParentChunkId() { return parentChunkId; };
private:
int32_t parentChunkId;
int32_t addVerticeIfNotExist(const Vertex& p);
void addEdgeIfValid(EdgeWithParent& ed);
/* Data used before triangulation to build polygon loops*/
std::vector<Vertex> mVertices;
std::vector<EdgeWithParent> mBaseMeshEdges;
std::map<Vertex, int32_t, VrtComp> mVertMap;
std::map<EdgeWithParent, int32_t, EdgeComparator> mEdgeMap;
std::vector<uint32_t> mBaseMapping;
std::vector<int32_t> mPositionMappedVrt;
/* ------------------------------------------------------------ */
/**
Unite all nearly identical vertices and update edges according to these changes
*/
void prepare(const Mesh* mesh);
void triangulatePolygonWithEarClipping(const std::vector<uint32_t>& inputPolygon, const Vertex* vert, const ProjectionDirections& dir);
void buildPolygonAndTriangulate(std::vector<Edge>& edges, Vertex* vertices, int32_t userData, int32_t materialId, int32_t smoothingGroup);
void computePositionedMapping();
std::vector<TriangleIndexed> mBaseMeshTriangles;
/**
Final triangles
*/
std::vector<Triangle> mBaseMeshResultTriangles;
std::vector<Triangle> mBaseMeshUVFittedTriangles;
};
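/*
Illustrative usage sketch (assumes 'mesh' is a previously created Nv::Blast::Mesh*, not shown here):

    Triangulator tr;
    tr.triangulate(mesh);
    std::vector<Triangle>& tris = tr.getBaseMesh();  // UV-fitted triangles of the triangulated mesh
*/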
} // namespace Blast
} // namespace Nv
#endif // ifndef NVBLASTEXTAUTHORINGTRIANGULATOR_H
| 5,401 | C | 35.5 | 169 | 0.633031 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringFractureToolImpl.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include "NvBlastExtAuthoringFractureToolImpl.h"
#include "NvBlastExtAuthoringMeshImpl.h"
#include "NvBlastExtAuthoringMeshUtils.h"
// This warning arises when using some stl containers with older versions of VC
// c:\program files (x86)\microsoft visual studio 12.0\vc\include\xtree(1826): warning C4702: unreachable code
#if NV_VC && NV_VC < 14
#pragma warning(disable : 4702)
#endif
#include <queue>
#include <vector>
#include <map>
#include <stack>
#include <functional>
#include "NvBlastExtAuthoringVSA.h"
#include <float.h>
#include "NvBlastExtAuthoring.h"
#include "NvBlastExtAuthoringTriangulator.h"
#include "NvBlastExtAuthoringBooleanToolImpl.h"
#include "NvBlastExtAuthoringAcceleratorImpl.h"
#include "NvBlastExtAuthoringCutout.h"
#include "NvBlast.h"
#include "NvBlastGlobals.h"
#include "NvBlastExtAuthoringPerlinNoise.h"
#include <NvBlastAssert.h>
#include <NvBlastNvSharedHelpers.h>
#ifndef SAFE_DELETE
#define SAFE_DELETE(p) \
{ \
if (p) \
{ \
delete (p); \
(p) = NULL; \
} \
}
#endif
namespace Nv
{
namespace Blast
{
/*
Vector operations using TransformST
*/
inline TransformST
createCubeTMFromBounds(const NvcBounds3& bounds)
{
// scale = max extent, translation = center
const NvcVec3 center = 0.5f*(bounds.maximum + bounds.minimum);
const NvcVec3 extent = 0.5f*(bounds.maximum - bounds.minimum);
const float maxExtent = std::max(extent.x, std::max(extent.y, extent.z));
return {center, maxExtent > 0.0f ? maxExtent : 1.0f}; // Keep the transformation from being singular
}
//////////////////////////////////////////
struct Halfspace_partitioning : public VSA::VS3D_Halfspace_Set
{
std::vector<NvcPlane> planes;
VSA::real farthest_halfspace(VSA::real plane[4], const VSA::real point[4])
{
float biggest_d = -FLT_MAX;
for (uint32_t i = 0; i < planes.size(); ++i)
{
float d =
planes[i].n.x * point[0] + planes[i].n.y * point[1] + planes[i].n.z * point[2] + planes[i].d * point[3];
if (d > biggest_d)
{
biggest_d = d;
plane[0] = planes[i].n.x;
plane[1] = planes[i].n.y;
plane[2] = planes[i].n.z;
plane[3] = planes[i].d;
}
}
return biggest_d;
};
};
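// Determines which pairs of Voronoi sites are neighbors (i.e. their cells share a face): for each pair the
// bisector half-space is flipped and VSA::vs3d_test checks whether a non-empty region remains. Every
// neighboring pair receives a global plane index; the total count is returned so the caller can advance
// its plane indexer.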
int32_t findCellBasePlanes(const std::vector<NvcVec3>& sites, std::vector<std::vector<std::pair<int32_t, int32_t>>>& neighbors)
{
Halfspace_partitioning prt;
std::vector<NvcPlane>& planes = prt.planes;
int32_t neighborGlobalIndex = 0;
neighbors.resize(sites.size());
for (uint32_t cellId = 0; cellId + 1 < sites.size(); ++cellId)
{
planes.clear();
planes.resize(sites.size() - 1 - cellId);
std::vector<NvcVec3> midpoints(sites.size() - 1);
int32_t collected = 0;
for (uint32_t i = cellId + 1; i < sites.size(); ++i)
{
NvcVec3 midpoint = 0.5 * (sites[i] + sites[cellId]);
NvcVec3 direction = fromNvShared(toNvShared(sites[i] - sites[cellId]).getNormalized());
planes[collected].n = direction;
planes[collected].d = -(direction | midpoint);
midpoints[collected] = midpoint;
++collected;
}
for (uint32_t i = 0; i < planes.size(); ++i)
{
planes[i].n = -planes[i].n;
planes[i].d = -planes[i].d;
if (VSA::vs3d_test(prt))
{
const uint32_t nId = i + cellId + 1;
neighbors[cellId].push_back(std::pair<int32_t, int32_t>(nId, neighborGlobalIndex));
neighbors[nId].push_back(std::pair<int32_t, int32_t>(cellId, neighborGlobalIndex));
++neighborGlobalIndex;
};
planes[i].n = -planes[i].n;
planes[i].d = -planes[i].d;
}
}
return neighborGlobalIndex;
}
#define SITE_BOX_SIZE 4
#define CUTTING_BOX_SIZE 40
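// Builds the Voronoi cell mesh for site 'cellId': starts from a large box centered at the site and
// repeatedly intersects it with a cutting box aligned to the bisector plane of each neighboring site.
// Each cut facet is tagged with a signed plane index so matching faces of adjacent cells share an identifier.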
Mesh* getCellMesh(BooleanEvaluator& eval, int32_t planeIndexerOffset, int32_t cellId, const std::vector<NvcVec3>& sites,
const std::vector<std::vector<std::pair<int32_t, int32_t>>>& neighbors, int32_t interiorMaterialId, NvcVec3 origin)
{
Mesh* cell = getBigBox(toNvShared(origin), SITE_BOX_SIZE, interiorMaterialId);
Mesh* cuttingMesh = getCuttingBox(NvVec3(0, 0, 0), NvVec3(1, 1, 1), CUTTING_BOX_SIZE, 0, interiorMaterialId);
for (uint32_t i = 0; i < neighbors[cellId].size(); ++i)
{
std::pair<int32_t, int32_t> neighbor = neighbors[cellId][i];
int32_t nCell = neighbor.first;
NvVec3 midpoint = 0.5 * toNvShared(sites[nCell] + sites[cellId]);
NvVec3 direction = toNvShared(sites[nCell] - sites[cellId]).getNormalized();
int32_t planeIndex = neighbor.second + planeIndexerOffset;
if (nCell < cellId)
planeIndex = -planeIndex;
setCuttingBox(midpoint, -direction, cuttingMesh, CUTTING_BOX_SIZE, planeIndex);
eval.performFastCutting(cell, cuttingMesh, BooleanConfigurations::BOOLEAN_INTERSECTION());
Mesh* newCell = eval.createNewMesh();
delete cell;
cell = newCell;
if (cell == nullptr)
break;
}
delete cuttingMesh;
return cell;
}
#define MAX_VORONOI_ATTEMPT_NUMBER 450
VoronoiSitesGeneratorImpl::VoronoiSitesGeneratorImpl(const Mesh* mesh, RandomGeneratorBase* rnd)
{
mMesh = mesh;
mRnd = rnd;
mAccelerator = new BBoxBasedAccelerator(mMesh, kBBoxBasedAcceleratorDefaultResolution);
mStencil = nullptr;
}
void VoronoiSitesGeneratorImpl::setBaseMesh(const Mesh* m)
{
mGeneratedSites.clear();
delete mAccelerator;
mMesh = m;
mAccelerator = new BBoxBasedAccelerator(mMesh, kBBoxBasedAcceleratorDefaultResolution);
}
VoronoiSitesGeneratorImpl::~VoronoiSitesGeneratorImpl()
{
delete mAccelerator;
mAccelerator = nullptr;
}
void VoronoiSitesGeneratorImpl::release()
{
delete this;
}
void VoronoiSitesGeneratorImpl::setStencil(const Mesh* stencil)
{
mStencil = stencil;
}
void VoronoiSitesGeneratorImpl::clearStencil()
{
mStencil = nullptr;
}
void VoronoiSitesGeneratorImpl::uniformlyGenerateSitesInMesh(const uint32_t sitesCount)
{
BooleanEvaluator voronoiMeshEval;
NvcVec3 mn = mMesh->getBoundingBox().minimum;
NvcVec3 mx = mMesh->getBoundingBox().maximum;
NvcVec3 vc = mx - mn;
uint32_t attemptNumber = 0;
uint32_t generatedSites = 0;
while (generatedSites < sitesCount && attemptNumber < MAX_VORONOI_ATTEMPT_NUMBER)
{
float rn1 = mRnd->getRandomValue() * vc.x;
float rn2 = mRnd->getRandomValue() * vc.y;
float rn3 = mRnd->getRandomValue() * vc.z;
if (voronoiMeshEval.isPointContainedInMesh(mMesh, NvcVec3{ rn1, rn2, rn3 } + mn) &&
(mStencil == nullptr || voronoiMeshEval.isPointContainedInMesh(mStencil, NvcVec3{ rn1, rn2, rn3 } + mn)))
{
generatedSites++;
mGeneratedSites.push_back(NvcVec3{ rn1, rn2, rn3 } + mn);
attemptNumber = 0;
}
else
{
attemptNumber++;
if (attemptNumber > MAX_VORONOI_ATTEMPT_NUMBER)
break;
}
}
}
void VoronoiSitesGeneratorImpl::clusteredSitesGeneration(const uint32_t numberOfClusters,
const uint32_t sitesPerCluster, float clusterRadius)
{
BooleanEvaluator voronoiMeshEval;
NvcVec3 mn = mMesh->getBoundingBox().minimum;
NvcVec3 mx = mMesh->getBoundingBox().maximum;
NvcVec3 middle = (mx + mn) * 0.5;
NvcVec3 vc = (mx - mn) * 0.5;
uint32_t attemptNumber = 0;
uint32_t generatedSites = 0;
std::vector<NvcVec3> tempPoints;
while (generatedSites < numberOfClusters)
{
float rn1 = mRnd->getRandomValue() * 2 - 1;
float rn2 = mRnd->getRandomValue() * 2 - 1;
float rn3 = mRnd->getRandomValue() * 2 - 1;
NvcVec3 p = { middle.x + rn1 * vc.x, middle.y + rn2 * vc.y, middle.z + rn3 * vc.z };
if (voronoiMeshEval.isPointContainedInMesh(mMesh, p) &&
(mStencil == nullptr || voronoiMeshEval.isPointContainedInMesh(mStencil, p)))
{
generatedSites++;
tempPoints.push_back(p);
attemptNumber = 0;
}
else
{
attemptNumber++;
if (attemptNumber > MAX_VORONOI_ATTEMPT_NUMBER)
break;
}
}
int32_t totalCount = 0;
for (; tempPoints.size() > 0; tempPoints.pop_back())
{
uint32_t unif = sitesPerCluster;
generatedSites = 0;
while (generatedSites < unif)
{
NvcVec3 p =
tempPoints.back() + fromNvShared(NvVec3(mRnd->getRandomValue() * 2 - 1, mRnd->getRandomValue() * 2 - 1,
mRnd->getRandomValue() * 2 - 1)
.getNormalized()) *
(mRnd->getRandomValue() + 0.001f) * clusterRadius;
if (voronoiMeshEval.isPointContainedInMesh(mMesh, p) &&
(mStencil == nullptr || voronoiMeshEval.isPointContainedInMesh(mStencil, p)))
{
totalCount++;
generatedSites++;
mGeneratedSites.push_back(p);
attemptNumber = 0;
}
else
{
attemptNumber++;
if (attemptNumber > MAX_VORONOI_ATTEMPT_NUMBER)
break;
}
}
}
}
#define IN_SPHERE_ATTEMPT_NUMBER 20
void VoronoiSitesGeneratorImpl::addSite(const NvcVec3& site)
{
mGeneratedSites.push_back(site);
}
void VoronoiSitesGeneratorImpl::generateInSphere(const uint32_t count, const float radius, const NvcVec3& center)
{
BooleanEvaluator voronoiMeshEval;
uint32_t attemptNumber = 0;
uint32_t generatedSites = 0;
std::vector<NvcVec3> tempPoints;
float radiusSquared = radius * radius;
while (generatedSites < count && attemptNumber < MAX_VORONOI_ATTEMPT_NUMBER)
{
float rn1 = (mRnd->getRandomValue() - 0.5f) * 2.f * radius;
float rn2 = (mRnd->getRandomValue() - 0.5f) * 2.f * radius;
float rn3 = (mRnd->getRandomValue() - 0.5f) * 2.f * radius;
NvcVec3 point = { rn1, rn2, rn3 };
if (toNvShared(point).magnitudeSquared() < radiusSquared &&
voronoiMeshEval.isPointContainedInMesh(mMesh, point + center) &&
(mStencil == nullptr || voronoiMeshEval.isPointContainedInMesh(mStencil, point + center)))
{
generatedSites++;
mGeneratedSites.push_back(point + center);
attemptNumber = 0;
}
else
{
attemptNumber++;
if (attemptNumber > MAX_VORONOI_ATTEMPT_NUMBER)
break;
}
}
}
void VoronoiSitesGeneratorImpl::deleteInSphere(const float radius, const NvcVec3& center, float deleteProbability)
{
float r2 = radius * radius;
for (uint32_t i = 0; i < mGeneratedSites.size(); ++i)
{
if (toNvShared(mGeneratedSites[i] - center).magnitudeSquared() < r2 && mRnd->getRandomValue() <= deleteProbability)
{
std::swap(mGeneratedSites[i], mGeneratedSites.back());
mGeneratedSites.pop_back();
--i;
}
}
}
void VoronoiSitesGeneratorImpl::radialPattern(const NvcVec3& center, const NvcVec3& normal, float radius,
int32_t angularSteps, int32_t radialSteps, float angleOffset,
float variability)
{
// mGeneratedSites.push_back(center);
NvVec3 t1, t2;
if (std::abs(normal.z) < 0.9)
{
t1 = toNvShared(normal).cross(NvVec3(0, 0, 1));
}
else
{
t1 = toNvShared(normal).cross(NvVec3(1, 0, 0));
}
t2 = t1.cross(toNvShared(normal));
t1.normalize();
t2.normalize();
float radStep = radius / radialSteps;
int32_t cCr = 0;
float angleStep = nvidia::NvPi * 2 / angularSteps;
for (float cRadius = radStep; cRadius < radius; cRadius += radStep)
{
float cAngle = angleOffset * cCr;
for (int32_t i = 0; i < angularSteps; ++i)
{
float angVars = mRnd->getRandomValue() * variability + (1.0f - 0.5f * variability);
float radVars = mRnd->getRandomValue() * variability + (1.0f - 0.5f * variability);
NvcVec3 nPos = fromNvShared(std::cos(cAngle * angVars) * t1 + std::sin(cAngle * angVars) * t2) * cRadius * radVars + center;
mGeneratedSites.push_back(nPos);
cAngle += angleStep;
}
++cCr;
}
}
uint32_t VoronoiSitesGeneratorImpl::getVoronoiSites(const NvcVec3*& sites)
{
if (mGeneratedSites.size())
{
sites = &mGeneratedSites[0];
}
return (uint32_t)mGeneratedSites.size();
}
int32_t
FractureToolImpl::voronoiFracturing(uint32_t chunkId, uint32_t cellCount, const NvcVec3* cellPointsIn, bool replaceChunk)
{
if (chunkId == 0 && replaceChunk)
{
return 1;
}
int32_t chunkInfoIndex = getChunkInfoIndex(chunkId);
if (chunkInfoIndex == -1 || cellCount < 2)
{
return 1;
}
if (!mChunkData[chunkInfoIndex].isLeaf)
{
deleteChunkSubhierarchy(chunkId);
}
chunkInfoIndex = getChunkInfoIndex(chunkId);
Mesh* mesh = mChunkData[chunkInfoIndex].getMesh();
const TransformST& tm = mChunkData[chunkInfoIndex].getTmToWorld();
std::vector<NvcVec3> cellPoints(cellCount);
for (uint32_t i = 0; i < cellCount; ++i)
{
cellPoints[i] = tm.invTransformPos(cellPointsIn[i]);
}
/**
Prebuild accelerator structure
*/
BooleanEvaluator eval;
BooleanEvaluator voronoiMeshEval;
BBoxBasedAccelerator spAccel = BBoxBasedAccelerator(mesh, kBBoxBasedAcceleratorDefaultResolution);
std::vector<std::vector<std::pair<int32_t, int32_t>>> neighbors;
const int32_t neighborCount = findCellBasePlanes(cellPoints, neighbors);
/**
Fracture
*/
int32_t parentChunkId = replaceChunk ? mChunkData[chunkInfoIndex].parentChunkId : chunkId;
std::vector<uint32_t> newlyCreatedChunksIds;
for (uint32_t i = 0; i < cellPoints.size(); ++i)
{
Mesh* cell =
getCellMesh(eval, mPlaneIndexerOffset, i, cellPoints, neighbors, mInteriorMaterialId, cellPoints[i]);
if (cell == nullptr)
{
continue;
}
DummyAccelerator dmAccel(cell->getFacetCount());
voronoiMeshEval.performBoolean(mesh, cell, &spAccel, &dmAccel, BooleanConfigurations::BOOLEAN_INTERSECTION());
Mesh* resultMesh = voronoiMeshEval.createNewMesh();
if (resultMesh)
{
uint32_t ncidx = createNewChunk(parentChunkId);
mChunkData[ncidx].isLeaf = true;
setChunkInfoMesh(mChunkData[ncidx], resultMesh);
newlyCreatedChunksIds.push_back(mChunkData[ncidx].chunkId);
}
eval.reset();
delete cell;
}
mChunkData[chunkInfoIndex].isLeaf = false;
if (replaceChunk)
{
deleteChunkSubhierarchy(chunkId, true);
}
mPlaneIndexerOffset += neighborCount;
if (mRemoveIslands)
{
for (auto chunkToCheck : newlyCreatedChunksIds)
{
islandDetectionAndRemoving(chunkToCheck);
}
}
return 0;
}
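// Welds duplicate vertices according to the comparator Cmp (with or without UV comparison), transforms the
// surviving vertex positions back to world space with 'tm', and remaps every edge's endpoints to indices
// into the compacted buffer.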
template<typename Cmp>
static void compactifyAndTransformVertexBuffer
(
std::vector<Nv::Blast::Vertex>& vertexBuffer,
Edge* edges,
const Nv::Blast::Vertex* sourceVertices,
uint32_t numSourceVerts,
uint32_t numEdges,
const TransformST& tm
)
{
std::vector<uint32_t> indexMap;
indexMap.reserve(numSourceVerts);
std::map<Vertex, uint32_t, Cmp> vertexMapping;
for (uint32_t i = 0; i < numSourceVerts; i++)
{
const auto& vert = sourceVertices[i];
auto it = vertexMapping.find(vert);
if (it == vertexMapping.end())
{
const uint32_t size = static_cast<uint32_t>(vertexBuffer.size());
vertexMapping[vert] = size;
// transform the position back to world space before storing it
Nv::Blast::Vertex transformedVert = vert;
transformedVert.p = tm.transformPos(vert.p);
vertexBuffer.push_back(transformedVert);
indexMap.push_back(size);
}
else
{
indexMap.push_back(it->second);
}
}
// now we need to convert the list of edges to be based on the compacted vertex buffer
for (uint32_t i = 0; i < numEdges; i++) {
Edge &edge = edges[i];
edge.s = indexMap[edges[i].s];
edge.e = indexMap[edges[i].e];
}
}
Mesh* FractureToolImpl::createChunkMesh(int32_t chunkInfoIndex, bool splitUVs /* = true */)
{
// make sure the chunk is valid
if (chunkInfoIndex < 0 || uint32_t(chunkInfoIndex) >= this->getChunkCount()) {
return nullptr;
}
// grab the original source mesh
const auto sourceMesh = this->getChunkInfo(chunkInfoIndex).getMesh();
if (!sourceMesh) {
return nullptr;
}
const Nv::Blast::Vertex* sourceVertices = sourceMesh->getVertices();
const uint32_t numSourceVerts = sourceMesh->getVerticesCount();
const auto sourceEdges = sourceMesh->getEdges();
const auto numEdges = sourceMesh->getEdgesCount();
const auto edgeBufferSize = numEdges * sizeof(Edge);
Edge* edges = reinterpret_cast<Edge*>(NVBLAST_ALLOC(edgeBufferSize));
memcpy(edges, sourceEdges, edgeBufferSize);
const TransformST& tm = this->getChunkInfo(chunkInfoIndex).getTmToWorld();
std::vector<Vertex> _vertexBuffer;
if (splitUVs)
compactifyAndTransformVertexBuffer<VrtComp>(_vertexBuffer, edges, sourceVertices, numSourceVerts, numEdges, tm);
else
compactifyAndTransformVertexBuffer<VrtCompNoUV>(_vertexBuffer, edges, sourceVertices, numSourceVerts, numEdges, tm);
// now fix the order of the edges
// compacting the vertex buffer can put them out of order
// the end of one edge needs to be the start of the next
const auto facets = sourceMesh->getFacetsBuffer();
const auto facetsCount = sourceMesh->getFacetCount();
Vertex* vertices = reinterpret_cast<Vertex*>(_vertexBuffer.data());
const auto numVerts = static_cast<uint32_t>(_vertexBuffer.size());
nvidia::NvBounds3 bnd;
bnd.setEmpty();
std::set<int32_t> vertUVsToFix;
for (uint32_t f = 0; f < facetsCount; f++) {
const Facet& facet = facets[f];
uint32_t nextIndex = edges[facet.firstEdgeNumber].e;
for (uint32_t edge = 1; edge < facet.edgesCount; edge++) {
for (uint32_t test = edge; test < facet.edgesCount; test++) {
if (nextIndex == edges[facet.firstEdgeNumber + test].s) {
if (test != edge) {
std::swap(edges[facet.firstEdgeNumber + edge], edges[facet.firstEdgeNumber + test]);
}
nextIndex = edges[facet.firstEdgeNumber + edge].e;
break;
}
}
// consecutive edges must share a vertex: the previous edge's end is this edge's start
NVBLAST_ASSERT(edges[facet.firstEdgeNumber + edge - 1].e == edges[facet.firstEdgeNumber + edge].s);
}
// we need to de-normalize the UVs for interior faces
// build a set of interior vertex indices as we inflate the bounds to include all the UVs
if (facet.userData != 0) {
for (uint32_t edge = 0; edge < facet.edgesCount; edge++) {
const int32_t v1 = edges[facet.firstEdgeNumber + edge].s;
if (vertUVsToFix.insert(v1).second) {
bnd.include(NvVec3(vertices[v1].uv[0].x, vertices[v1].uv[0].y, 0.0f));
}
const int32_t v2 = edges[facet.firstEdgeNumber + edge].e;
if (vertUVsToFix.insert(v2).second) {
bnd.include(NvVec3(vertices[v2].uv[0].x, vertices[v2].uv[0].y, 0.0f));
}
}
}
}
const float xscale = (bnd.maximum.x - bnd.minimum.x);
const float yscale = (bnd.maximum.y - bnd.minimum.y);
const float scale = 1.0f / std::min(xscale, yscale); // To have uniform scaling
for (auto vertIdx: vertUVsToFix) {
NVBLAST_ASSERT(uint32_t(vertIdx) < numVerts);
auto& vert = vertices[vertIdx];
vert.uv[0].x = (vert.uv[0].x - bnd.minimum.x) * scale;
vert.uv[0].y = (vert.uv[0].y - bnd.minimum.y) * scale;
}
// build a new mesh from the converted data
Mesh* chunkMesh = new MeshImpl(vertices, edges, facets, numVerts, numEdges, facetsCount);
NVBLAST_FREE(edges);
return chunkMesh;
}
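// Detects open (boundary) edges: vertices are welded by position, every edge is stored with its endpoints
// in canonical order, and the sorted edge list is scanned for duplicates. In a closed mesh each edge must
// occur an even number of times, so an odd count indicates an open edge.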
bool FractureToolImpl::isMeshContainOpenEdges(const Mesh* input)
{
std::map<NvcVec3, int32_t, VrtPositionComparator> vertexMapping;
std::vector<int32_t> vertexRemappingArray(input->getVerticesCount());
std::vector<Edge> remappedEdges(input->getEdgesCount());
/**
Remap vertices
*/
const Vertex* vrx = input->getVertices();
for (uint32_t i = 0; i < input->getVerticesCount(); ++i)
{
auto it = vertexMapping.find(vrx->p);
if (it == vertexMapping.end())
{
vertexMapping[vrx->p] = i;
vertexRemappingArray[i] = i;
}
else
{
vertexRemappingArray[i] = it->second;
}
++vrx;
}
const Edge* ed = input->getEdges();
for (uint32_t i = 0; i < input->getEdgesCount(); ++i)
{
remappedEdges[i].s = vertexRemappingArray[ed->s];
remappedEdges[i].e = vertexRemappingArray[ed->e];
if (remappedEdges[i].e < remappedEdges[i].s)
{
std::swap(remappedEdges[i].s, remappedEdges[i].e);
}
++ed;
}
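// Each edge was canonicalized above (s <= e), so sorting makes duplicates adjacent. In a closed mesh every
// edge is shared by an even number of facets; a run with an odd count therefore indicates an open edge.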
std::sort(remappedEdges.begin(), remappedEdges.end());
int32_t collected = 1;
for (uint32_t i = 1; i < remappedEdges.size(); ++i)
{
if (remappedEdges[i - 1].s == remappedEdges[i].s && remappedEdges[i - 1].e == remappedEdges[i].e)
{
collected++;
}
else
{
if (collected & 1)
{
return true;
}
else
{
collected = 1;
}
}
}
return collected & 1;
}
int32_t FractureToolImpl::voronoiFracturing(uint32_t chunkId, uint32_t cellCount, const NvcVec3* cellPointsIn,
const NvcVec3& scale, const NvcQuat& rotation, bool replaceChunk)
{
if (chunkId == 0 && replaceChunk)
{
return 1;
}
int32_t chunkInfoIndex = getChunkInfoIndex(chunkId);
if (chunkInfoIndex == -1 || cellCount < 2)
{
return 1;
}
if (!mChunkData[chunkInfoIndex].isLeaf)
{
deleteChunkSubhierarchy(chunkId);
}
chunkInfoIndex = getChunkInfoIndex(chunkId);
Mesh* mesh = mChunkData[chunkInfoIndex].getMesh();
const TransformST& tm = mChunkData[chunkInfoIndex].getTmToWorld();
std::vector<NvcVec3> cellPoints(cellCount);
for (uint32_t i = 0; i < cellCount; ++i)
{
cellPoints[i] = tm.invTransformPos(cellPointsIn[i]);
toNvShared(cellPoints[i]) = toNvShared(rotation).rotateInv(toNvShared(cellPoints[i]));
cellPoints[i].x *= (1.0f / scale.x);
cellPoints[i].y *= (1.0f / scale.y);
cellPoints[i].z *= (1.0f / scale.z);
}
/**
Prebuild accelerator structure
*/
BooleanEvaluator eval;
BooleanEvaluator voronoiMeshEval;
BBoxBasedAccelerator spAccel = BBoxBasedAccelerator(mesh, kBBoxBasedAcceleratorDefaultResolution);
std::vector<std::vector<std::pair<int32_t, int32_t>>> neighbors;
const int32_t neighborCount = findCellBasePlanes(cellPoints, neighbors);
/**
Fracture
*/
int32_t parentChunkId = replaceChunk ? mChunkData[chunkInfoIndex].parentChunkId : chunkId;
std::vector<uint32_t> newlyCreatedChunksIds;
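// For each Voronoi cell: build the cell mesh, transform it back into chunk space (re-applying the scale and
// rotation that were inverted for the cell points), intersect it with the chunk mesh, and register any
// non-empty result as a new leaf chunk under parentChunkId.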
for (uint32_t i = 0; i < cellPoints.size(); ++i)
{
Mesh* cell =
getCellMesh(eval, mPlaneIndexerOffset, i, cellPoints, neighbors, mInteriorMaterialId, cellPoints[i]);
if (cell == nullptr)
{
continue;
}
for (uint32_t v = 0; v < cell->getVerticesCount(); ++v)
{
cell->getVerticesWritable()[v].p.x *= scale.x;
cell->getVerticesWritable()[v].p.y *= scale.y;
cell->getVerticesWritable()[v].p.z *= scale.z;
toNvShared(cell->getVerticesWritable()[v].p) = toNvShared(rotation).rotate(toNvShared(cell->getVerticesWritable()[v].p));
}
cell->recalculateBoundingBox();
DummyAccelerator dmAccel(cell->getFacetCount());
voronoiMeshEval.performBoolean(mesh, cell, &spAccel, &dmAccel, BooleanConfigurations::BOOLEAN_INTERSECTION());
Mesh* resultMesh = voronoiMeshEval.createNewMesh();
if (resultMesh)
{
uint32_t ncidx = createNewChunk(parentChunkId);
mChunkData[ncidx].isLeaf = true;
setChunkInfoMesh(mChunkData[ncidx], resultMesh);
newlyCreatedChunksIds.push_back(mChunkData[ncidx].chunkId);
}
eval.reset();
delete cell;
}
mChunkData[chunkInfoIndex].isLeaf = false;
if (replaceChunk)
{
deleteChunkSubhierarchy(chunkId, true);
}
mPlaneIndexerOffset += neighborCount;
if (mRemoveIslands)
{
for (auto chunkToCheck : newlyCreatedChunksIds)
{
islandDetectionAndRemoving(chunkToCheck);
}
}
return 0;
}
int32_t FractureToolImpl::slicing(uint32_t chunkId, const SlicingConfiguration& conf, bool replaceChunk,
RandomGeneratorBase* rnd)
{
if (conf.noise.amplitude != 0)
{
return slicingNoisy(chunkId, conf, replaceChunk, rnd);
}
if (replaceChunk && chunkId == 0)
{
return 1;
}
int32_t chunkInfoIndex = getChunkInfoIndex(chunkId);
if (chunkInfoIndex == -1)
{
return 1;
}
if (!mChunkData[chunkInfoIndex].isLeaf)
{
deleteChunkSubhierarchy(chunkId);
}
chunkInfoIndex = getChunkInfoIndex(chunkId);
Mesh* mesh = new MeshImpl(*reinterpret_cast<MeshImpl*>(mChunkData[chunkInfoIndex].getMesh()));
BooleanEvaluator bTool;
int32_t x_slices = conf.x_slices;
int32_t y_slices = conf.y_slices;
int32_t z_slices = conf.z_slices;
const nvidia::NvBounds3 sourceBBox = toNvShared(mesh->getBoundingBox());
NvVec3 center = {mesh->getBoundingBox().minimum.x, 0, 0};
float x_offset = (sourceBBox.maximum.x - sourceBBox.minimum.x) * (1.0f / (x_slices + 1));
float y_offset = (sourceBBox.maximum.y - sourceBBox.minimum.y) * (1.0f / (y_slices + 1));
float z_offset = (sourceBBox.maximum.z - sourceBBox.minimum.z) * (1.0f / (z_slices + 1));
center.x += x_offset;
NvVec3 dir = {1, 0, 0};
Mesh* slBox = getCuttingBox(center, dir, 20, 0, mInteriorMaterialId);
ChunkInfo ch;
ch.isLeaf = true;
ch.isChanged = true;
ch.flags = ChunkInfo::NO_FLAGS;
ch.parentChunkId = replaceChunk ? mChunkData[chunkInfoIndex].parentChunkId : chunkId;
std::vector<Mesh*> xSlicedChunks;
std::vector<Mesh*> ySlicedChunks;
std::vector<uint32_t> newlyCreatedChunksIds;
/**
Slice along x direction
*/
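// Each pass cuts the current mesh with a large cutting box whose direction is jittered by angle_variations:
// the INTERSECTION result becomes a slab, and after flipping the box normals the DIFFERENCE result becomes
// the remainder that keeps being sliced. Slabs from the x pass are then sliced along y, and those along z.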
for (int32_t slice = 0; slice < x_slices; ++slice)
{
NvVec3 randVect =
NvVec3(2 * rnd->getRandomValue() - 1, 2 * rnd->getRandomValue() - 1, 2 * rnd->getRandomValue() - 1);
NvVec3 lDir = dir + randVect * conf.angle_variations;
setCuttingBox(center, -lDir, slBox, 20, mPlaneIndexerOffset);
bTool.performFastCutting(mesh, slBox, BooleanConfigurations::BOOLEAN_INTERSECTION());
Mesh* xSlice = bTool.createNewMesh();
if (xSlice != nullptr)
{
xSlicedChunks.push_back(xSlice);
}
inverseNormalAndIndices(slBox);
++mPlaneIndexerOffset;
bTool.performFastCutting(mesh, slBox, BooleanConfigurations::BOOLEAN_DIFFERENCE());
Mesh* result = bTool.createNewMesh();
delete mesh;
mesh = result;
if (mesh == nullptr)
{
break;
}
center.x += x_offset + (rnd->getRandomValue()) * conf.offset_variations * x_offset;
}
if (mesh != nullptr)
{
xSlicedChunks.push_back(mesh);
}
for (uint32_t chunk = 0; chunk < xSlicedChunks.size(); ++chunk)
{
center = NvVec3(0, sourceBBox.minimum.y, 0);
center.y += y_offset;
dir = NvVec3(0, 1, 0);
mesh = xSlicedChunks[chunk];
for (int32_t slice = 0; slice < y_slices; ++slice)
{
NvVec3 randVect =
NvVec3(2 * rnd->getRandomValue() - 1, 2 * rnd->getRandomValue() - 1, 2 * rnd->getRandomValue() - 1);
NvVec3 lDir = dir + randVect * conf.angle_variations;
setCuttingBox(center, -lDir, slBox, 20, mPlaneIndexerOffset);
bTool.performFastCutting(mesh, slBox, BooleanConfigurations::BOOLEAN_INTERSECTION());
Mesh* ySlice = bTool.createNewMesh();
if (ySlice != nullptr)
{
ySlicedChunks.push_back(ySlice);
}
inverseNormalAndIndices(slBox);
++mPlaneIndexerOffset;
bTool.performFastCutting(mesh, slBox, BooleanConfigurations::BOOLEAN_DIFFERENCE());
Mesh* result = bTool.createNewMesh();
delete mesh;
mesh = result;
if (mesh == nullptr)
{
break;
}
center.y += y_offset + (rnd->getRandomValue()) * conf.offset_variations * y_offset;
}
if (mesh != nullptr)
{
ySlicedChunks.push_back(mesh);
}
}
for (uint32_t chunk = 0; chunk < ySlicedChunks.size(); ++chunk)
{
center = NvVec3(0, 0, sourceBBox.minimum.z);
center.z += z_offset;
dir = NvVec3(0, 0, 1);
mesh = ySlicedChunks[chunk];
for (int32_t slice = 0; slice < z_slices; ++slice)
{
NvVec3 randVect =
NvVec3(2 * rnd->getRandomValue() - 1, 2 * rnd->getRandomValue() - 1, 2 * rnd->getRandomValue() - 1);
NvVec3 lDir = dir + randVect * conf.angle_variations;
setCuttingBox(center, -lDir, slBox, 20, mPlaneIndexerOffset);
bTool.performFastCutting(mesh, slBox, BooleanConfigurations::BOOLEAN_INTERSECTION());
Mesh* zSlice = bTool.createNewMesh();
if (zSlice != nullptr)
{
setChunkInfoMesh(ch, zSlice);
ch.chunkId = createId();
newlyCreatedChunksIds.push_back(ch.chunkId);
mChunkData.push_back(ch);
}
inverseNormalAndIndices(slBox);
++mPlaneIndexerOffset;
bTool.performFastCutting(mesh, slBox, BooleanConfigurations::BOOLEAN_DIFFERENCE());
Mesh* result = bTool.createNewMesh();
delete mesh;
mesh = result;
if (mesh == nullptr)
{
break;
}
center.z += z_offset + (rnd->getRandomValue()) * conf.offset_variations * z_offset;
}
if (mesh != nullptr)
{
setChunkInfoMesh(ch, mesh);
ch.chunkId = createId();
newlyCreatedChunksIds.push_back(ch.chunkId);
mChunkData.push_back(ch);
}
}
delete slBox;
mChunkData[chunkInfoIndex].isLeaf = false;
if (replaceChunk)
{
deleteChunkSubhierarchy(chunkId, true);
}
if (mRemoveIslands)
{
for (auto chunkToCheck : newlyCreatedChunksIds)
{
islandDetectionAndRemoving(chunkToCheck);
}
}
return 0;
}
int32_t FractureToolImpl::slicingNoisy(uint32_t chunkId, const SlicingConfiguration& conf, bool replaceChunk,
RandomGeneratorBase* rnd)
{
if (replaceChunk && chunkId == 0)
{
return 1;
}
int32_t chunkInfoIndex = getChunkInfoIndex(chunkId);
if (chunkInfoIndex == -1)
{
return 1;
}
if (!mChunkData[chunkInfoIndex].isLeaf)
{
deleteChunkSubhierarchy(chunkId);
}
chunkInfoIndex = getChunkInfoIndex(chunkId);
Mesh* mesh = new MeshImpl(*reinterpret_cast<MeshImpl*>(mChunkData[chunkInfoIndex].getMesh()));
const TransformST& tm = mChunkData[chunkInfoIndex].getTmToWorld();
BooleanEvaluator bTool;
int32_t x_slices = conf.x_slices;
int32_t y_slices = conf.y_slices;
int32_t z_slices = conf.z_slices;
const nvidia::NvBounds3 sourceBBox = toNvShared(mesh->getBoundingBox());
NvVec3 center = NvVec3(mesh->getBoundingBox().minimum.x, 0, 0);
float x_offset = (sourceBBox.maximum.x - sourceBBox.minimum.x) * (1.0f / (x_slices + 1));
float y_offset = (sourceBBox.maximum.y - sourceBBox.minimum.y) * (1.0f / (y_slices + 1));
float z_offset = (sourceBBox.maximum.z - sourceBBox.minimum.z) * (1.0f / (z_slices + 1));
NvVec3 resolution(tm.s / conf.noise.samplingInterval.x,
tm.s / conf.noise.samplingInterval.y,
tm.s / conf.noise.samplingInterval.z);
center.x += x_offset;
NvVec3 dir(1, 0, 0);
Mesh* slBox = nullptr;
ChunkInfo ch;
ch.isLeaf = true;
ch.isChanged = true;
ch.flags = ChunkInfo::NO_FLAGS;
ch.parentChunkId = replaceChunk ? mChunkData[chunkInfoIndex].parentChunkId : chunkId;
std::vector<Mesh*> xSlicedChunks;
std::vector<Mesh*> ySlicedChunks;
std::vector<uint32_t> newlyCreatedChunksIds;
float noisyPartSize = 1.2f;
// int32_t acceleratorRes = 8;
/**
Slice along x direction
*/
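// Same pass structure as slicing(), but every cut uses a noisy cutting surface pair and full boolean
// evaluation with SweepingAccelerators instead of fast cutting; here the DIFFERENCE result is kept as the
// slab and, after flipping the surface normals, the INTERSECTION result becomes the remainder.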
for (int32_t slice = 0; slice < x_slices; ++slice)
{
NvVec3 randVect =
NvVec3(2 * rnd->getRandomValue() - 1, 2 * rnd->getRandomValue() - 1, 2 * rnd->getRandomValue() - 1);
NvVec3 lDir = dir + randVect * conf.angle_variations;
slBox = getNoisyCuttingBoxPair(center, lDir, 40, noisyPartSize, resolution,
mPlaneIndexerOffset, conf.noise.amplitude,
conf.noise.frequency, conf.noise.octaveNumber, rnd->getRandomValue(),
mInteriorMaterialId);
// DummyAccelerator accel(mesh->getFacetCount());
SweepingAccelerator accel(mesh);
SweepingAccelerator dummy(slBox);
bTool.performBoolean(mesh, slBox, &accel, &dummy, BooleanConfigurations::BOOLEAN_DIFFERENCE());
Mesh* xSlice = bTool.createNewMesh();
if (xSlice != nullptr)
{
xSlicedChunks.push_back(xSlice);
}
inverseNormalAndIndices(slBox);
++mPlaneIndexerOffset;
bTool.performBoolean(mesh, slBox, &accel, &dummy, BooleanConfigurations::BOOLEAN_INTERSECTION());
Mesh* result = bTool.createNewMesh();
delete slBox;
delete mesh;
mesh = result;
if (mesh == nullptr)
{
break;
}
center.x += x_offset + (rnd->getRandomValue()) * conf.offset_variations * x_offset;
}
if (mesh != nullptr)
{
xSlicedChunks.push_back(mesh);
}
uint32_t slicedChunkSize = xSlicedChunks.size();
for (uint32_t chunk = 0; chunk < slicedChunkSize; ++chunk)
{
center = NvVec3(0, sourceBBox.minimum.y, 0);
center.y += y_offset;
dir = NvVec3(0, 1, 0);
mesh = xSlicedChunks[chunk];
for (int32_t slice = 0; slice < y_slices; ++slice)
{
NvVec3 randVect =
NvVec3(2 * rnd->getRandomValue() - 1, 2 * rnd->getRandomValue() - 1, 2 * rnd->getRandomValue() - 1);
NvVec3 lDir = dir + randVect * conf.angle_variations;
slBox = getNoisyCuttingBoxPair(center, lDir, 40, noisyPartSize, resolution,
mPlaneIndexerOffset, conf.noise.amplitude,
conf.noise.frequency, conf.noise.octaveNumber, rnd->getRandomValue(),
mInteriorMaterialId);
// DummyAccelerator accel(mesh->getFacetCount());
SweepingAccelerator accel(mesh);
SweepingAccelerator dummy(slBox);
bTool.performBoolean(mesh, slBox, &accel, &dummy, BooleanConfigurations::BOOLEAN_DIFFERENCE());
Mesh* ySlice = bTool.createNewMesh();
if (ySlice != nullptr)
{
ySlicedChunks.push_back(ySlice);
}
inverseNormalAndIndices(slBox);
++mPlaneIndexerOffset;
bTool.performBoolean(mesh, slBox, &accel, &dummy, BooleanConfigurations::BOOLEAN_INTERSECTION());
Mesh* result = bTool.createNewMesh();
delete slBox;
delete mesh;
mesh = result;
if (mesh == nullptr)
{
break;
}
center.y += y_offset + (rnd->getRandomValue()) * conf.offset_variations * y_offset;
}
if (mesh != nullptr)
{
ySlicedChunks.push_back(mesh);
}
}
for (uint32_t chunk = 0; chunk < ySlicedChunks.size(); ++chunk)
{
center = NvVec3(0, 0, sourceBBox.minimum.z);
center.z += z_offset;
dir = NvVec3(0, 0, 1);
mesh = ySlicedChunks[chunk];
for (int32_t slice = 0; slice < z_slices; ++slice)
{
NvVec3 randVect =
NvVec3(2 * rnd->getRandomValue() - 1, 2 * rnd->getRandomValue() - 1, 2 * rnd->getRandomValue() - 1);
NvVec3 lDir = dir + randVect * conf.angle_variations;
slBox = getNoisyCuttingBoxPair(center, lDir, 40, noisyPartSize, resolution,
mPlaneIndexerOffset, conf.noise.amplitude,
conf.noise.frequency, conf.noise.octaveNumber, rnd->getRandomValue(),
mInteriorMaterialId);
// DummyAccelerator accel(mesh->getFacetCount());
SweepingAccelerator accel(mesh);
SweepingAccelerator dummy(slBox);
bTool.performBoolean(mesh, slBox, &accel, &dummy, BooleanConfigurations::BOOLEAN_DIFFERENCE());
Mesh* zSlice = bTool.createNewMesh();
if (zSlice != nullptr)
{
setChunkInfoMesh(ch, zSlice);
ch.chunkId = createId();
mChunkData.push_back(ch);
newlyCreatedChunksIds.push_back(ch.chunkId);
}
inverseNormalAndIndices(slBox);
++mPlaneIndexerOffset;
bTool.performBoolean(mesh, slBox, &accel, &dummy, BooleanConfigurations::BOOLEAN_INTERSECTION());
Mesh* result = bTool.createNewMesh();
delete mesh;
delete slBox;
mesh = result;
if (mesh == nullptr)
{
break;
}
center.z += z_offset + (rnd->getRandomValue()) * conf.offset_variations * z_offset;
}
if (mesh != nullptr)
{
setChunkInfoMesh(ch, mesh);
ch.chunkId = createId();
newlyCreatedChunksIds.push_back(ch.chunkId);
mChunkData.push_back(ch);
}
}
mChunkData[chunkInfoIndex].isLeaf = false;
if (replaceChunk)
{
deleteChunkSubhierarchy(chunkId, true);
}
if (mRemoveIslands)
{
for (auto chunkToCheck : newlyCreatedChunksIds)
{
islandDetectionAndRemoving(chunkToCheck);
}
}
return 0;
}
int32_t FractureToolImpl::cut(uint32_t chunkId, const NvcVec3& normal, const NvcVec3& point,
const NoiseConfiguration& noise, bool replaceChunk, RandomGeneratorBase* rnd)
{
if (replaceChunk && chunkId == 0)
{
return 1;
}
int32_t chunkInfoIndex = getChunkInfoIndex(chunkId);
if (chunkInfoIndex == -1)
{
return 1;
}
if (!mChunkData[chunkInfoIndex].isLeaf)
{
deleteChunkSubhierarchy(chunkId);
}
chunkInfoIndex = getChunkInfoIndex(chunkId);
Mesh* mesh = new MeshImpl(*reinterpret_cast<MeshImpl*>(mChunkData[chunkInfoIndex].getMesh()));
BooleanEvaluator bTool;
const TransformST& tm = mChunkData[chunkInfoIndex].getTmToWorld();
ChunkInfo ch;
ch.chunkId = -1;
ch.isLeaf = true;
ch.isChanged = true;
ch.flags = ChunkInfo::NO_FLAGS;
ch.parentChunkId = replaceChunk ? mChunkData[chunkInfoIndex].parentChunkId : chunkId;
float noisyPartSize = 1.2f;
NvVec3 resolution(tm.s / noise.samplingInterval.x,
tm.s / noise.samplingInterval.y,
tm.s / noise.samplingInterval.z);
// Perform cut
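// The noisy cutting surface splits the chunk in two: the DIFFERENCE result becomes one part (stored in ch),
// and after flipping the surface normals the INTERSECTION result becomes the other part (mesh).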
Mesh* slBox = getNoisyCuttingBoxPair(toNvShared(tm.invTransformPos(point)),
toNvShared(normal), // tm doesn't change normals (up to normalization)
40, noisyPartSize, resolution,
mPlaneIndexerOffset, noise.amplitude, noise.frequency,
noise.octaveNumber, rnd->getRandomValue(), mInteriorMaterialId);
SweepingAccelerator accel(mesh);
SweepingAccelerator dummy(slBox);
bTool.performBoolean(mesh, slBox, &accel, &dummy, BooleanConfigurations::BOOLEAN_DIFFERENCE());
setChunkInfoMesh(ch, bTool.createNewMesh());
inverseNormalAndIndices(slBox);
++mPlaneIndexerOffset;
bTool.performBoolean(mesh, slBox, &accel, &dummy, BooleanConfigurations::BOOLEAN_INTERSECTION());
Mesh* result = bTool.createNewMesh();
delete slBox;
delete mesh;
mesh = result;
if (mesh == 0) // Return if it doesn't cut specified chunk
{
return 1;
}
if (!mChunkData[chunkInfoIndex].isLeaf)
{
deleteChunkSubhierarchy(chunkId);
}
chunkInfoIndex = getChunkInfoIndex(chunkId);
int32_t firstChunkId = -1;
if (ch.getMesh() != 0)
{
ch.chunkId = createId();
mChunkData.push_back(ch);
firstChunkId = ch.chunkId;
}
if (mesh != 0)
{
ch.chunkId = createId();
setChunkInfoMesh(ch, mesh);
mChunkData.push_back(ch);
}
mChunkData[chunkInfoIndex].isLeaf = false;
if (replaceChunk)
{
deleteChunkSubhierarchy(chunkId, true);
}
if (mRemoveIslands && firstChunkId >= 0)
{
islandDetectionAndRemoving(firstChunkId);
if (mesh != 0)
{
islandDetectionAndRemoving(ch.chunkId);
}
}
return 0;
}
bool CmpVec::operator()(const NvVec3& v1, const NvVec3& v2) const
{
auto v = (v2 - v1).abs();
if (v.x < 1e-5)
{
if (v.y < 1e-5)
{
return v1.z < v2.z;
}
return v1.y < v2.y;
}
return v1.x < v2.x;
}
int32_t FractureToolImpl::cutout(uint32_t chunkId, CutoutConfiguration conf, bool replaceChunk, RandomGeneratorBase* rnd)
{
if ((replaceChunk && chunkId == 0) || conf.cutoutSet == nullptr)
{
return 1;
}
int32_t chunkInfoIndex = getChunkInfoIndex(chunkId);
if (chunkInfoIndex == -1)
{
return 1;
}
if (!mChunkData[chunkInfoIndex].isLeaf)
{
deleteChunkSubhierarchy(chunkId);
}
chunkInfoIndex = getChunkInfoIndex(chunkId);
Nv::Blast::CutoutSet& cutoutSet = *conf.cutoutSet;
const TransformST& tm = mChunkData[chunkInfoIndex].getTmToWorld();
Mesh* mesh = new MeshImpl(*reinterpret_cast<MeshImpl*>(mChunkData[chunkInfoIndex].getMesh()));
float extrusionLength = toNvShared(mesh->getBoundingBox()).getDimensions().magnitude();
auto scale = toNvShared(conf.scale);
conf.transform.p = tm.invTransformPos(conf.transform.p);
if (scale.x < 0.f || scale.y < 0.f)
{
scale = { extrusionLength, extrusionLength };
}
if (conf.isRelativeTransform)
{
toNvShared(conf.transform.p) += toNvShared(mesh->getBoundingBox()).getCenter() / tm.s;
}
conf.noise.samplingInterval = conf.noise.samplingInterval / tm.s;
float xDim = cutoutSet.getDimensions().x;
float yDim = cutoutSet.getDimensions().y;
if (conf.cutoutSet->isPeriodic()) // cutouts with a periodic boundary do not support noise or conicity
{
conf.aperture = 0.f;
conf.noise.amplitude = 0.f;
}
BooleanEvaluator bTool;
ChunkInfo ch;
ch.isLeaf = true;
ch.isChanged = true;
ch.flags = ChunkInfo::NO_FLAGS;
ch.parentChunkId = replaceChunk ? mChunkData[chunkInfoIndex].parentChunkId : chunkId;
std::vector<uint32_t> newlyCreatedChunksIds;
SharedFacesMap sharedFacesMap;
std::vector<std::vector<NvVec3> > verts;
std::vector<std::set<int32_t> > smoothingGroups;
std::vector<uint32_t> cutoutStarts;
for (uint32_t c = 0; c < cutoutSet.getCutoutCount(); c++)
{
cutoutStarts.push_back(verts.size());
for (uint32_t l = 0; l < cutoutSet.getCutoutLoopCount(c); l++)
{
uint32_t vertCount = cutoutSet.getCutoutVertexCount(c, l);
verts.push_back(std::vector<NvVec3>(vertCount));
smoothingGroups.push_back(std::set<int32_t>());
for (uint32_t v = 0; v < vertCount; v++)
{
auto vert = cutoutSet.getCutoutVertex(c, l, v);
vert.x = (vert.x / xDim - 0.5f) * scale.x;
vert.y = (vert.y / yDim - 0.5f) * scale.y;
verts.back()[v] = toNvShared(vert);
if (cutoutSet.isCutoutVertexToggleSmoothingGroup(c, l, v))
{
smoothingGroups.back().insert(v);
}
}
}
}
float dimension = scale.magnitude();
float conicityMultiplierBot =
1.f + 2.f * extrusionLength / dimension *
nvidia::NvTan(nvidia::NvClamp(conf.aperture, -179.f, 179.f) * nvidia::NvPi / 360.f);
float conicityMultiplierTop = 2.f - conicityMultiplierBot;
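// conf.aperture is an angle in degrees, so NvTan(aperture * NvPi / 360) is the tangent of its half-angle.
// The bottom cross-section of the extruded cutting cone is scaled by (1 + slope term) and the top
// symmetrically by (1 - slope term); the checks below clamp the extrusion height where a multiplier would
// go negative (the cone reaches its apex).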
float heightBot = extrusionLength, heightTop = extrusionLength;
if (conicityMultiplierBot < 0.f)
{
conicityMultiplierBot = 0.f;
heightBot = 0.5f * dimension / std::abs(nvidia::NvTan(conf.aperture * nvidia::NvPi / 360.f));
}
if (conicityMultiplierTop < 0.f)
{
conicityMultiplierTop = 0.f;
heightTop = 0.5f * dimension / std::abs(nvidia::NvTan(conf.aperture * nvidia::NvPi / 360.f));
}
uint32_t seed = rnd->getRandomValue();
buildCuttingConeFaces(conf, verts, heightBot, heightTop, conicityMultiplierBot, conicityMultiplierTop,
mPlaneIndexerOffset, seed, mInteriorMaterialId, sharedFacesMap);
std::vector<std::vector<Mesh*> > cutoutMeshes;
for (uint32_t c = 0; c < cutoutSet.getCutoutCount(); c++)
{
cutoutMeshes.push_back(std::vector<Mesh*>());
for (uint32_t l = 0; l < cutoutSet.getCutoutLoopCount(c); l++)
{
if (verts[cutoutStarts[c] + l].size() < 4)
{
continue;
}
cutoutMeshes.back().push_back(
getCuttingCone(conf, verts[cutoutStarts[c] + l], smoothingGroups[cutoutStarts[c] + l], heightBot,
heightTop, conicityMultiplierBot, conicityMultiplierTop, mPlaneIndexerOffset, seed,
mInteriorMaterialId, sharedFacesMap, l != 0));
}
}
std::stack<std::pair<int32_t, int32_t> > cellsStack;
std::set<std::pair<int32_t, int32_t> > visited;
cellsStack.push(std::make_pair(0, 0));
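// Flood-fill over the cutout grid starting at cell (0, 0). For every visited cell the cutout meshes are
// offset by the cell's transformed position; the first loop of each cutout is intersected with the chunk,
// subsequent loops are subtracted as holes. If the cutout set is periodic, the four neighbors of any cell
// that produced a chunk are pushed so the pattern tiles across the whole chunk.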
while (!cellsStack.empty())
{
auto cell = cellsStack.top();
auto transformedCell = toNvShared(conf.transform).rotate(NvVec3(cell.first * scale.x, cell.second * scale.y, 0));
cellsStack.pop();
if (visited.find(cell) != visited.end())
{
continue;
}
visited.insert(cell);
bool hasCutout = false;
for (uint32_t c = 0; c < cutoutMeshes.size(); c++)
{
setChunkInfoMesh(ch, nullptr);
for (uint32_t l = 0; l < cutoutMeshes[c].size(); l++)
{
Mesh* cutoutMesh = cutoutMeshes[c][l];
if (cutoutMesh == nullptr)
{
continue;
}
auto vertices = cutoutMesh->getVerticesWritable();
for (uint32_t v = 0; v < cutoutMesh->getVerticesCount(); v++)
{
toNvShared(vertices[v].p) += transformedCell;
}
toNvShared(cutoutMesh->getBoundingBoxWritable().minimum) += transformedCell;
toNvShared(cutoutMesh->getBoundingBoxWritable().maximum) += transformedCell;
if (l == 0)
{
SweepingAccelerator accel(mesh);
SweepingAccelerator dummy(cutoutMesh);
bTool.performBoolean(mesh, cutoutMesh, &accel, &dummy, BooleanConfigurations::BOOLEAN_INTERSECTION());
setChunkInfoMesh(ch, bTool.createNewMesh());
}
else
{
SweepingAccelerator accel(ch.getMesh());
SweepingAccelerator dummy(cutoutMesh);
bTool.performBoolean(ch.getMesh(), cutoutMesh, &accel, &dummy,
BooleanConfigurations::BOOLEAN_DIFFERENCE());
setChunkInfoMesh(ch, bTool.createNewMesh());
}
for (uint32_t v = 0; v < cutoutMesh->getVerticesCount(); v++)
{
toNvShared(vertices[v].p) -= transformedCell;
}
toNvShared(cutoutMesh->getBoundingBoxWritable().minimum) -= transformedCell;
toNvShared(cutoutMesh->getBoundingBoxWritable().maximum) -= transformedCell;
}
if (ch.getMesh() != 0)
{
ch.chunkId = createId();
newlyCreatedChunksIds.push_back(ch.chunkId);
mChunkData.push_back(ch);
hasCutout = true;
}
}
if (hasCutout && cutoutSet.isPeriodic())
{
for (int32_t i = 0; i < 4; ++i)
{
const int32_t i0 = i & 1;
const int32_t i1 = (i >> 1) & 1;
auto newCell = std::make_pair(cell.first + i0 - i1, cell.second + i0 + i1 - 1);
if (visited.find(newCell) == visited.end())
{
cellsStack.push(newCell);
}
}
}
}
for (uint32_t c = 0; c < cutoutMeshes.size(); c++)
{
for (uint32_t l = 0; l < cutoutMeshes[c].size(); l++)
{
SAFE_DELETE(cutoutMeshes[c][l]);
}
}
SAFE_DELETE(mesh);
mChunkData[chunkInfoIndex].isLeaf = false;
if (replaceChunk)
{
deleteChunkSubhierarchy(chunkId, true);
}
if (mRemoveIslands)
{
for (auto chunkToCheck : newlyCreatedChunksIds)
{
islandDetectionAndRemoving(chunkToCheck);
}
}
return 0;
}
int32_t FractureToolImpl::getChunkInfoIndex(int32_t chunkId) const
{
for (uint32_t i = 0; i < mChunkData.size(); ++i)
{
if (mChunkData[i].chunkId == chunkId)
{
return i;
}
}
return -1;
}
int32_t FractureToolImpl::getChunkDepth(int32_t chunkId) const
{
int32_t chunkInfoIndex = getChunkInfoIndex(chunkId);
if (chunkInfoIndex == -1)
{
return -1;
}
int32_t depth = 0;
while (mChunkData[chunkInfoIndex].parentChunkId != -1)
{
++depth;
chunkInfoIndex = getChunkInfoIndex(mChunkData[chunkInfoIndex].parentChunkId);
}
return depth;
}
uint32_t FractureToolImpl::getChunksIdAtDepth(uint32_t depth, int32_t*& chunkIds) const
{
std::vector<int32_t> _chunkIds;
for (uint32_t i = 0; i < mChunkData.size(); ++i)
{
if (getChunkDepth(mChunkData[i].chunkId) == (int32_t)depth)
{
_chunkIds.push_back(mChunkData[i].chunkId);
}
}
chunkIds = new int32_t[_chunkIds.size()];
memcpy(chunkIds, _chunkIds.data(), _chunkIds.size() * sizeof(int32_t));
return (uint32_t)_chunkIds.size();
}
bool FractureToolImpl::setSourceMeshes(Mesh const * const * meshes, uint32_t meshesSize, const int32_t* ids /* = nullptr */)
{
if (meshes == nullptr)
{
return false;
}
reset();
for (uint32_t m = 0; m < meshesSize; m++)
{
const auto mesh = meshes[m];
const int32_t chunkId = (ids ? ids[m] : -1);
const int32_t id = setChunkMesh(mesh, -1, chunkId);
// if any mesh fails to get set up correctly,
// wipe the data so it isn't in a bad state and report failure
if (id < 0)
{
reset();
return false;
}
}
// all source meshes were set up correctly, report success
return true;
}
int32_t FractureToolImpl::setChunkMesh(const Mesh* meshInput, int32_t parentId, int32_t chunkId /* = -1 */)
{
if (chunkId < 0)
{
// allocate a new chunk ID
chunkId = createId();
if (chunkId < 0)
{
return -1;
}
}
else
{
// make sure the supplied chunk ID gets reserved
if (!reserveId(chunkId))
{
return -1;
}
}
const int32_t parentInfoIndex = getChunkInfoIndex(parentId);
if (meshInput == nullptr || (parentInfoIndex == -1 && parentId != -1))
{
return -1;
}
mChunkData.push_back(ChunkInfo());
auto& chunk = mChunkData.back();
chunk.chunkId = chunkId;
chunk.parentChunkId = parentId;
chunk.isLeaf = true;
chunk.isChanged = true;
chunk.flags = ChunkInfo::NO_FLAGS;
/**
Set mesh; move to origin and scale to unit cube
*/
Mesh* mesh = new MeshImpl(*reinterpret_cast<const MeshImpl*>(meshInput));
setChunkInfoMesh(chunk, mesh, false);
if ((size_t)parentInfoIndex < mChunkData.size())
{
mChunkData[parentInfoIndex].isLeaf = false;
}
// Make sure our fracturing surface ID base is greater than any existing ID
for (uint32_t i = 0; i < mesh->getFacetCount(); ++i)
{
const int64_t splitId = std::abs(mesh->getFacet(i)->userData);
mPlaneIndexerOffset = std::max(mPlaneIndexerOffset, splitId + 1);
}
return chunk.chunkId;
}
void FractureToolImpl::release()
{
delete this;
}
void FractureToolImpl::reset()
{
for (uint32_t i = 0; i < mChunkPostprocessors.size(); ++i)
{
delete mChunkPostprocessors[i];
}
mChunkPostprocessors.clear();
for (uint32_t i = 0; i < mChunkData.size(); ++i)
{
delete mChunkData[i].getMesh();
}
mChunkData.clear();
mPlaneIndexerOffset = 1;
mNextChunkId = 0;
mChunkIdsUsed.clear();
mInteriorMaterialId = kMaterialInteriorId;
}
void FractureToolImpl::setInteriorMaterialId(int32_t materialId)
{
mInteriorMaterialId = materialId;
}
bool FractureToolImpl::isAncestorForChunk(int32_t ancestorId, int32_t chunkId)
{
if (ancestorId == chunkId)
{
return false;
}
while (chunkId != -1)
{
if (ancestorId == chunkId)
{
return true;
}
const int32_t chunkInfoIndex = getChunkInfoIndex(chunkId);
if (chunkInfoIndex == -1)
{
return false;
}
chunkId = mChunkData[chunkInfoIndex].parentChunkId;
}
return false;
}
bool FractureToolImpl::deleteChunkSubhierarchy(int32_t chunkId, bool deleteRoot /*= false*/)
{
std::vector<int32_t> chunkToDelete;
for (uint32_t i = 0; i < mChunkData.size(); ++i)
{
if (isAncestorForChunk(chunkId, mChunkData[i].chunkId) || (deleteRoot && chunkId == mChunkData[i].chunkId))
{
chunkToDelete.push_back(i);
}
}
for (int32_t i = (int32_t)chunkToDelete.size() - 1; i >= 0; --i)
{
int32_t m = chunkToDelete[i];
delete mChunkData[m].getMesh();
std::swap(mChunkData.back(), mChunkData[m]);
mChunkData.pop_back();
}
markLeaves();
return chunkToDelete.size() > 0;
}
void FractureToolImpl::finalizeFracturing()
{
std::vector<Triangulator*> oldTriangulators = mChunkPostprocessors;
std::map<int32_t, int32_t> chunkIdToTriangulator;
std::set<uint32_t> newChunkMask;
for (uint32_t i = 0; i < oldTriangulators.size(); ++i)
{
chunkIdToTriangulator[oldTriangulators[i]->getParentChunkId()] = i;
}
mChunkPostprocessors.clear();
mChunkPostprocessors.resize(mChunkData.size());
newChunkMask.insert(0xffffffff); // Trigger masking mode: if newChunkMask were left empty, all UVs would be updated.
for (uint32_t i = 0; i < mChunkPostprocessors.size(); ++i)
{
auto it = chunkIdToTriangulator.find(mChunkData[i].chunkId);
if (mChunkData[i].isChanged || it == chunkIdToTriangulator.end())
{
if (it != chunkIdToTriangulator.end())
{
delete oldTriangulators[it->second];
oldTriangulators[it->second] = nullptr;
}
mChunkPostprocessors[i] = new Triangulator();
mChunkPostprocessors[i]->triangulate(mChunkData[i].getMesh());
mChunkPostprocessors[i]->getParentChunkId() = mChunkData[i].chunkId;
newChunkMask.insert(mChunkData[i].chunkId);
mChunkData[i].isChanged = false;
}
else
{
mChunkPostprocessors[i] = oldTriangulators[it->second];
}
}
std::vector<int32_t> badOnes;
for (uint32_t i = 0; i < mChunkPostprocessors.size(); ++i)
{
if (mChunkPostprocessors[i]->getBaseMesh().empty())
{
badOnes.push_back(i);
}
}
for (int32_t i = (int32_t)badOnes.size() - 1; i >= 0; --i)
{
int32_t chunkId = mChunkData[badOnes[i]].chunkId;
for (uint32_t j = 0; j < mChunkData.size(); ++j)
{
if (mChunkData[j].parentChunkId == chunkId)
mChunkData[j].parentChunkId = mChunkData[badOnes[i]].parentChunkId;
}
std::swap(mChunkPostprocessors[badOnes[i]], mChunkPostprocessors.back());
mChunkPostprocessors.pop_back();
std::swap(mChunkData[badOnes[i]], mChunkData.back());
mChunkData.pop_back();
}
if (!mChunkPostprocessors.empty()) // Failsafe to prevent infinite loop (leading to stack overflow)
{
fitAllUvToRect(1.0f, newChunkMask);
}
}
uint32_t FractureToolImpl::getChunkCount() const
{
return (uint32_t)mChunkData.size();
}
const ChunkInfo& FractureToolImpl::getChunkInfo(int32_t chunkInfoIndex)
{
return mChunkData[chunkInfoIndex];
}
uint32_t FractureToolImpl::getBaseMesh(int32_t chunkInfoIndex, Triangle*& output)
{
NVBLAST_ASSERT(mChunkPostprocessors.size() > 0);
if (mChunkPostprocessors.size() == 0)
{
return 0; // finalizeFracturing() should be called before getting mesh!
}
auto& baseMesh = mChunkPostprocessors[chunkInfoIndex]->getBaseMesh();
output = new Triangle[baseMesh.size()];
memcpy(output, baseMesh.data(), baseMesh.size() * sizeof(Triangle));
/* Scale mesh back */
const TransformST& tm = mChunkData[chunkInfoIndex].getTmToWorld();
for (uint32_t i = 0; i < baseMesh.size(); ++i)
{
Triangle& triangle = output[i];
triangle.a.p = tm.transformPos(triangle.a.p);
triangle.b.p = tm.transformPos(triangle.b.p);
triangle.c.p = tm.transformPos(triangle.c.p);
}
return baseMesh.size();
}
uint32_t FractureToolImpl::updateBaseMesh(int32_t chunkInfoIndex, Triangle* output)
{
NVBLAST_ASSERT(mChunkPostprocessors.size() > 0);
if (mChunkPostprocessors.size() == 0)
{
return 0; // finalizeFracturing() should be called before getting mesh!
}
auto& baseMesh = mChunkPostprocessors[chunkInfoIndex]->getBaseMesh();
memcpy(output, baseMesh.data(), baseMesh.size() * sizeof(Triangle));
/* Scale mesh back */
const TransformST& tm = mChunkData[chunkInfoIndex].getTmToWorld();
for (uint32_t i = 0; i < baseMesh.size(); ++i)
{
Triangle& triangle = output[i];
triangle.a.p = tm.transformPos(triangle.a.p);
triangle.b.p = tm.transformPos(triangle.b.p);
triangle.c.p = tm.transformPos(triangle.c.p);
}
return baseMesh.size();
}
float getVolume(std::vector<Triangle>& triangles)
{
if (triangles.size() == 0)
{
return 0.0f;
}
// Find an approximate centroid for a more accurate calculation
NvcVec3 centroid = { 0.0f, 0.0f, 0.0f };
for (size_t i = 0; i < triangles.size(); ++i)
{
centroid = centroid + triangles[i].a.p + triangles[i].b.p + triangles[i].c.p;
}
centroid = centroid / (3 * triangles.size());
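// Sum the signed volumes of the tetrahedra formed by each triangle and the centroid: the expression below is
// the scalar triple product a . (b x c); the total divided by 6 (in absolute value) is the enclosed volume.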
float volume = 0.0f;
for (size_t i = 0; i < triangles.size(); ++i)
{
const NvcVec3 a = triangles[i].a.p - centroid;
const NvcVec3 b = triangles[i].b.p - centroid;
const NvcVec3 c = triangles[i].c.p - centroid;
volume +=
(a.x * b.y * c.z - a.x * b.z * c.y - a.y * b.x * c.z + a.y * b.z * c.x + a.z * b.x * c.y - a.z * b.y * c.x);
}
return (1.0f / 6.0f) * std::abs(volume);
}
float FractureToolImpl::getMeshOverlap(const Mesh& meshA, const Mesh& meshB)
{
BooleanEvaluator bTool;
bTool.performBoolean(&meshA, &meshB, BooleanConfigurations::BOOLEAN_INTERSECTION());
Mesh* result = bTool.createNewMesh();
if (result == nullptr)
{
return 0.0f;
}
Triangulator postProcessor;
postProcessor.triangulate(&meshA);
float baseVolume = getVolume(postProcessor.getBaseMesh());
if (baseVolume == 0)
{
return 0.0f;
}
postProcessor.triangulate(result);
float intrsVolume = getVolume(postProcessor.getBaseMesh());
delete result;
return intrsVolume / baseVolume;
}
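// Deduplicate triangle vertices through vertexMapping, appending the welded result to vertexBuffer and
// emitting three indices per input triangle into indexBuffer.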
void weldVertices(std::map<Vertex, uint32_t, VrtComp>& vertexMapping, std::vector<Vertex>& vertexBuffer,
std::vector<uint32_t>& indexBuffer, std::vector<Triangle>& trb)
{
for (uint32_t i = 0; i < trb.size(); ++i)
{
auto it = vertexMapping.find(trb[i].a);
if (it == vertexMapping.end())
{
indexBuffer.push_back(static_cast<uint32_t>(vertexBuffer.size()));
vertexMapping[trb[i].a] = static_cast<uint32_t>(vertexBuffer.size());
vertexBuffer.push_back(trb[i].a);
}
else
{
indexBuffer.push_back(it->second);
}
it = vertexMapping.find(trb[i].b);
if (it == vertexMapping.end())
{
indexBuffer.push_back(static_cast<uint32_t>(vertexBuffer.size()));
vertexMapping[trb[i].b] = static_cast<uint32_t>(vertexBuffer.size());
vertexBuffer.push_back(trb[i].b);
}
else
{
indexBuffer.push_back(it->second);
}
it = vertexMapping.find(trb[i].c);
if (it == vertexMapping.end())
{
indexBuffer.push_back(static_cast<uint32_t>(vertexBuffer.size()));
vertexMapping[trb[i].c] = static_cast<uint32_t>(vertexBuffer.size());
vertexBuffer.push_back(trb[i].c);
}
else
{
indexBuffer.push_back(it->second);
}
}
}
void FractureToolImpl::setRemoveIslands(bool isRemoveIslands)
{
mRemoveIslands = isRemoveIslands;
}
int32_t FractureToolImpl::islandDetectionAndRemoving(int32_t chunkId, bool createAtNewDepth)
{
if (chunkId == 0 && createAtNewDepth == false)
{
return 0;
}
int32_t chunkInfoIndex = getChunkInfoIndex(chunkId);
Triangulator prc;
prc.triangulate(mChunkData[chunkInfoIndex].getMesh());
Mesh* chunk = mChunkData[chunkInfoIndex].getMesh();
std::vector<uint32_t>& mapping = prc.getBaseMapping();
std::vector<TriangleIndexed>& trs = prc.getBaseMeshIndexed();
std::vector<std::vector<uint32_t> > graph(prc.getWeldedVerticesCount());
std::vector<int32_t>& pm = prc.getPositionedMapping();
if (pm.size() == 0)
{
return 0;
}
/**
Chunk graph
*/
for (uint32_t i = 0; i < trs.size(); ++i)
{
graph[pm[trs[i].ea]].push_back(pm[trs[i].eb]);
graph[pm[trs[i].ea]].push_back(pm[trs[i].ec]);
graph[pm[trs[i].ec]].push_back(pm[trs[i].eb]);
graph[pm[trs[i].ec]].push_back(pm[trs[i].ea]);
graph[pm[trs[i].eb]].push_back(pm[trs[i].ea]);
graph[pm[trs[i].eb]].push_back(pm[trs[i].ec]);
}
for (uint32_t i = 0; i < chunk->getEdgesCount(); ++i)
{
int v1 = chunk->getEdges()[i].s;
int v2 = chunk->getEdges()[i].e;
v1 = pm[mapping[v1]];
v2 = pm[mapping[v2]];
graph[v1].push_back(v2);
graph[v2].push_back(v1);
}
/**
Walk graph, mark components
*/
std::vector<int32_t> comps(prc.getWeldedVerticesCount(), -1);
std::queue<uint32_t> que;
int32_t cComp = 0;
for (uint32_t i = 0; i < prc.getWeldedVerticesCount(); ++i)
{
int32_t to = pm[i];
if (comps[to] != -1)
continue;
que.push(to);
comps[to] = cComp;
while (!que.empty())
{
int32_t c = que.front();
que.pop();
for (uint32_t j = 0; j < graph[c].size(); ++j)
{
if (comps[graph[c][j]] == -1)
{
que.push(graph[c][j]);
comps[graph[c][j]] = cComp;
}
}
}
cComp++;
}
for (uint32_t i = 0; i < prc.getWeldedVerticesCount(); ++i)
{
int32_t to = pm[i];
comps[i] = comps[to];
}
std::vector<uint32_t> longComps(chunk->getVerticesCount());
for (uint32_t i = 0; i < chunk->getVerticesCount(); ++i)
{
int32_t to = mapping[i];
longComps[i] = comps[to];
}
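// More than one connected component means the chunk contains disjoint islands: partition its vertices,
// edges and facets per component and emit one mesh per island, either replacing the chunk in place or
// creating new child chunks depending on createAtNewDepth.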
if (cComp > 1)
{
std::vector<std::vector<Vertex> > compVertices(cComp);
std::vector<std::vector<Facet> > compFacets(cComp);
std::vector<std::vector<Edge> > compEdges(cComp);
std::vector<uint32_t> compVertexMapping(chunk->getVerticesCount(), 0);
const Vertex* vrts = chunk->getVertices();
for (uint32_t v = 0; v < chunk->getVerticesCount(); ++v)
{
int32_t vComp = comps[mapping[v]];
compVertexMapping[v] = static_cast<uint32_t>(compVertices[vComp].size());
compVertices[vComp].push_back(vrts[v]);
}
const Facet* fcb = chunk->getFacetsBuffer();
const Edge* edb = chunk->getEdges();
for (uint32_t fc = 0; fc < chunk->getFacetCount(); ++fc)
{
std::vector<uint32_t> edgesPerComp(cComp, 0);
for (uint32_t ep = fcb[fc].firstEdgeNumber; ep < fcb[fc].firstEdgeNumber + fcb[fc].edgesCount; ++ep)
{
int32_t vComp = comps[mapping[edb[ep].s]];
edgesPerComp[vComp]++;
compEdges[vComp].push_back({compVertexMapping[edb[ep].s], compVertexMapping[edb[ep].e]});
}
for (int32_t c = 0; c < cComp; ++c)
{
if (edgesPerComp[c] == 0)
{
continue;
}
compFacets[c].push_back(*chunk->getFacet(fc));
compFacets[c].back().edgesCount = edgesPerComp[c];
compFacets[c].back().firstEdgeNumber = static_cast<int32_t>(compEdges[c].size()) - edgesPerComp[c];
}
}
if (createAtNewDepth == false)
{
// Flag the chunk as changed in case this function is being called directly.
// (When called as part of automatic island removal, chunks are already flagged as changed.)
mChunkData[chunkInfoIndex].isChanged = true;
delete mChunkData[chunkInfoIndex].getMesh();
Mesh* newMesh0 =
new MeshImpl(compVertices[0].data(), compEdges[0].data(), compFacets[0].data(),
static_cast<uint32_t>(compVertices[0].size()), static_cast<uint32_t>(compEdges[0].size()),
static_cast<uint32_t>(compFacets[0].size()));
setChunkInfoMesh(mChunkData[chunkInfoIndex], newMesh0);
for (int32_t i = 1; i < cComp; ++i)
{
mChunkData.push_back(ChunkInfo(mChunkData[chunkInfoIndex]));
mChunkData.back().chunkId = createId();
Mesh* newMesh_i =
new MeshImpl(compVertices[i].data(), compEdges[i].data(), compFacets[i].data(),
static_cast<uint32_t>(compVertices[i].size()),
static_cast<uint32_t>(compEdges[i].size()), static_cast<uint32_t>(compFacets[i].size()));
setChunkInfoMesh(mChunkData.back(), newMesh_i);
}
}
else
{
deleteChunkSubhierarchy(chunkId);
for (int32_t i = 0; i < cComp; ++i)
{
uint32_t nc = createNewChunk(chunkId);
mChunkData[nc].isLeaf = true;
mChunkData[nc].flags = ChunkInfo::APPROXIMATE_BONDING;
Mesh* newMesh = new MeshImpl(compVertices[i].data(), compEdges[i].data(), compFacets[i].data(),
static_cast<uint32_t>(compVertices[i].size()),
static_cast<uint32_t>(compEdges[i].size()),
static_cast<uint32_t>(compFacets[i].size()));
setChunkInfoMesh(mChunkData[nc], newMesh);
}
mChunkData[chunkInfoIndex].isLeaf = false;
}
return cComp;
}
return 0;
}
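// Weld the (world-space) base meshes of all triangulated chunks into shared vertex/index buffers.
// indexBufferOffsets receives chunk count + 1 entries; chunk ch uses indices
// [indexBufferOffsets[ch], indexBufferOffsets[ch + 1]).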
uint32_t
FractureToolImpl::getBufferedBaseMeshes(Vertex*& vertexBuffer, uint32_t*& indexBuffer, uint32_t*& indexBufferOffsets)
{
std::map<Vertex, uint32_t, VrtComp> vertexMapping;
std::vector<Vertex> _vertexBuffer;
std::vector<uint32_t> _indexBuffer;
indexBufferOffsets = reinterpret_cast<uint32_t*>(NVBLAST_ALLOC((mChunkPostprocessors.size() + 1) * sizeof(uint32_t)));
for (uint32_t ch = 0; ch < mChunkPostprocessors.size(); ++ch)
{
const TransformST& tm = mChunkData[ch].getTmToWorld();
std::vector<Triangle> trb = mChunkPostprocessors[ch]->getBaseMesh();
for (uint32_t i = 0; i < trb.size(); ++i)
{
Triangle& tri = trb[i];
tri.a.p = tm.transformPos(tri.a.p);
tri.b.p = tm.transformPos(tri.b.p);
tri.c.p = tm.transformPos(tri.c.p);
}
indexBufferOffsets[ch] = _indexBuffer.size();
weldVertices(vertexMapping, _vertexBuffer, _indexBuffer, trb);
}
indexBufferOffsets[mChunkPostprocessors.size()] = _indexBuffer.size();
vertexBuffer = reinterpret_cast<Vertex*>(NVBLAST_ALLOC(_vertexBuffer.size() * sizeof(Vertex)));
indexBuffer = reinterpret_cast<uint32_t*>(NVBLAST_ALLOC(_indexBuffer.size() * sizeof(uint32_t)));
memcpy(vertexBuffer, _vertexBuffer.data(), _vertexBuffer.size() * sizeof(Vertex));
memcpy(indexBuffer, _indexBuffer.data(), _indexBuffer.size() * sizeof(uint32_t));
return _vertexBuffer.size();
}
int32_t FractureToolImpl::getChunkId(int32_t chunkInfoIndex) const
{
if (chunkInfoIndex < 0 || static_cast<uint32_t>(chunkInfoIndex) >= mChunkData.size())
{
return -1;
}
return mChunkData[chunkInfoIndex].chunkId;
}
int32_t FractureToolImpl::getInteriorMaterialId() const
{
return mInteriorMaterialId;
}
void FractureToolImpl::replaceMaterialId(int32_t oldMaterialId, int32_t newMaterialId)
{
for (auto& chunkData : mChunkData)
{
if (chunkData.getMesh())
{
chunkData.getMesh()->replaceMaterialId(oldMaterialId, newMaterialId);
}
}
}
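// Merge a group of sibling chunks into one new chunk: concatenate their vertex/edge/facet buffers (fixing up
// the indices), reparent the originals to the new chunk, and drop interior cut facets whose opposite face
// (userData of opposite sign) is also present in the group.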
uint32_t FractureToolImpl::stretchGroup(const std::vector<uint32_t>& grp, std::vector<std::vector<uint32_t> >& graph)
{
uint32_t parentChunkId = mChunkData[grp[0]].parentChunkId;
uint32_t newChunkIndex = createNewChunk(parentChunkId);
graph.push_back(std::vector<uint32_t>());
std::vector<Vertex> nVertices;
std::vector<Edge> nEdges;
std::vector<Facet> nFacets;
uint32_t offsetVertices = 0;
uint32_t offsetEdges = 0;
for (uint32_t i = 0; i < grp.size(); ++i)
{
mChunkData[grp[i]].parentChunkId = mChunkData[newChunkIndex].chunkId;
auto vr = mChunkData[grp[i]].getMesh()->getVertices();
auto ed = mChunkData[grp[i]].getMesh()->getEdges();
auto fc = mChunkData[grp[i]].getMesh()->getFacetsBuffer();
for (uint32_t v = 0; v < mChunkData[grp[i]].getMesh()->getVerticesCount(); ++v)
{
nVertices.push_back(vr[v]);
}
for (uint32_t v = 0; v < mChunkData[grp[i]].getMesh()->getEdgesCount(); ++v)
{
nEdges.push_back(ed[v]);
nEdges.back().s += offsetVertices;
nEdges.back().e += offsetVertices;
}
for (uint32_t v = 0; v < mChunkData[grp[i]].getMesh()->getFacetCount(); ++v)
{
nFacets.push_back(fc[v]);
nFacets.back().firstEdgeNumber += offsetEdges;
}
offsetEdges = nEdges.size();
offsetVertices = nVertices.size();
if (mChunkData[grp[i]].flags & ChunkInfo::APPROXIMATE_BONDING)
{
mChunkData[newChunkIndex].flags |= ChunkInfo::APPROXIMATE_BONDING;
}
}
std::vector<Facet> finalFacets;
std::set<int64_t> hasCutting;
for (uint32_t i = 0; i < nFacets.size(); ++i)
{
if (nFacets[i].userData != 0)
hasCutting.insert(nFacets[i].userData);
}
for (uint32_t i = 0; i < nFacets.size(); ++i)
{
// N.B. This can lead to open meshes for non-voronoi fracturing.
// We need to check if the opposing faces match exactly, or even better reconstruct parts that stick out.
if (nFacets[i].userData == 0 || (hasCutting.find(-nFacets[i].userData) == hasCutting.end()))
{
finalFacets.push_back(nFacets[i]);
}
}
Mesh* newMesh =
new MeshImpl(nVertices.data(), nEdges.data(), finalFacets.data(), static_cast<uint32_t>(nVertices.size()),
static_cast<uint32_t>(nEdges.size()), static_cast<uint32_t>(finalFacets.size()));
setChunkInfoMesh(mChunkData[newChunkIndex], newMesh);
return newChunkIndex;
}
uint32_t FractureToolImpl::createNewChunk(uint32_t parentChunkId)
{
const uint32_t index = static_cast<uint32_t>(mChunkData.size());
mChunkData.push_back(ChunkInfo());
mChunkData.back().parentChunkId = parentChunkId;
mChunkData.back().chunkId = createId();
return index;
}
void FractureToolImpl::fitUvToRect(float side, uint32_t chunk)
{
int32_t infoIndex = getChunkInfoIndex(chunk);
if (mChunkPostprocessors.empty()) // It seems finalizeFracturing() has not been called; call it here.
{
finalizeFracturing();
}
if (infoIndex == -1 || (int32_t)mChunkPostprocessors.size() <= infoIndex)
{
return; // We don't have such a chunk triangulated.
}
nvidia::NvBounds3 bnd;
bnd.setEmpty();
std::vector<Triangle>& ctrs = mChunkPostprocessors[infoIndex]->getBaseMesh();
std::vector<Triangle>& output = mChunkPostprocessors[infoIndex]->getBaseMesh();
for (uint32_t trn = 0; trn < ctrs.size(); ++trn)
{
if (ctrs[trn].userData == 0)
continue;
bnd.include(NvVec3(ctrs[trn].a.uv[0].x, ctrs[trn].a.uv[0].y, 0.0f));
bnd.include(NvVec3(ctrs[trn].b.uv[0].x, ctrs[trn].b.uv[0].y, 0.0f));
bnd.include(NvVec3(ctrs[trn].c.uv[0].x, ctrs[trn].c.uv[0].y, 0.0f));
}
float xscale = side / (bnd.maximum.x - bnd.minimum.x);
float yscale = side / (bnd.maximum.y - bnd.minimum.y);
xscale = std::min(xscale, yscale); // To have uniform scaling
for (uint32_t trn = 0; trn < ctrs.size(); ++trn)
{
if (ctrs[trn].userData == 0)
continue;
output[trn].a.uv[0].x = (ctrs[trn].a.uv[0].x - bnd.minimum.x) * xscale;
output[trn].b.uv[0].x = (ctrs[trn].b.uv[0].x - bnd.minimum.x) * xscale;
output[trn].c.uv[0].x = (ctrs[trn].c.uv[0].x - bnd.minimum.x) * xscale;
output[trn].a.uv[0].y = (ctrs[trn].a.uv[0].y - bnd.minimum.y) * xscale;
output[trn].b.uv[0].y = (ctrs[trn].b.uv[0].y - bnd.minimum.y) * xscale;
output[trn].c.uv[0].y = (ctrs[trn].c.uv[0].y - bnd.minimum.y) * xscale;
}
}
void FractureToolImpl::fitAllUvToRect(float side)
{
std::set<uint32_t> mask;
fitAllUvToRect(side, mask);
}
void FractureToolImpl::fitAllUvToRect(float side, std::set<uint32_t>& mask)
{
if (mChunkPostprocessors.empty()) // It seems finalizeFracturing() has not been called; call it here.
{
finalizeFracturing();
}
if (mChunkPostprocessors.empty())
{
return; // We don't have triangulated chunks.
}
nvidia::NvBounds3 bnd;
bnd.setEmpty();
for (uint32_t chunk = 0; chunk < mChunkData.size(); ++chunk)
{
Mesh* m = mChunkData[chunk].getMesh();
const Edge* edges = m->getEdges();
const Vertex* vertices = m->getVertices();
for (uint32_t trn = 0; trn < m->getFacetCount(); ++trn)
{
if (m->getFacet(trn)->userData == 0)
continue;
for (uint32_t ei = 0; ei < m->getFacet(trn)->edgesCount; ++ei)
{
int32_t v1 = edges[m->getFacet(trn)->firstEdgeNumber + ei].s;
int32_t v2 = edges[m->getFacet(trn)->firstEdgeNumber + ei].e;
bnd.include(NvVec3(vertices[v1].uv[0].x, vertices[v1].uv[0].y, 0.0f));
bnd.include(NvVec3(vertices[v2].uv[0].x, vertices[v2].uv[0].y, 0.0f));
}
}
}
float xscale = side / (bnd.maximum.x - bnd.minimum.x);
float yscale = side / (bnd.maximum.y - bnd.minimum.y);
xscale = std::min(xscale, yscale); // To have uniform scaling
for (uint32_t chunk = 0; chunk < mChunkPostprocessors.size(); ++chunk)
{
if (!mask.empty() && mask.find(mChunkPostprocessors[chunk]->getParentChunkId()) == mask.end())
continue;
std::vector<Triangle>& ctrs = mChunkPostprocessors[chunk]->getBaseMeshNotFitted();
std::vector<Triangle>& output = mChunkPostprocessors[chunk]->getBaseMesh();
for (uint32_t trn = 0; trn < ctrs.size(); ++trn)
{
if (ctrs[trn].userData == 0)
continue;
output[trn].a.uv[0].x = (ctrs[trn].a.uv[0].x - bnd.minimum.x) * xscale;
output[trn].b.uv[0].x = (ctrs[trn].b.uv[0].x - bnd.minimum.x) * xscale;
output[trn].c.uv[0].x = (ctrs[trn].c.uv[0].x - bnd.minimum.x) * xscale;
output[trn].a.uv[0].y = (ctrs[trn].a.uv[0].y - bnd.minimum.y) * xscale;
output[trn].b.uv[0].y = (ctrs[trn].b.uv[0].y - bnd.minimum.y) * xscale;
output[trn].c.uv[0].y = (ctrs[trn].c.uv[0].y - bnd.minimum.y) * xscale;
}
}
}
void FractureToolImpl::markLeaves()
{
for (ChunkInfo& info : mChunkData)
{
info.isLeaf = true;
}
for (ChunkInfo& info : mChunkData)
{
const int32_t infoIndex = getChunkInfoIndex(info.parentChunkId);
if (infoIndex >= 0)
{
mChunkData[infoIndex].isLeaf = false;
}
}
}
bool FractureToolImpl::setChunkInfoMesh(ChunkInfo& chunkInfo, Mesh* mesh, bool fromTransformed /*= true*/)
{
// Class to access protected ChunkInfo members
struct ChunkInfoAuth : public ChunkInfo
{
void setMesh(Mesh* mesh, const TransformST& parentTM)
{
meshData = mesh;
if (meshData != nullptr)
{
// Calculate the world transform
meshData->recalculateBoundingBox();
const TransformST localTM = createCubeTMFromBounds(meshData->getBoundingBox());
tmToWorld.s = parentTM.s * localTM.s;
tmToWorld.t = parentTM.s * localTM.t + parentTM.t;
// Transform vertex buffer to fit in unit cube
Vertex* verticesBuffer = meshData->getVerticesWritable();
for (uint32_t i = 0; i < meshData->getVerticesCount(); ++i)
{
Nv::Blast::Vertex& v = verticesBuffer[i];
v.p = localTM.invTransformPos(v.p);
}
// If none of chunk.tmToWorld scales are zero (or less than epsilon), then the bounds
// will be { {-1.0f, -1.0f, -1.0f}, {1.0f, 1.0f, 1.0f} }. Just in case, we properly
// calculate the bounds here.
meshData->recalculateBoundingBox();
}
else
{
tmToWorld = TransformST::identity();
}
}
bool isInitialized() const { return parentChunkId != ChunkInfo::UninitializedID; }
};
ChunkInfoAuth* auth = static_cast<ChunkInfoAuth*>(&chunkInfo);
if (!auth->isInitialized())
{
return false;
}
const TransformST parentTM = fromTransformed && chunkInfo.parentChunkId >= 0 ?
mChunkData[getChunkInfoIndex(chunkInfo.parentChunkId)].getTmToWorld() : TransformST::identity();
auth->setMesh(mesh, parentTM);
return true;
}
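// Build the chunk adjacency graph: two chunks are considered adjacent when they share a cutting-plane id
// (the absolute facet userData) and have the same parent; optional user-supplied adjacency pairs are then
// merged into the sorted per-chunk adjacency lists.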
void FractureToolImpl::rebuildAdjGraph(const std::vector<uint32_t>& chunks, const NvcVec2i* adjChunks,
uint32_t adjChunksSize, std::vector<std::vector<uint32_t> >& chunkGraph)
{
std::vector<std::pair<uint64_t, uint32_t> > planeChunkIndex;
for (uint32_t i = 0; i < chunks.size(); ++i)
{
for (uint32_t fc = 0; fc < mChunkData[chunks[i]].getMesh()->getFacetCount(); ++fc)
{
if (mChunkData[chunks[i]].getMesh()->getFacet(fc)->userData != 0)
{
planeChunkIndex.push_back(
std::make_pair(std::abs(mChunkData[chunks[i]].getMesh()->getFacet(fc)->userData), chunks[i]));
}
}
}
{
std::sort(planeChunkIndex.begin(), planeChunkIndex.end());
auto it = std::unique(planeChunkIndex.begin(), planeChunkIndex.end());
planeChunkIndex.resize(it - planeChunkIndex.begin());
}
uint32_t a = 0;
for (uint32_t i = 1; i < planeChunkIndex.size(); ++i)
{
if (planeChunkIndex[a].first != planeChunkIndex[i].first)
{
uint32_t b = i;
for (uint32_t p1 = a; p1 < b; ++p1)
{
for (uint32_t p2 = p1 + 1; p2 < b; ++p2)
{
if (planeChunkIndex[p1].second == planeChunkIndex[p2].second ||
mChunkData[planeChunkIndex[p1].second].parentChunkId != mChunkData[planeChunkIndex[p2].second].parentChunkId)
{
continue;
}
bool has = false;
for (uint32_t k = 0; k < chunkGraph[planeChunkIndex[p1].second].size(); ++k)
{
if (chunkGraph[planeChunkIndex[p1].second][k] == planeChunkIndex[p2].second)
{
has = true;
break;
}
}
if (!has)
{
chunkGraph[planeChunkIndex[p1].second].push_back(planeChunkIndex[p2].second);
}
has = false;
for (uint32_t k = 0; k < chunkGraph[planeChunkIndex[p2].second].size(); ++k)
{
if (chunkGraph[planeChunkIndex[p2].second][k] == planeChunkIndex[p1].second)
{
has = true;
break;
}
}
if (!has)
{
chunkGraph[planeChunkIndex[p2].second].push_back(planeChunkIndex[p1].second);
}
}
}
a = b;
}
}
// Add in extra adjacency info, if we have it
if (adjChunks && adjChunksSize)
{
std::set<uint32_t> chunkSet(chunks.begin(), chunks.end());
#if NV_DEBUG || NV_CHECKED // Make sure these arrays are sorted
for (std::vector<uint32_t>& adj : chunkGraph)
{
const bool isSorted = std::is_sorted(adj.begin(), adj.end());
if (!isSorted)
{
NVBLAST_ASSERT(0);
NvBlastGlobalGetErrorCallback()->reportError(nvidia::NvErrorCode::eDEBUG_WARNING, "Adjacency array not sorted; subsequent code assumes it is.", __FILE__, __LINE__);
}
}
#endif
for (uint32_t i = 0; i < adjChunksSize; ++i)
{
const NvcVec2i& pair = adjChunks[i];
if (chunkSet.find((uint32_t)pair.x) == chunkSet.end() || chunkSet.find((uint32_t)pair.y) == chunkSet.end())
{
continue;
}
{
std::vector<uint32_t>& adj0 = chunkGraph[pair.x];
std::vector<uint32_t>::iterator it0 = std::lower_bound(adj0.begin(), adj0.end(), (uint32_t)pair.y);
if (it0 == adj0.end() || *it0 != (uint32_t)pair.y)
{
adj0.insert(it0, (uint32_t)pair.y);
}
}
{
std::vector<uint32_t>& adj1 = chunkGraph[pair.y];
std::vector<uint32_t>::iterator it1 = std::lower_bound(adj1.begin(), adj1.end(), (uint32_t)pair.x);
if (it1 == adj1.end() || *it1 != (uint32_t)pair.x)
{
adj1.insert(it1, (uint32_t)pair.x);
}
}
}
}
}
bool VecIntComp(const std::pair<NvcVec3, uint32_t>& a, const std::pair<NvcVec3, uint32_t>& b)
{
if (a.first.x < b.first.x)
return true;
if (a.first.x > b.first.x)
return false;
if (a.first.y < b.first.y)
return true;
if (a.first.y > b.first.y)
return false;
if (a.first.z < b.first.z)
return true;
if (a.first.z > b.first.z)
return false;
return a.second < b.second;
}
void FractureToolImpl::uniteChunks(uint32_t threshold, uint32_t targetClusterSize, const uint32_t* chunksToMerge, uint32_t mergeChunkCount,
const NvcVec2i* adjChunks, uint32_t adjChunksSize, bool removeOriginalChunks /*= false*/)
{
std::vector<int32_t> depth(mChunkData.size(), 0);
std::vector<std::vector<uint32_t> > chunkGraph(mChunkData.size());
std::vector<uint32_t> atEachDepth;
std::vector<uint32_t> childNumber(mChunkData.size(), 0);
std::vector<uint32_t> chunksToRemove;
enum ChunkFlags
{
Mergeable = (1 << 0),
Merged = (1 << 1)
};
std::vector<uint32_t> chunkFlags(mChunkData.size());
if (chunksToMerge == nullptr)
{
std::fill(chunkFlags.begin(), chunkFlags.end(), Mergeable);
}
else
{
// Seed all mergeable chunks with Mergeable flag
for (uint32_t chunkN = 0; chunkN < mergeChunkCount; ++chunkN)
{
const uint32_t chunkIndex = chunksToMerge[chunkN];
chunkFlags[chunkIndex] |= Mergeable;
}
// Make all descendants mergeable too
std::vector<int32_t> treeWalk;
for (uint32_t chunkInfoIndex = 0; chunkInfoIndex < mChunkData.size(); ++chunkInfoIndex)
{
treeWalk.clear();
int32_t walkInfoIndex = (int32_t)chunkInfoIndex;
do
{
if (chunkFlags[walkInfoIndex] & Mergeable)
{
std::for_each(treeWalk.begin(), treeWalk.end(), [&chunkFlags](int32_t index) {chunkFlags[index] |= Mergeable; });
break;
}
treeWalk.push_back(walkInfoIndex);
} while ((walkInfoIndex = getChunkInfoIndex(mChunkData[walkInfoIndex].parentChunkId)) >= 0);
}
}
int32_t maxDepth = 0;
for (uint32_t i = 0; i < mChunkData.size(); ++i)
{
if (mChunkData[i].parentChunkId != -1)
childNumber[getChunkInfoIndex(mChunkData[i].parentChunkId)]++;
depth[i] = getChunkDepth(mChunkData[i].chunkId);
NVBLAST_ASSERT(depth[i] >= 0);
maxDepth = std::max(maxDepth, depth[i]);
}
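// Work from the deepest level up: wherever a parent has more than `threshold` children, greedily grow
// clusters of adjacent mergeable siblings (up to targetClusterSize, seeded in order of distance from an
// extremal chunk center) and replace each cluster with a single merged chunk, repeating until the count
// drops to the threshold or the iteration limit is reached.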
for (int32_t level = maxDepth; level > 0; --level) // go from leaves to trunk and rebuild hierarchy
{
std::vector<uint32_t> cGroup;
std::vector<uint32_t> chunksToUnify;
NvcVec3 minPoint = {MAXIMUM_EXTENT, MAXIMUM_EXTENT, MAXIMUM_EXTENT};
VrtPositionComparator posc;
for (uint32_t ch = 0; ch < depth.size(); ++ch)
{
if (depth[ch] == level && childNumber[getChunkInfoIndex(mChunkData[ch].parentChunkId)] > threshold && (chunkFlags[ch] & Mergeable) != 0)
{
chunksToUnify.push_back(ch);
NvcVec3 cp = fromNvShared(toNvShared(mChunkData[ch].getMesh()->getBoundingBox()).getCenter());
if (posc(cp, minPoint))
{
minPoint = cp;
}
}
}
std::vector<std::pair<float, uint32_t> > distances;
for (uint32_t i = 0; i < chunksToUnify.size(); ++i)
{
float d = (toNvShared(minPoint) - toNvShared(mChunkData[chunksToUnify[i]].getMesh()->getBoundingBox()).getCenter()).magnitude();
distances.push_back(std::make_pair(d, chunksToUnify[i]));
}
std::sort(distances.begin(), distances.end());
for (uint32_t i = 0; i < chunksToUnify.size(); ++i)
{
chunksToUnify[i] = distances[i].second;
}
rebuildAdjGraph(chunksToUnify, adjChunks, adjChunksSize, chunkGraph);
for (uint32_t iter = 0; iter < 32 && chunksToUnify.size() > threshold; ++iter)
{
std::vector<uint32_t> newChunksToUnify;
for (uint32_t c = 0; c < chunksToUnify.size(); ++c)
{
if ((chunkFlags[chunksToUnify[c]] & Mergeable) == 0)
continue;
chunkFlags[chunksToUnify[c]] &= ~Mergeable;
cGroup.push_back(chunksToUnify[c]);
for (uint32_t sc = 0; sc < cGroup.size() && cGroup.size() < targetClusterSize; ++sc)
{
uint32_t sid = cGroup[sc];
for (uint32_t neighbN = 0; neighbN < chunkGraph[sid].size() && cGroup.size() < targetClusterSize; ++neighbN)
{
const uint32_t chunkNeighb = chunkGraph[sid][neighbN];
if (mChunkData[chunkNeighb].parentChunkId != mChunkData[sid].parentChunkId)
continue;
if ((chunkFlags[chunkNeighb] & Mergeable) == 0)
continue;
chunkFlags[chunkNeighb] &= ~Mergeable;
cGroup.push_back(chunkNeighb);
}
}
if (cGroup.size() > 1)
{
uint32_t newChunk = stretchGroup(cGroup, chunkGraph);
for (uint32_t chunk : cGroup)
{
if (removeOriginalChunks && !(chunkFlags[chunk] & Merged))
{
chunksToRemove.push_back(chunk);
}
}
cGroup.clear();
newChunksToUnify.push_back(newChunk);
chunkFlags.push_back(Merged);
}
else
{
cGroup.clear();
}
}
chunksToUnify = newChunksToUnify;
rebuildAdjGraph(chunksToUnify, adjChunks, adjChunksSize, chunkGraph);
}
}
// Remove chunks
std::vector<uint32_t> remap(mChunkData.size(), 0xFFFFFFFF);
std::sort(chunksToRemove.begin(), chunksToRemove.end());
std::vector<uint32_t>::iterator removeIt = chunksToRemove.begin();
size_t chunkWriteIndex = 0;
for (size_t chunkReadIndex = 0; chunkReadIndex < mChunkData.size(); ++chunkReadIndex)
{
if (removeIt < chunksToRemove.end())
{
if (*removeIt == chunkReadIndex)
{
++removeIt;
continue;
}
}
if (chunkReadIndex != chunkWriteIndex)
{
mChunkData[chunkWriteIndex] = mChunkData[chunkReadIndex];
}
remap[chunkReadIndex] = chunkWriteIndex++;
}
mChunkData.resize(chunkWriteIndex);
for (ChunkInfo& chunkInfo : mChunkData)
{
if (chunkInfo.parentChunkId >= 0)
{
const uint32_t mappedParentIndex = remap[getChunkInfoIndex(chunkInfo.parentChunkId)];
NVBLAST_ASSERT(mappedParentIndex < mChunkData.size());
if (mappedParentIndex < mChunkData.size())
{
chunkInfo.parentChunkId = mChunkData[mappedParentIndex].chunkId;
}
}
}
}
bool FractureToolImpl::setApproximateBonding(uint32_t chunkIndex, bool useApproximateBonding)
{
if ((size_t)chunkIndex >= mChunkData.size())
{
return false;
}
if (useApproximateBonding)
{
mChunkData[chunkIndex].flags |= (uint32_t)ChunkInfo::APPROXIMATE_BONDING;
}
else
{
mChunkData[chunkIndex].flags &= ~(uint32_t)ChunkInfo::APPROXIMATE_BONDING;
}
return true;
}
int32_t FractureToolImpl::createId()
{
// make sure there is a free ID to be returned
if (mChunkIdsUsed.size() >= (size_t)INT32_MAX + 1)
{
NvBlastGlobalGetErrorCallback()->reportError(nvidia::NvErrorCode::eINTERNAL_ERROR, "Chunk IDs exhausted.", __FILE__, __LINE__);
return -1;
}
// find the next free ID
while (mChunkIdsUsed.count(mNextChunkId))
{
// handle wrapping
if (++mNextChunkId < 0)
mNextChunkId = 0;
}
// step the counter and handle wrapping
const int32_t id = mNextChunkId++;
if (mNextChunkId < 0)
mNextChunkId = 0;
return (reserveId(id) ? id : -1);
}
bool FractureToolImpl::reserveId(int32_t id)
{
// add it to the used set and make sure it wasn't already in there
const auto ret = mChunkIdsUsed.insert(id);
NVBLAST_ASSERT_WITH_MESSAGE(ret.second, "Request to reserve ID, but it is already in use");
return ret.second;
}
} // namespace Blast
} // namespace Nv
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringPerlinNoise.h
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTEXTAUTHORINGPERLINNOISE_H
#define NVBLASTEXTAUTHORINGPERLINNOISE_H
#include <NvBlastExtAuthoringFractureTool.h>
#include "NvVec4.h"
#include "NvVec3.h"
#define PERLIN_NOISE_SAMPLE_TABLE 512
using nvidia::NvVec3;
namespace Nv
{
namespace Blast
{
/***********
Noise generation routines, copied from Apex.
*/
NV_INLINE float at3(const float& rx, const float& ry, const float& rz, const NvVec3 q)
{
return rx * q[0] + ry * q[1] + rz * q[2];
}
NV_INLINE float fade(float t) { return t * t * t * (t * (t * 6.0f - 15.0f) + 10.0f); }
NV_INLINE float lerp(float t, float a, float b) { return a + t * (b - a); }
NV_INLINE void setup(int i, NvVec3 point, float& t, int& b0, int& b1, float& r0, float& r1)
{
t = point[i] + (0x1000);
b0 = ((int)t) & (PERLIN_NOISE_SAMPLE_TABLE - 1);
b1 = (b0 + 1) & (PERLIN_NOISE_SAMPLE_TABLE - 1);
r0 = t - (int)t;
r1 = r0 - 1.0f;
}
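// Classic Perlin gradient noise: hash the lattice cell corners through the permutation table p,
// dot each corner gradient from g with the offset to that corner, and trilinearly interpolate
// the eight results using the quintic fade() curve.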
NV_INLINE float noiseSample(NvVec3 point, int* p, NvVec3* g)
{
int bx0, bx1, by0, by1, bz0, bz1, b00, b10, b01, b11;
float rx0, rx1, ry0, ry1, rz0, rz1, sy, sz, a, b, c, d, t, u, v;
NvVec3 q;
int i, j;
setup(0, point, t, bx0, bx1, rx0, rx1);
setup(1, point, t, by0, by1, ry0, ry1);
setup(2, point, t, bz0, bz1, rz0, rz1);
i = p[bx0];
j = p[bx1];
b00 = p[i + by0];
b10 = p[j + by0];
b01 = p[i + by1];
b11 = p[j + by1];
t = fade(rx0);
sy = fade(ry0);
sz = fade(rz0);
q = g[b00 + bz0]; u = at3(rx0, ry0, rz0, q);
q = g[b10 + bz0]; v = at3(rx1, ry0, rz0, q);
a = lerp(t, u, v);
q = g[b01 + bz0]; u = at3(rx0, ry1, rz0, q);
q = g[b11 + bz0]; v = at3(rx1, ry1, rz0, q);
b = lerp(t, u, v);
c = lerp(sy, a, b);
q = g[b00 + bz1]; u = at3(rx0, ry0, rz1, q);
q = g[b10 + bz1]; v = at3(rx1, ry0, rz1, q);
a = lerp(t, u, v);
q = g[b01 + bz1]; u = at3(rx0, ry1, rz1, q);
q = g[b11 + bz1]; v = at3(rx1, ry1, rz1, q);
b = lerp(t, u, v);
d = lerp(sy, a, b);
return lerp(sz, c, d);
}
/**
Perlin Noise generation tool
*/
class PerlinNoise
{
public:
/**
\param[in] rnd Random value generator
\param[in] octaves Number of noise octaves
\param[in] frequency Frequency of noise
\param[in] amplitude Amplitude of noise
*/
PerlinNoise(Nv::Blast::RandomGeneratorBase* rnd, int octaves = 1, float frequency = 1., float amplitude = 1.)
: mRnd(rnd),
mOctaves(octaves),
mFrequency(frequency),
mAmplitude(amplitude),
mbInit(false)
{
}
/*
Reset state of noise generator
\param[in] octaves Number of noise octaves
\param[in] frequency Frequency of noise
\param[in] amplitude Amplitude of noise
*/
void reset(int octaves = 1, float frequency = 1.f, float amplitude = 1.f)
{
mOctaves = octaves;
mFrequency = frequency;
mAmplitude = amplitude;
init();
}
/**
Get Perlin Noise value at given point
*/
float sample(const nvidia::NvVec3& point)
{
return perlinNoise(point);
}
private:
PerlinNoise& operator=(const PerlinNoise&);
float perlinNoise(nvidia::NvVec3 point)
{
if (!mbInit)
init();
const int octaves = mOctaves;
const float frequency = mFrequency;
float amplitude = mAmplitude;
float result = 0.0f;
point *= frequency;
for (int i = 0; i < octaves; ++i)
{
NvVec3 lpnt;
lpnt[0] = point.x;
lpnt[1] = point.y;
lpnt[2] = point.z;
result += (noiseSample(lpnt, p, g)) * amplitude;
point *= 2.0f;
amplitude *= 0.5f;
}
return result;
}
void init(void)
{
mbInit = true;
unsigned i, j;
int k;
for (i = 0; i < (unsigned)PERLIN_NOISE_SAMPLE_TABLE; i++)
{
p[i] = (int)i;
for (j = 0; j < 3; ++j)
g[i][j] = mRnd->getRandomValue();
g[i].normalize();
}
while (--i)
{
k = p[i];
j = static_cast<uint32_t>(mRnd->getRandomValue() * PERLIN_NOISE_SAMPLE_TABLE);
p[i] = p[j];
p[j] = k;
}
for (i = 0; i < PERLIN_NOISE_SAMPLE_TABLE + 2; ++i)
{
p[(unsigned)PERLIN_NOISE_SAMPLE_TABLE + i] = p[i];
for (j = 0; j < 3; ++j)
g[(unsigned)PERLIN_NOISE_SAMPLE_TABLE + i][j] = g[i][j];
}
}
Nv::Blast::RandomGeneratorBase* mRnd;
int mOctaves;
float mFrequency;
float mAmplitude;
// Permutation vector
int p[(unsigned)(PERLIN_NOISE_SAMPLE_TABLE + PERLIN_NOISE_SAMPLE_TABLE + 2)];
// Gradient vector
NvVec3 g[(unsigned)(PERLIN_NOISE_SAMPLE_TABLE + PERLIN_NOISE_SAMPLE_TABLE + 2)];
bool mbInit;
};
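/**
    Minimal usage sketch (illustrative only; assumes the caller provides some
    RandomGeneratorBase implementation, here called UserRandomGenerator):

        UserRandomGenerator rng;
        PerlinNoise noise(&rng, 3, 2.0f, 1.0f);  // 3 octaves, frequency 2, amplitude 1
        float value = noise.sample(nvidia::NvVec3(0.25f, 0.5f, 0.75f));
*/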
/**
Simplex noise generation tool
*/
class SimplexNoise
{
int32_t mOctaves;
float mAmplitude;
float mFrequency;
int32_t mSeed;
static const int X_NOISE_GEN = 1619;
static const int Y_NOISE_GEN = 31337;
static const int Z_NOISE_GEN = 6971;
static const int W_NOISE_GEN = 1999;
static const int SEED_NOISE_GEN = 1013;
static const int SHIFT_NOISE_GEN = 8;
NV_INLINE int fastfloor(float x)
{
return (x >= 0) ? (int)x : (int)(x - 1);
}
SimplexNoise& operator=(const SimplexNoise&)
{
return *this;
}
public:
/**
\param[in] ampl Amplitude of noise
\param[in] freq Frequency of noise
\param[in] octaves Number of noise octaves
\param[in] seed Random seed value
*/
SimplexNoise(float ampl, float freq, int32_t octaves, int32_t seed) : mOctaves(octaves), mAmplitude(ampl), mFrequency(freq), mSeed(seed) {};
// 4D simplex noise
// returns: (x,y,z) = noise grad, w = noise value
/**
Evaluate noise at given 4d-point
\param[in] x x coordinate of point
\param[in] y y coordinate of point
\param[in] z z coordinate of point
\param[in] w w coordinate of point
\param[in] seed Random seed value
\return Noise valued vector (x,y,z) and scalar (w)
*/
nvidia::NvVec4 eval4D(float x, float y, float z, float w, int seed)
{
// The skewing and unskewing factors are hairy again for the 4D case
const float F4 = (nvidia::NvSqrt(5.0f) - 1.0f) / 4.0f;
const float G4 = (5.0f - nvidia::NvSqrt(5.0f)) / 20.0f;
// Skew the (x,y,z,w) space to determine which cell of 24 simplices we're in
float s = (x + y + z + w) * F4; // Factor for 4D skewing
int ix = fastfloor(x + s);
int iy = fastfloor(y + s);
int iz = fastfloor(z + s);
int iw = fastfloor(w + s);
float tu = (ix + iy + iz + iw) * G4; // Factor for 4D unskewing
// Unskew the cell origin back to (x,y,z,w) space
float x0 = x - (ix - tu); // The x,y,z,w distances from the cell origin
float y0 = y - (iy - tu);
float z0 = z - (iz - tu);
float w0 = w - (iw - tu);
int c = (x0 > y0) ? (1 << 0) : (1 << 2);
c += (x0 > z0) ? (1 << 0) : (1 << 4);
c += (x0 > w0) ? (1 << 0) : (1 << 6);
c += (y0 > z0) ? (1 << 2) : (1 << 4);
c += (y0 > w0) ? (1 << 2) : (1 << 6);
c += (z0 > w0) ? (1 << 4) : (1 << 6);
nvidia::NvVec4 res;
res.setZero();
// Calculate the contribution from the five corners
for (int p = 4; p >= 0; --p)
{
int ixp = ((c >> 0) & 3) >= p ? 1 : 0;
int iyp = ((c >> 2) & 3) >= p ? 1 : 0;
int izp = ((c >> 4) & 3) >= p ? 1 : 0;
int iwp = ((c >> 6) & 3) >= p ? 1 : 0;
float xp = x0 - ixp + (4 - p) * G4;
float yp = y0 - iyp + (4 - p) * G4;
float zp = z0 - izp + (4 - p) * G4;
float wp = w0 - iwp + (4 - p) * G4;
float t = 0.6f - xp * xp - yp * yp - zp * zp - wp * wp;
if (t > 0)
{
//get index
int gradIndex = int((
X_NOISE_GEN * (ix + ixp)
+ Y_NOISE_GEN * (iy + iyp)
+ Z_NOISE_GEN * (iz + izp)
+ W_NOISE_GEN * (iw + iwp)
+ SEED_NOISE_GEN * seed)
& 0xffffffff);
gradIndex ^= (gradIndex >> SHIFT_NOISE_GEN);
gradIndex &= 31;
nvidia::NvVec4 g;
{
const int h = gradIndex;
const int hs = 2 - (h >> 4);
const int h1 = (h >> 3);
g.x = (h1 == 0) ? 0.0f : ((h & 4) ? -1.0f : 1.0f);
g.y = (h1 == 1) ? 0.0f : ((h & (hs << 1)) ? -1.0f : 1.0f);
g.z = (h1 == 2) ? 0.0f : ((h & hs) ? -1.0f : 1.0f);
g.w = (h1 == 3) ? 0.0f : ((h & 1) ? -1.0f : 1.0f);
}
float gdot = (g.x * xp + g.y * yp + g.z * zp + g.w * wp);
float t2 = t * t;
float t3 = t2 * t;
float t4 = t3 * t;
float dt4gdot = 8 * t3 * gdot;
res.x += t4 * g.x - dt4gdot * xp;
res.y += t4 * g.y - dt4gdot * yp;
res.z += t4 * g.z - dt4gdot * zp;
res.w += t4 * gdot;
}
}
// scale the result to cover the range [-1,1]
res *= 27;
return res;
}
/**
Evaluate noise at given 3d-point
\param[in] p Point in which noise will be evaluated
\return Noise value at given point
*/
float sample(nvidia::NvVec3 p)
{
p *= mFrequency;
float result = 0.0f;
float alpha = 1;
for (int32_t i = 1; i <= mOctaves; ++i)
{
result += eval4D(p.x * i, p.y * i, p.z * i, i * 5.0f, mSeed).w * alpha;
alpha *= 0.45f;
}
return result * mAmplitude;
}
};
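/**
    Minimal usage sketch (illustrative only):

        SimplexNoise noise(1.0f, 2.0f, 3, 42);  // amplitude, frequency, octaves, seed
        float value = noise.sample(nvidia::NvVec3(0.1f, 0.2f, 0.3f));
*/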
} // Blast namespace
} // Nv namespace
#endif
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringFractureToolImpl.h
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTAUTHORINGFRACTURETOOLIMPL_H
#define NVBLASTAUTHORINGFRACTURETOOLIMPL_H
#include "NvBlastExtAuthoringFractureTool.h"
#include "NvBlastExtAuthoringMesh.h"
#include <vector>
#include <set>
namespace Nv
{
namespace Blast
{
class SpatialAccelerator;
class Triangulator;
/**
Class for voronoi sites generation inside supplied mesh.
*/
class VoronoiSitesGeneratorImpl : public VoronoiSitesGenerator
{
public:
/**
Voronoi sites should not be generated outside of the fractured mesh, so VoronoiSitesGenerator
should be supplied with fracture mesh.
\param[in] mesh Fracture mesh
\param[in] rnd User supplied random value generator.
\return
*/
VoronoiSitesGeneratorImpl(const Mesh* mesh, RandomGeneratorBase* rnd);
~VoronoiSitesGeneratorImpl();
void release() override;
/**
Set base fracture mesh
*/
void setBaseMesh(const Mesh* m) override;
/**
Access to generated voronoi sites.
    \note User should call NVBLAST_FREE for sites when it is no longer needed
    \param[out] sites Pointer to generated voronoi sites
\return Count of generated voronoi sites.
*/
uint32_t getVoronoiSites(const NvcVec3*& sites) override;
/**
Add site in particular point
\param[in] site Site coordinates
*/
void addSite(const NvcVec3& site) override;
/**
Uniformly generate sites inside the mesh
\param[in] numberOfSites Number of generated sites
*/
void uniformlyGenerateSitesInMesh(uint32_t numberOfSites) override;
/**
Generate sites in clustered fashion
\param[in] numberOfClusters Number of generated clusters
\param[in] sitesPerCluster Number of sites in each cluster
\param[in] clusterRadius Voronoi cells cluster radius
*/
void clusteredSitesGeneration(uint32_t numberOfClusters, uint32_t sitesPerCluster, float clusterRadius) override;
/**
Radial pattern of sites generation
\param[in] center Center of generated pattern
\param[in] normal Normal to plane in which sites are generated
\param[in] radius Pattern radius
\param[in] angularSteps Number of angular steps
\param[in] radialSteps Number of radial steps
\param[in] angleOffset Angle offset at each radial step
\param[in] variability Randomness of sites distribution
*/
void radialPattern(const NvcVec3& center, const NvcVec3& normal, float radius, int32_t angularSteps, int32_t radialSteps, float angleOffset = 0.0f, float variability = 0.0f) override;
/**
Generate sites inside sphere
\param[in] count Count of generated sites
\param[in] radius Radius of sphere
\param[in] center Center of sphere
*/
void generateInSphere(const uint32_t count, const float radius, const NvcVec3& center) override;
/**
Set stencil mesh. With stencil mesh sites are generated only inside both of fracture and stencil meshes.
\param[in] stencil Stencil mesh.
*/
void setStencil(const Mesh* stencil) override;
/**
Removes stencil mesh
*/
void clearStencil() override;
/**
Deletes sites inside supplied sphere
\param[in] radius Radius of sphere
\param[in] center Center of sphere
\param[in] eraserProbability Probability of removing some particular site
*/
void deleteInSphere(const float radius, const NvcVec3& center, const float eraserProbability = 1) override;
private:
std::vector <NvcVec3> mGeneratedSites;
const Mesh* mMesh;
const Mesh* mStencil;
RandomGeneratorBase* mRnd;
SpatialAccelerator* mAccelerator;
};
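/**
    Typical usage sketch (illustrative only; 'mesh' and 'rnd' are assumed to be a valid
    Nv::Blast::Mesh and RandomGeneratorBase supplied by the caller):

        VoronoiSitesGeneratorImpl generator(mesh, rnd);
        generator.uniformlyGenerateSitesInMesh(64);
        const NvcVec3* sites = nullptr;
        const uint32_t siteCount = generator.getVoronoiSites(sites);
        // pass (siteCount, sites) to FractureTool::voronoiFracturing()
*/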
/**
FractureTool class provides methods to fracture provided mesh and generate Blast asset data
*/
class FractureToolImpl : public FractureTool
{
public:
/**
FractureTool can log asset creation info if logCallback is provided.
*/
FractureToolImpl() : mRemoveIslands(false)
{
reset();
}
~FractureToolImpl()
{
reset();
}
void release() override;
/**
Reset FractureTool state.
*/
void reset() override;
/**
Set the material id to use for new interior faces. Defaults to kMaterialInteriorId
*/
void setInteriorMaterialId(int32_t materialId) override;
/**
Gets the material id to use for new interior faces
*/
int32_t getInteriorMaterialId() const override;
/**
    Replaces a material id on faces with a new one
*/
void replaceMaterialId(int32_t oldMaterialId, int32_t newMaterialId) override;
/**
    Set input meshes which will be fractured; the FractureTool will be reset.
If ids != nullptr, it must point to an array of length meshSizes.
Each mesh will be assigned to a chunk with ID given by the corresponding element in ids.
If the corresponding element is negative, or ids is NULL, then the chunk will be assigned
an arbitrary (but currently unused) ID.
Returns true iff all meshes were assigned chunks with valid IDs.
*/
bool setSourceMeshes(Mesh const * const * meshes, uint32_t meshesSize, const int32_t* ids = nullptr) override;
/**
    Set chunk mesh. parentId should be valid; returns the ID of the new chunk.
if chunkId >= 0 and currently unused, then that ID will be used (and returned).
Otherwise an arbitrary (but currently unused) ID will be used and returned.
*/
int32_t setChunkMesh(const Mesh* mesh, int32_t parentId, int32_t chunkId = -1) override;
/**
Get chunk mesh in polygonal representation
*/
Mesh* createChunkMesh(int32_t chunkInfoIndex, bool splitUVs = true) override;
/**
Fractures specified chunk with voronoi method.
\param[in] chunkId Chunk to fracture
\param[in] cellPoints Array of voronoi sites
\param[in] replaceChunk if 'true', newly generated chunks will replace source chunk, if 'false', newly generated chunks will be at next depth level, source chunk will be parent for them.
                                    The case replaceChunk == true && chunkId == 0 is considered invalid input.
\return If 0, fracturing is successful.
*/
int32_t voronoiFracturing(uint32_t chunkId, uint32_t cellCount, const NvcVec3* cellPoints, bool replaceChunk) override;
/**
Fractures specified chunk with voronoi method. Cells can be scaled along x,y,z axes.
\param[in] chunkId Chunk to fracture
    \param[in] cellCount            Number of voronoi sites
    \param[in] cellPoints           Array of voronoi sites
\param[in] scale Voronoi cells scaling factor
\param[in] rotation Voronoi cells rotation. Has no effect without cells scale factor
\param[in] replaceChunk if 'true', newly generated chunks will replace source chunk, if 'false', newly generated chunks will be at next depth level, source chunk will be parent for them.
                                    The case replaceChunk == true && chunkId == 0 is considered invalid input.
\return If 0, fracturing is successful.
*/
int32_t voronoiFracturing(uint32_t chunkId, uint32_t cellCount, const NvcVec3* cellPoints, const NvcVec3& scale, const NvcQuat& rotation, bool replaceChunk) override;
/**
Fractures specified chunk with slicing method.
\param[in] chunkId Chunk to fracture
\param[in] conf Slicing parameters, see SlicingConfiguration.
\param[in] replaceChunk if 'true', newly generated chunks will replace source chunk, if 'false', newly generated chunks will be at next depth level, source chunk will be parent for them.
                                    The case replaceChunk == true && chunkId == 0 is considered invalid input.
\param[in] rnd User supplied random number generator
\return If 0, fracturing is successful.
*/
int32_t slicing(uint32_t chunkId, const SlicingConfiguration& conf, bool replaceChunk, RandomGeneratorBase* rnd) override;
/**
Cut chunk with plane.
\param[in] chunkId Chunk to fracture
\param[in] normal Plane normal
\param[in] position Point on plane
\param[in] noise Noise configuration for plane-chunk intersection, see NoiseConfiguration.
\param[in] replaceChunk if 'true', newly generated chunks will replace source chunk, if 'false', newly generated chunks will be at next depth level, source chunk will be parent for them.
                                    The case replaceChunk == true && chunkId == 0 is considered invalid input.
\param[in] rnd User supplied random number generator
\return If 0, fracturing is successful.
*/
int32_t cut(uint32_t chunkId, const NvcVec3& normal, const NvcVec3& position, const NoiseConfiguration& noise, bool replaceChunk, RandomGeneratorBase* rnd) override;
/**
Cutout fracture for specified chunk.
\param[in] chunkId Chunk to fracture
\param[in] conf Cutout parameters, see CutoutConfiguration.
\param[in] replaceChunk if 'true', newly generated chunks will replace source chunk, if 'false', newly generated chunks will be at next depth level, source chunk will be parent for them.
                                    The case replaceChunk == true && chunkId == 0 is considered invalid input.
\param[in] rnd User supplied random number generator
\return If 0, fracturing is successful.
*/
int32_t cutout(uint32_t chunkId, CutoutConfiguration conf, bool replaceChunk, RandomGeneratorBase* rnd) override;
/**
Creates resulting fractured mesh geometry from intermediate format
*/
void finalizeFracturing() override;
uint32_t getChunkCount() const override;
/**
Get chunk information
*/
const ChunkInfo& getChunkInfo(int32_t chunkInfoIndex) override;
/**
Get percentage of mesh overlap.
percentage computed as volume(intersection(meshA , meshB)) / volume (meshA)
\param[in] meshA Mesh A
\param[in] meshB Mesh B
\return mesh overlap percentage
*/
float getMeshOverlap(const Mesh& meshA, const Mesh& meshB) override;
/**
Get chunk base mesh
    \note User should call NVBLAST_FREE for output when it is no longer needed
\param[in] chunkIndex Chunk index
\param[out] output Array of triangles to be filled
\return number of triangles in base mesh
*/
uint32_t getBaseMesh(int32_t chunkIndex, Triangle*& output) override;
/**
Update chunk base mesh
    \note Doesn't allocate the output array; Triangle* output should be preallocated by the user
\param[in] chunkIndex Chunk index
\param[out] output Array of triangles to be filled
\return number of triangles in base mesh
*/
uint32_t updateBaseMesh(int32_t chunkIndex, Triangle* output) override;
/**
Return info index of chunk with specified chunkId
\param[in] chunkId Chunk ID
    \return Chunk index in internal buffer, or -1 if no such chunk exists.
*/
int32_t getChunkInfoIndex(int32_t chunkId) const override;
/**
Return id of chunk with specified index.
\param[in] chunkInfoIndex Chunk info index
\return Chunk id or -1 if there is no such chunk.
*/
int32_t getChunkId(int32_t chunkInfoIndex) const override;
/**
Return depth level of the given chunk
\param[in] chunkId Chunk ID
\return Chunk depth or -1 if there is no such chunk.
*/
int32_t getChunkDepth(int32_t chunkId) const override;
/**
    Return array of chunk IDs at the given depth.
    \note User should call NVBLAST_FREE for chunkIds when it is no longer needed
    \param[in] depth Chunk depth
    \param[out] chunkIds Pointer to array of chunk IDs
\return Number of chunks in array
*/
uint32_t getChunksIdAtDepth(uint32_t depth, int32_t*& chunkIds) const override;
/**
Get result geometry without noise as vertex and index buffers, where index buffers contain series of triplets
which represent triangles.
    \note User should call NVBLAST_FREE for vertexBuffer, indexBuffer and indexBufferOffsets when they are no longer needed
\param[out] vertexBuffer Array of vertices to be filled
\param[out] indexBuffer Array of indices to be filled
\param[out] indexBufferOffsets Array of offsets in indexBuffer for each base mesh.
Contains getChunkCount() + 1 elements. Last one is indexBuffer size
\return Number of vertices in vertexBuffer
*/
uint32_t getBufferedBaseMeshes(Vertex*& vertexBuffer, uint32_t*& indexBuffer, uint32_t*& indexBufferOffsets) override;
/**
    Set automatic island removal. May cause instabilities.
    \param[in] isRemoveIslands Whether or not to remove islands.
*/
void setRemoveIslands(bool isRemoveIslands) override;
/**
    Try to find islands and remove them for the specified chunk. If the chunk has children, island removal can lead to wrong results! Apply it before further chunk splitting.
    \param[in] chunkId Chunk ID which should be checked for islands
    \return Number of islands found
*/
int32_t islandDetectionAndRemoving(int32_t chunkId, bool createAtNewDepth = false) override;
/**
Check if input mesh contains open edges. Open edges can lead to wrong fracturing results.
\return true if mesh contains open edges
*/
bool isMeshContainOpenEdges(const Mesh* input) override;
bool deleteChunkSubhierarchy(int32_t chunkId, bool deleteRoot = false) override;
void uniteChunks(uint32_t threshold, uint32_t targetClusterSize,
const uint32_t* chunksToMerge, uint32_t mergeChunkCount,
const NvcVec2i* adjChunks, uint32_t adjChunksSize,
bool removeOriginalChunks = false) override;
bool setApproximateBonding(uint32_t chunkId, bool useApproximateBonding) override;
/**
Rescale interior uv coordinates of given chunk to fit square of given size.
\param[in] side Size of square side
\param[in] chunkId Chunk ID for which UVs should be scaled.
*/
void fitUvToRect(float side, uint32_t chunkId) override;
/**
    Rescale interior uv coordinates of all existing chunks to fit a square of given size; relative sizes will be preserved.
\param[in] side Size of square side
*/
void fitAllUvToRect(float side) override;
private:
bool isAncestorForChunk(int32_t ancestorId, int32_t chunkId);
int32_t slicingNoisy(uint32_t chunkId, const SlicingConfiguration& conf, bool replaceChunk, RandomGeneratorBase* rnd);
uint32_t stretchGroup(const std::vector<uint32_t>& group, std::vector<std::vector<uint32_t>>& graph);
void rebuildAdjGraph(const std::vector<uint32_t>& chunksToRebuild, const NvcVec2i* adjChunks, uint32_t adjChunksSize,
std::vector<std::vector<uint32_t> >& chunkGraph);
void fitAllUvToRect(float side, std::set<uint32_t>& mask);
void markLeaves();
/*
* Meshes are transformed to fit a unit cube, for algorithmic stability. This transform is stored
* in the ChunkInfo. Some meshes are created from already-transformed chunks. If so, set
* fromTransformed = true, so that the transform-to-world can be concatenated with the source mesh's.
*
* chunkInfo.parentChunkId must be valid if fromTransformed == true.
*
* Returns true iff successful.
*/
bool setChunkInfoMesh(ChunkInfo& chunkInfo, Mesh* mesh, bool fromTransformed = true);
/**
Returns newly created chunk index in mChunkData.
*/
uint32_t createNewChunk(uint32_t parentChunkId);
/**
* Returns a previously unused ID.
*/
int32_t createId();
/**
* Mark the given ID as being used. Returns false if that ID was already marked as in use, true otherwise
*/
bool reserveId(int32_t id);
protected:
/* Chunk mesh wrappers */
std::vector<Triangulator*> mChunkPostprocessors;
int64_t mPlaneIndexerOffset;
int32_t mNextChunkId;
std::set<int32_t> mChunkIdsUsed;
std::vector<ChunkInfo> mChunkData;
bool mRemoveIslands;
int32_t mInteriorMaterialId;
};
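/**
    Typical fracturing flow (illustrative sketch only; 'mesh', 'sites' and 'siteCount' are
    assumed to be provided by the caller):

        FractureToolImpl tool;
        tool.setSourceMeshes(&mesh, 1);                      // single root mesh
        const int32_t rootId = tool.getChunkId(0);           // ID assigned to the root chunk
        tool.voronoiFracturing((uint32_t)rootId, siteCount, sites, false);
        tool.finalizeFracturing();
        // query results via getChunkCount() / getBaseMesh() / getChunkInfo()
*/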
int32_t findCellBasePlanes(const std::vector<NvcVec3>& sites, std::vector<std::vector<std::pair<int32_t, int32_t>>>& neighbors);
Mesh* getCellMesh(class BooleanEvaluator& eval, int32_t planeIndexerOffset, int32_t cellId, const std::vector<NvcVec3>& sites, const std::vector<std::vector<std::pair<int32_t, int32_t>>>& neighbors, int32_t interiorMaterialId, NvcVec3 origin);
} // namespace Blast
} // namespace Nv
#endif // ifndef NVBLASTAUTHORINGFRACTURETOOLIMPL_H
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringBondGeneratorImpl.cpp
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved.
// This warning arises when using some stl containers with older versions of VC
// c:\program files (x86)\microsoft visual studio 12.0\vc\include\xtree(1826): warning C4702: unreachable code
#include "NvPreprocessor.h"
#if NV_VC && NV_VC < 14
#pragma warning(disable : 4702)
#endif
#include <NvBlastExtAuthoringBondGeneratorImpl.h>
#include <NvBlast.h>
#include <NvBlastGlobals.h>
#include <NvBlastNvSharedHelpers.h>
#include "NvBlastExtTriangleProcessor.h"
#include "NvBlastExtApexSharedParts.h"
#include "NvBlastExtAuthoringInternalCommon.h"
#include "NvBlastExtAuthoringTypes.h"
#include <vector>
#include <map>
#include "NvPlane.h"
#include <algorithm>
#include <cmath>
#include <memory>
#include <set>
#define SAFE_ARRAY_NEW(T, x) ((x) > 0) ? reinterpret_cast<T*>(NVBLAST_ALLOC(sizeof(T) * (x))) : nullptr;
//#define DEBUG_OUTPUT
#ifdef DEBUG_OUTPUT
void saveGeometryToObj(std::vector<NvVec3>& triangles, const char* filepath)
{
FILE* outStream = fopen(filepath, "w");
for (uint32_t i = 0; i < triangles.size(); ++i)
{
fprintf(outStream, "v %lf %lf %lf\n", triangles[i].x, triangles[i].y, triangles[i].z);
++i;
fprintf(outStream, "v %lf %lf %lf\n", triangles[i].x, triangles[i].y, triangles[i].z);
++i;
fprintf(outStream, "v %lf %lf %lf\n", triangles[i].x, triangles[i].y, triangles[i].z);
}
for (uint32_t i = 0; i < triangles.size() / 3; ++i)
{
NvVec3 normal =
(triangles[3 * i + 2] - triangles[3 * i]).cross((triangles[3 * i + 1] - triangles[3 * i])).getNormalized();
fprintf(outStream, "vn %lf %lf %lf\n", normal.x, normal.y, normal.z);
fprintf(outStream, "vn %lf %lf %lf\n", normal.x, normal.y, normal.z);
fprintf(outStream, "vn %lf %lf %lf\n", normal.x, normal.y, normal.z);
}
int indx = 1;
for (uint32_t i = 0; i < triangles.size() / 3; ++i)
{
fprintf(outStream, "f %d//%d ", indx, indx);
indx++;
fprintf(outStream, "%d//%d ", indx, indx);
indx++;
fprintf(outStream, "%d//%d \n", indx, indx);
indx++;
}
fclose(outStream);
}
std::vector<NvVec3> intersectionBuffer;
std::vector<NvVec3> meshBuffer;
#endif
namespace Nv
{
namespace Blast
{
#define EPS_PLANE 0.0001f
nvidia::NvVec3 getNormal(const Triangle& t)
{
return toNvShared(t.b.p - t.a.p).cross(toNvShared(t.c.p - t.a.p));
}
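// Epsilon-tolerant lexicographic ordering on (d, n.x, n.y, n.z). Sorting PlaneChunkIndexer
// entries with this comparator puts coplanar triangles from different chunks next to each
// other so they can be located with lower_bound/upper_bound.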
bool planeComparer(const PlaneChunkIndexer& as, const PlaneChunkIndexer& bs)
{
const NvcPlane& a = as.plane;
const NvcPlane& b = bs.plane;
if (a.d + EPS_PLANE < b.d)
return true;
if (a.d - EPS_PLANE > b.d)
return false;
if (a.n.x + EPS_PLANE < b.n.x)
return true;
if (a.n.x - EPS_PLANE > b.n.x)
return false;
if (a.n.y + EPS_PLANE < b.n.y)
return true;
if (a.n.y - EPS_PLANE > b.n.y)
return false;
return a.n.z + EPS_PLANE < b.n.z;
}
struct Bond
{
int32_t m_chunkId;
int32_t m_planeIndex;
int32_t triangleIndex;
bool operator<(const Bond& inp) const
{
if (abs(m_planeIndex) == abs(inp.m_planeIndex))
{
return m_chunkId < inp.m_chunkId;
}
else
{
return abs(m_planeIndex) < abs(inp.m_planeIndex);
}
}
};
struct BondInfo
{
float area;
nvidia::NvBounds3 m_bb;
nvidia::NvVec3 centroid;
nvidia::NvVec3 normal;
int32_t m_chunkId;
};
inline nvidia::NvVec3 getVertex(const Triangle& t, uint32_t i)
{
return toNvShared((&t.a)[i].p);
}
void AddTtAnchorPoints(const Triangle* a, const Triangle* b, std::vector<NvVec3>& points)
{
nvidia::NvVec3 na = getNormal(*a).getNormalized();
nvidia::NvVec3 nb = getNormal(*b).getNormalized();
nvidia::NvPlane pla(toNvShared(a->a.p), na);
nvidia::NvPlane plb(toNvShared(b->a.p), nb);
ProjectionDirections da = getProjectionDirection(na);
ProjectionDirections db = getProjectionDirection(nb);
TriangleProcessor prc;
TrPrcTriangle2d ta(getProjectedPoint(toNvShared(a->a.p), da), getProjectedPoint(toNvShared(a->b.p), da),
getProjectedPoint(toNvShared(a->c.p), da));
TrPrcTriangle2d tb(getProjectedPoint(toNvShared(b->a.p), db), getProjectedPoint(toNvShared(b->b.p), db),
getProjectedPoint(toNvShared(b->c.p), db));
    /**
        Compute anchor points: intersect each triangle's edges with the other triangle's
        plane and keep intersection points that project inside the other triangle.
    */
for (uint32_t i = 0; i < 3; ++i)
{
nvidia::NvVec3 pt;
if (getPlaneSegmentIntersection(pla, getVertex(*b, i), getVertex(*b, (i + 1) % 3), pt))
{
nvidia::NvVec2 pt2 = getProjectedPoint(pt, da);
if (prc.isPointInside(pt2, ta))
{
points.push_back(pt);
}
}
if (getPlaneSegmentIntersection(plb, getVertex(*a, i), getVertex(*a, (i + 1) % 3), pt))
{
NvVec2 pt2 = getProjectedPoint(pt, db);
if (prc.isPointInside(pt2, tb))
{
points.push_back(pt);
}
}
}
}
inline bool
pointInsidePoly(const NvVec3& pt, const uint8_t* indices, uint16_t indexCount, const NvVec3* verts, const NvVec3& n)
{
int s = 0;
for (uint16_t i = 0; i < indexCount; ++i)
{
const NvVec3 r0 = verts[indices[i]] - pt;
const NvVec3 r1 = verts[indices[(i + 1) % indexCount]] - pt;
const float cn = r0.cross(r1).dot(n);
const int cns = cn >= 0 ? 1 : -1;
if (!s)
{
s = cns;
}
if (cns * s < 0)
{
return false;
}
}
return true;
}
void AddPpAnchorPoints(const uint8_t* indicesA, uint16_t indexCountA, const NvVec3* vertsA, const float planeA[4],
const uint8_t* indicesB, uint16_t indexCountB, const NvVec3* vertsB, const float planeB[4],
std::vector<NvVec3>& points)
{
NvPlane pla(planeA[0], planeA[1], planeA[2], planeA[3]);
NvPlane plb(planeB[0], planeB[1], planeB[2], planeB[3]);
for (uint16_t iA = 0; iA < indexCountA; ++iA)
{
NvVec3 pt;
if (getPlaneSegmentIntersection(plb, vertsA[indicesA[iA]], vertsA[indicesA[(iA + 1) % indexCountA]], pt))
{
if (pointInsidePoly(pt, indicesB, indexCountB, vertsB, plb.n))
{
points.push_back(pt);
}
}
}
    for (uint16_t iB = 0; iB < indexCountB; ++iB)
    {
        NvVec3 pt;
        if (getPlaneSegmentIntersection(pla, vertsB[indicesB[iB]], vertsB[indicesB[(iB + 1) % indexCountB]], pt))
{
if (pointInsidePoly(pt, indicesA, indexCountA, vertsA, pla.n))
{
points.push_back(pt);
}
}
}
}
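// Estimates the bond interface between two chunks. If the hulls are separated (or no chunk
// geometry is available), the interface is approximated by gathering hull points close to the
// separating midplane and building a convex hull there; otherwise anchor points are collected
// from hull-polygon (or triangle-triangle) intersections and the area is approximated from
// their spread. Returns the interface area, or 0 if no bond should be created.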
float BlastBondGeneratorImpl::processWithMidplanes(TriangleProcessor* trProcessor, const Triangle* mA, uint32_t mavc,
const Triangle* mB, uint32_t mbvc, const CollisionHull* hull1,
const CollisionHull* hull2, const std::vector<NvVec3>& hull1p,
const std::vector<NvVec3>& hull2p, NvVec3& normal, NvVec3& centroid,
float maxRelSeparation)
{
NvBounds3 bounds;
NvBounds3 aBounds;
NvBounds3 bBounds;
bounds.setEmpty();
aBounds.setEmpty();
bBounds.setEmpty();
NvVec3 chunk1Centroid(0, 0, 0);
NvVec3 chunk2Centroid(0, 0, 0);
///////////////////////////////////////////////////////////////////////////////////
if (hull1p.size() < 4 || hull2p.size() < 4)
{
return 0.0f;
}
for (uint32_t i = 0; i < hull1p.size(); ++i)
{
chunk1Centroid += hull1p[i];
bounds.include(hull1p[i]);
aBounds.include(hull1p[i]);
}
for (uint32_t i = 0; i < hull2p.size(); ++i)
{
chunk2Centroid += hull2p[i];
bounds.include(hull2p[i]);
bBounds.include(hull2p[i]);
}
chunk1Centroid *= (1.0f / hull1p.size());
chunk2Centroid *= (1.0f / hull2p.size());
const float maxSeparation = maxRelSeparation * std::sqrt(std::max(aBounds.getExtents().magnitudeSquared(), bBounds.getExtents().magnitudeSquared()));
Separation separation;
if (!importerHullsInProximityApexFree(hull1p.size(), hull1p.data(), aBounds, NvTransform(NvIdentity),
NvVec3(1, 1, 1), hull2p.size(), hull2p.data(), bBounds,
NvTransform(NvIdentity), NvVec3(1, 1, 1), 2.0f * maxSeparation, &separation))
{
return 0.0f;
}
const bool have_geometry = (mA != nullptr && mB != nullptr) || (hull1 != nullptr && hull2 != nullptr);
if (separation.getDistance() > 0 || !have_geometry) // If chunks don't intersect then use midplane to produce bond,
// otherwise midplane can be wrong (only if we have geometry)
{
// Build first plane interface
NvPlane midplane = separation.plane;
if (!midplane.n.isFinite())
{
return 0.0f;
}
std::vector<NvVec3> interfacePoints;
float firstCentroidSide = (midplane.distance(chunk1Centroid) > 0) ? 1 : -1;
float secondCentroidSide = (midplane.distance(chunk2Centroid) > 0) ? 1 : -1;
for (uint32_t i = 0; i < hull1p.size(); ++i)
{
float dst = midplane.distance(hull1p[i]);
if (dst * firstCentroidSide < maxSeparation)
{
interfacePoints.push_back(hull1p[i]);
}
}
for (uint32_t i = 0; i < hull2p.size(); ++i)
{
float dst = midplane.distance(hull2p[i]);
if (dst * secondCentroidSide < maxSeparation)
{
interfacePoints.push_back(hull2p[i]);
}
}
std::vector<NvVec3> convexHull;
trProcessor->buildConvexHull(interfacePoints, convexHull, midplane.n);
float area = 0;
NvVec3 centroidLocal(0, 0, 0);
if (convexHull.size() < 3)
{
return 0.0f;
}
for (uint32_t i = 0; i < convexHull.size() - 1; ++i)
{
centroidLocal += convexHull[i];
area += (convexHull[i] - convexHull[0]).cross((convexHull[i + 1] - convexHull[0])).magnitude();
}
centroidLocal += convexHull.back();
centroidLocal *= (1.0f / convexHull.size());
        // Orient the normal from chunk1 toward chunk2
        normal = midplane.n;
        float direction = midplane.n.dot(chunk2Centroid - chunk1Centroid);
        if (direction < 0)
        {
            normal = -normal;
        }
centroid = centroidLocal;
return area * 0.5f;
}
else
{
float area = 0.0f;
std::vector<NvVec3> intersectionAnchors;
if (hull1 != nullptr && hull2 != nullptr) // Use hulls
{
for (uint32_t i1 = 0; i1 < hull1->polygonDataCount; ++i1)
{
HullPolygon& poly1 = hull1->polygonData[i1];
for (uint32_t i2 = 0; i2 < hull2->polygonDataCount; ++i2)
{
HullPolygon& poly2 = hull2->polygonData[i2];
AddPpAnchorPoints(reinterpret_cast<uint8_t*>(hull1->indices) + poly1.indexBase, poly1.vertexCount,
toNvShared(hull1->points), poly1.plane,
reinterpret_cast<uint8_t*>(hull2->indices) + poly2.indexBase, poly2.vertexCount,
toNvShared(hull2->points), poly2.plane, intersectionAnchors);
}
}
}
else if (mA != nullptr && mB != nullptr) // Use triangles
{
for (uint32_t i = 0; i < mavc; ++i)
{
for (uint32_t j = 0; j < mbvc; ++j)
{
AddTtAnchorPoints(mA + i, mB + j, intersectionAnchors);
}
}
}
else
{
NVBLAST_ASSERT_WITH_MESSAGE(false, "collision hulls and triangle data are both invalid, this shouldn't happen");
return 0.0f;
}
NvVec3 lcoid(0, 0, 0);
for (uint32_t i = 0; i < intersectionAnchors.size(); ++i)
{
lcoid += intersectionAnchors[i];
}
lcoid *= (1.0f / intersectionAnchors.size());
centroid = lcoid;
if (intersectionAnchors.size() < 2)
{
return 0.0f;
}
NvVec3 dir1 = intersectionAnchors[0] - lcoid;
NvVec3 dir2 = chunk2Centroid - chunk1Centroid; // A more reasonable fallback than (0,0,0)
float maxMagn = 0.0f;
float maxDist = 0.0f;
for (uint32_t j = 0; j < intersectionAnchors.size(); ++j)
{
float d = (intersectionAnchors[j] - lcoid).magnitude();
NvVec3 tempNormal = (intersectionAnchors[j] - lcoid).cross(dir1);
maxDist = std::max(d, maxDist);
            if (tempNormal.magnitude() > maxMagn)
            {
                maxMagn = tempNormal.magnitude();
                dir2 = tempNormal;
            }
}
normal = dir2.getNormalized();
        area = (maxDist * maxDist) * 3.14f;  // Approximate the bond area as a circle of radius maxDist
return area;
}
}
struct BondGenerationCandidate
{
NvVec3 point;
bool end;
uint32_t parentChunk;
uint32_t parentComponent;
BondGenerationCandidate();
BondGenerationCandidate(const NvVec3& p, bool isEnd, uint32_t pr, uint32_t c)
: point(p), end(isEnd), parentChunk(pr), parentComponent(c){};
bool operator<(const BondGenerationCandidate& in) const
{
if (point.x < in.point.x)
return true;
if (point.x > in.point.x)
return false;
if (point.y < in.point.y)
return true;
if (point.y > in.point.y)
return false;
if (point.z < in.point.z)
return true;
if (point.z > in.point.z)
return false;
return end < in.end;
};
};
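// Broad phase over support chunks: the min/max corners of each chunk's (slightly inflated)
// bounds are sorted as sweep events; chunks whose intervals overlap become candidate pairs,
// which are then tested hull-vs-hull with processWithMidplanes() to emit bond descriptors.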
int32_t BlastBondGeneratorImpl::createFullBondListAveraged(uint32_t meshCount, const uint32_t* geometryOffset,
const Triangle* geometry, const CollisionHull** chunkHulls,
const bool* supportFlags, const uint32_t* meshGroups,
NvBlastBondDesc*& resultBondDescs, BondGenerationConfig conf,
std::set<std::pair<uint32_t, uint32_t> >* pairNotToTest)
{
std::vector<std::vector<NvcVec3> > chunksPoints(meshCount);
std::vector<NvBounds3> bounds(meshCount);
if (!chunkHulls)
{
for (uint32_t i = 0; i < meshCount; ++i)
{
bounds[i].setEmpty();
if (!supportFlags[i])
{
continue;
}
uint32_t count = geometryOffset[i + 1] - geometryOffset[i];
for (uint32_t j = 0; j < count; ++j)
{
chunksPoints[i].push_back(geometry[geometryOffset[i] + j].a.p);
chunksPoints[i].push_back(geometry[geometryOffset[i] + j].b.p);
chunksPoints[i].push_back(geometry[geometryOffset[i] + j].c.p);
bounds[i].include(toNvShared(geometry[geometryOffset[i] + j].a.p));
bounds[i].include(toNvShared(geometry[geometryOffset[i] + j].b.p));
bounds[i].include(toNvShared(geometry[geometryOffset[i] + j].c.p));
}
}
}
std::vector<std::vector<std::vector<NvVec3> > > hullPoints(meshCount);
std::vector<BondGenerationCandidate> candidates;
std::vector<CollisionHull*> tempChunkHulls(meshCount, nullptr);
for (uint32_t chunk = 0; chunk < meshCount; ++chunk)
{
if (!supportFlags[chunk])
{
continue;
}
NvBounds3 bnd(NvBounds3::empty());
uint32_t hullCountForMesh = 0;
const CollisionHull** beginChunkHulls = nullptr;
if (chunkHulls)
{
hullCountForMesh = geometryOffset[chunk + 1] - geometryOffset[chunk];
beginChunkHulls = chunkHulls + geometryOffset[chunk];
}
else
{
// build a convex hull and store it in the temp slot
tempChunkHulls[chunk] =
mConvexMeshBuilder->buildCollisionGeometry(chunksPoints[chunk].size(), chunksPoints[chunk].data());
hullCountForMesh = 1;
beginChunkHulls = const_cast<const CollisionHull**>(&tempChunkHulls[chunk]);
}
hullPoints[chunk].resize(hullCountForMesh);
for (uint32_t hull = 0; hull < hullCountForMesh; ++hull)
{
auto& curHull = hullPoints[chunk][hull];
const uint32_t pointCount = beginChunkHulls[hull]->pointsCount;
curHull.resize(pointCount);
for (uint32_t i = 0; i < pointCount; ++i)
{
curHull[i] = toNvShared(beginChunkHulls[hull]->points[i]);
bnd.include(curHull[i]);
}
}
float minSide = bnd.getDimensions().abs().minElement();
if (minSide > 0.f)
{
float scaling = std::max(1.1f, conf.maxSeparation / (minSide));
bnd.scaleFast(scaling);
}
candidates.push_back(
BondGenerationCandidate(bnd.minimum, false, chunk, meshGroups != nullptr ? meshGroups[chunk] : 0));
candidates.push_back(
BondGenerationCandidate(bnd.maximum, true, chunk, meshGroups != nullptr ? meshGroups[chunk] : 0));
}
std::sort(candidates.begin(), candidates.end());
std::set<uint32_t> listOfActiveChunks;
std::vector<std::vector<uint32_t> > possibleBondGraph(meshCount);
for (uint32_t idx = 0; idx < candidates.size(); ++idx)
{
if (!candidates[idx].end) // If new candidate
{
for (uint32_t activeChunk : listOfActiveChunks)
{
if (meshGroups != nullptr && (meshGroups[activeChunk] == candidates[idx].parentComponent))
                    continue;  // Don't connect a component with itself.
possibleBondGraph[activeChunk].push_back(candidates[idx].parentChunk);
}
listOfActiveChunks.insert(candidates[idx].parentChunk);
}
else
{
listOfActiveChunks.erase(candidates[idx].parentChunk);
}
}
TriangleProcessor trProcessor;
std::vector<NvBlastBondDesc> mResultBondDescs;
for (uint32_t i = 0; i < meshCount; ++i)
{
const uint32_t ihullCount = hullPoints[i].size();
for (uint32_t tj = 0; tj < possibleBondGraph[i].size(); ++tj)
{
uint32_t j = possibleBondGraph[i][tj];
auto pr = (i < j) ? std::make_pair(i, j) : std::make_pair(j, i);
if (pairNotToTest != nullptr && pairNotToTest->find(pr) != pairNotToTest->end())
{
                continue;  // These chunks should not generate bonds. This is used for mixed generation with bondFrom
}
const uint32_t jhullCount = hullPoints[j].size();
for (uint32_t ihull = 0; ihull < ihullCount; ++ihull)
{
for (uint32_t jhull = 0; jhull < jhullCount; ++jhull)
{
NvVec3 normal;
NvVec3 centroid;
float area = processWithMidplanes(
&trProcessor, geometry ? geometry + geometryOffset[i] : nullptr,
geometryOffset[i + 1] - geometryOffset[i], geometry ? geometry + geometryOffset[j] : nullptr,
geometryOffset[j + 1] - geometryOffset[j],
chunkHulls ? chunkHulls[geometryOffset[i] + ihull] : tempChunkHulls[i],
chunkHulls ? chunkHulls[geometryOffset[j] + jhull] : tempChunkHulls[j],
hullPoints[i][ihull], hullPoints[j][jhull], normal, centroid, conf.maxSeparation);
if (area > 0)
{
NvBlastBondDesc bDesc = NvBlastBondDesc();
bDesc.chunkIndices[0] = i;
bDesc.chunkIndices[1] = j;
bDesc.bond.area = area;
bDesc.bond.centroid[0] = centroid.x;
bDesc.bond.centroid[1] = centroid.y;
bDesc.bond.centroid[2] = centroid.z;
uint32_t maxIndex = std::max(i, j);
if ((bounds[maxIndex].getCenter() - centroid).dot(normal) < 0)
{
normal = -normal;
}
bDesc.bond.normal[0] = normal.x;
bDesc.bond.normal[1] = normal.y;
bDesc.bond.normal[2] = normal.z;
mResultBondDescs.push_back(bDesc);
}
}
}
}
}
// release any temp hulls allocated
for (CollisionHull* tempHullPtr : tempChunkHulls)
{
if (tempHullPtr)
{
mConvexMeshBuilder->releaseCollisionHull(tempHullPtr);
}
}
resultBondDescs = SAFE_ARRAY_NEW(NvBlastBondDesc, mResultBondDescs.size());
memcpy(resultBondDescs, mResultBondDescs.data(), sizeof(NvBlastBondDesc) * mResultBondDescs.size());
return mResultBondDescs.size();
}
uint32_t isSamePlane(NvcPlane& a, NvcPlane& b)
{
if (NvAbs(a.d - b.d) > EPS_PLANE)
return 0;
if (NvAbs(a.n.x - b.n.x) > EPS_PLANE)
return 0;
if (NvAbs(a.n.y - b.n.y) > EPS_PLANE)
return 0;
if (NvAbs(a.n.z - b.n.z) > EPS_PLANE)
return 0;
return 1;
}
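// Exact bond generation: every triangle of every support chunk is indexed by its plane, the
// index is sorted with planeComparer, and coplanar triangles with opposite-facing planes in
// different chunks are intersected exactly to accumulate bond area and centroid.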
int32_t BlastBondGeneratorImpl::createFullBondListExact(uint32_t meshCount, const uint32_t* geometryOffset,
const Triangle* geometry, const bool* supportFlags,
NvBlastBondDesc*& resultBondDescs, BondGenerationConfig conf)
{
std::vector<PlaneChunkIndexer> planeTriangleMapping;
NV_UNUSED(conf);
for (uint32_t i = 0; i < meshCount; ++i)
{
if (!supportFlags[i])
{
continue;
}
uint32_t count = geometryOffset[i + 1] - geometryOffset[i];
for (uint32_t j = 0; j < count; ++j)
{
#ifdef DEBUG_OUTPUT
meshBuffer.push_back(geometry[geometryOffset[i] + j].a.p);
meshBuffer.push_back(geometry[geometryOffset[i] + j].b.p);
meshBuffer.push_back(geometry[geometryOffset[i] + j].c.p);
#endif
NvcPlane nPlane = fromNvShared(nvidia::NvPlane(toNvShared(geometry[geometryOffset[i] + j].a.p),
toNvShared(geometry[geometryOffset[i] + j].b.p),
toNvShared(geometry[geometryOffset[i] + j].c.p)));
planeTriangleMapping.push_back({ (int32_t)i, (int32_t)j, nPlane });
}
}
std::sort(planeTriangleMapping.begin(), planeTriangleMapping.end(), planeComparer);
return createFullBondListExactInternal(meshCount, geometryOffset, geometry, planeTriangleMapping, resultBondDescs);
}
void BlastBondGeneratorImpl::buildGeometryCache(uint32_t meshCount, const uint32_t* geometryOffset,
const Triangle* geometry)
{
uint32_t geometryCount = geometryOffset[meshCount];
for (uint32_t i = 0; i < meshCount; i++)
{
mGeometryCache.push_back(std::vector<Triangle>());
uint32_t count = geometryOffset[i + 1] - geometryOffset[i];
mGeometryCache.back().resize(count);
memcpy(mGeometryCache.back().data(), geometry + geometryOffset[i], sizeof(Triangle) * count);
}
mHullsPointsCache.resize(geometryCount);
mBoundsCache.resize(geometryCount);
mCHullCache.resize(geometryCount);
for (uint32_t i = 0; i < mGeometryCache.size(); ++i)
{
for (uint32_t j = 0; j < mGeometryCache[i].size(); ++j)
{
NvcPlane nPlane =
fromNvShared(nvidia::NvPlane(toNvShared(mGeometryCache[i][j].a.p), toNvShared(mGeometryCache[i][j].b.p),
toNvShared(mGeometryCache[i][j].c.p)));
mPlaneCache.push_back({ (int32_t)i, (int32_t)j, nPlane });
}
}
for (uint32_t ch = 0; ch < mGeometryCache.size(); ++ch)
{
std::vector<NvcVec3> chunksPoints(mGeometryCache[ch].size() * 3);
int32_t sp = 0;
for (uint32_t i = 0; i < mGeometryCache[ch].size(); ++i)
{
chunksPoints[sp++] = mGeometryCache[ch][i].a.p;
chunksPoints[sp++] = mGeometryCache[ch][i].b.p;
chunksPoints[sp++] = mGeometryCache[ch][i].c.p;
}
mCHullCache[ch] = mConvexMeshBuilder->buildCollisionGeometry(chunksPoints.size(), chunksPoints.data());
mHullsPointsCache[ch].resize(mCHullCache[ch]->pointsCount);
mBoundsCache[ch].setEmpty();
for (uint32_t i = 0; i < mCHullCache[ch]->pointsCount; ++i)
{
mHullsPointsCache[ch][i] = toNvShared(mCHullCache[ch]->points[i]);
mBoundsCache[ch].include(mHullsPointsCache[ch][i]);
}
}
}
void BlastBondGeneratorImpl::resetGeometryCache()
{
mGeometryCache.clear();
mPlaneCache.clear();
mHullsPointsCache.clear();
for (auto h : mCHullCache)
{
mConvexMeshBuilder->releaseCollisionHull(h);
}
mCHullCache.clear();
mBoundsCache.clear();
}
int32_t BlastBondGeneratorImpl::createFullBondListExactInternal(uint32_t meshCount, const uint32_t* geometryOffset,
const Triangle* geometry,
std::vector<PlaneChunkIndexer>& planeTriangleMapping,
NvBlastBondDesc*& resultBondDescs)
{
NV_UNUSED(meshCount);
std::map<std::pair<int32_t, int32_t>, std::pair<NvBlastBondDesc, int32_t> > bonds;
TriangleProcessor trPrc;
std::vector<NvVec3> intersectionBufferLocal;
NvBlastBondDesc cleanBond = NvBlastBondDesc();
memset(&cleanBond, 0, sizeof(NvBlastBondDesc));
for (uint32_t tIndex = 0; tIndex < planeTriangleMapping.size(); ++tIndex)
{
PlaneChunkIndexer opp = planeTriangleMapping[tIndex];
opp.plane.d *= -1;
        opp.plane.n = opp.plane.n * -1;
uint32_t startIndex =
(uint32_t)(std::lower_bound(planeTriangleMapping.begin(), planeTriangleMapping.end(), opp, planeComparer) -
planeTriangleMapping.begin());
uint32_t endIndex =
(uint32_t)(std::upper_bound(planeTriangleMapping.begin(), planeTriangleMapping.end(), opp, planeComparer) -
planeTriangleMapping.begin());
// uint32_t startIndex = 0;
// uint32_t endIndex = (uint32_t)planeTriangleMapping.size();
PlaneChunkIndexer& mappedTr = planeTriangleMapping[tIndex];
const Triangle& trl = geometry[geometryOffset[mappedTr.chunkId] + mappedTr.trId];
NvPlane pln = toNvShared(mappedTr.plane);
TrPrcTriangle trp(toNvShared(trl.a.p), toNvShared(trl.b.p), toNvShared(trl.c.p));
NvVec3 trCentroid = toNvShared(trl.a.p + trl.b.p + trl.c.p) * (1.0f / 3.0f);
trp.points[0] -= trCentroid;
trp.points[1] -= trCentroid;
trp.points[2] -= trCentroid;
ProjectionDirections pDir = getProjectionDirection(pln.n);
TrPrcTriangle2d trp2d;
trp2d.points[0] = getProjectedPointWithWinding(trp.points[0], pDir);
trp2d.points[1] = getProjectedPointWithWinding(trp.points[1], pDir);
trp2d.points[2] = getProjectedPointWithWinding(trp.points[2], pDir);
for (uint32_t i = startIndex; i <= endIndex && i < planeTriangleMapping.size(); ++i)
{
PlaneChunkIndexer& mappedTr2 = planeTriangleMapping[i];
if (mappedTr2.trId == opp.chunkId)
{
continue;
}
if (!isSamePlane(opp.plane, mappedTr2.plane))
{
continue;
}
if (mappedTr.chunkId == mappedTr2.chunkId)
{
continue;
}
std::pair<int32_t, int32_t> bondEndPoints = std::make_pair(mappedTr.chunkId, mappedTr2.chunkId);
if (bondEndPoints.second < bondEndPoints.first)
continue;
std::pair<int32_t, int32_t> bondEndPointsSwapped = std::make_pair(mappedTr2.chunkId, mappedTr.chunkId);
if (bonds.find(bondEndPoints) == bonds.end() && bonds.find(bondEndPointsSwapped) != bonds.end())
{
                continue;  // We do not need to account for the interface surface twice
}
if (bonds.find(bondEndPoints) == bonds.end())
{
bonds[bondEndPoints].second = 0;
bonds[bondEndPoints].first = cleanBond;
bonds[bondEndPoints].first.chunkIndices[0] = bondEndPoints.first;
bonds[bondEndPoints].first.chunkIndices[1] = bondEndPoints.second;
bonds[bondEndPoints].first.bond.normal[0] = pln.n[0];
bonds[bondEndPoints].first.bond.normal[1] = pln.n[1];
bonds[bondEndPoints].first.bond.normal[2] = pln.n[2];
}
const Triangle& trl2 = geometry[geometryOffset[mappedTr2.chunkId] + mappedTr2.trId];
TrPrcTriangle trp2(toNvShared(trl2.a.p), toNvShared(trl2.b.p), toNvShared(trl2.c.p));
intersectionBufferLocal.clear();
intersectionBufferLocal.reserve(32);
trPrc.getTriangleIntersection(trp, trp2d, trp2, trCentroid, intersectionBufferLocal, pln.n);
NvVec3 centroidPoint(0, 0, 0);
int32_t collectedVerticesCount = 0;
float area = 0;
if (intersectionBufferLocal.size() >= 3)
{
#ifdef DEBUG_OUTPUT
for (uint32_t p = 1; p < intersectionBufferLocal.size() - 1; ++p)
{
intersectionBuffer.push_back(intersectionBufferLocal[0]);
intersectionBuffer.push_back(intersectionBufferLocal[p]);
intersectionBuffer.push_back(intersectionBufferLocal[p + 1]);
}
#endif
centroidPoint = intersectionBufferLocal[0] + intersectionBufferLocal.back();
collectedVerticesCount = 2;
for (uint32_t j = 1; j < intersectionBufferLocal.size() - 1; ++j)
{
++collectedVerticesCount;
centroidPoint += intersectionBufferLocal[j];
area += (intersectionBufferLocal[j + 1] - intersectionBufferLocal[0])
.cross(intersectionBufferLocal[j] - intersectionBufferLocal[0])
.magnitude();
}
}
if (area > 0.00001f)
{
bonds[bondEndPoints].second += collectedVerticesCount;
bonds[bondEndPoints].first.bond.area += area * 0.5f;
bonds[bondEndPoints].first.bond.centroid[0] += (centroidPoint.x);
bonds[bondEndPoints].first.bond.centroid[1] += (centroidPoint.y);
bonds[bondEndPoints].first.bond.centroid[2] += (centroidPoint.z);
}
}
}
std::vector<NvBlastBondDesc> mResultBondDescs;
for (auto it : bonds)
{
if (it.second.first.bond.area > 0)
{
float mlt = 1.0f / (it.second.second);
it.second.first.bond.centroid[0] *= mlt;
it.second.first.bond.centroid[1] *= mlt;
it.second.first.bond.centroid[2] *= mlt;
mResultBondDescs.push_back(it.second.first);
}
}
#ifdef DEBUG_OUTPUT
saveGeometryToObj(meshBuffer, "Mesh.obj");
saveGeometryToObj(intersectionBuffer, "inter.obj");
#endif
resultBondDescs = SAFE_ARRAY_NEW(NvBlastBondDesc, mResultBondDescs.size());
memcpy(resultBondDescs, mResultBondDescs.data(), sizeof(NvBlastBondDesc) * mResultBondDescs.size());
return mResultBondDescs.size();
}
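// Forces a bond between two hulls even when they do not touch: a separating plane is found
// (retrying with a tiny jitter if the query returns NaN), each hull is sliced by that plane
// offset into the hull by 'overlapping', and the two cross sections are averaged to produce
// the bond area, centroid and normal.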
int32_t BlastBondGeneratorImpl::createBondForcedInternal(const std::vector<NvVec3>& hull0,
const std::vector<NvVec3>& hull1, const CollisionHull& cHull0,
const CollisionHull& cHull1, NvBounds3 bound0,
NvBounds3 bound1, NvBlastBond& resultBond, float overlapping)
{
TriangleProcessor trProcessor;
Separation separation;
importerHullsInProximityApexFree(hull0.size(), hull0.data(), bound0, NvTransform(NvIdentity), NvVec3(1, 1, 1),
hull1.size(), hull1.data(), bound1, NvTransform(NvIdentity), NvVec3(1, 1, 1),
0.000, &separation);
if (std::isnan(separation.plane.d))
{
importerHullsInProximityApexFree(
hull0.size(), hull0.data(), bound0, NvTransform(NvVec3(0.000001f, 0.000001f, 0.000001f)), NvVec3(1, 1, 1),
hull1.size(), hull1.data(), bound1, NvTransform(NvIdentity), NvVec3(1, 1, 1), 0.000, &separation);
if (std::isnan(separation.plane.d))
{
return 1;
}
}
NvPlane pl = separation.plane;
std::vector<NvVec3> ifsPoints[2];
float dst[2][2];
dst[0][0] = 0;
dst[0][1] = MAXIMUM_EXTENT;
for (uint32_t p = 0; p < cHull0.pointsCount; ++p)
{
float d = pl.distance(toNvShared(cHull0.points[p]));
if (NvAbs(d) > NvAbs(dst[0][0]))
{
dst[0][0] = d;
}
if (NvAbs(d) < NvAbs(dst[0][1]))
{
dst[0][1] = d;
}
}
dst[1][0] = 0;
dst[1][1] = MAXIMUM_EXTENT;
for (uint32_t p = 0; p < cHull1.pointsCount; ++p)
{
        float d = pl.distance(toNvShared(cHull1.points[p]));
if (NvAbs(d) > NvAbs(dst[1][0]))
{
dst[1][0] = d;
}
if (NvAbs(d) < NvAbs(dst[1][1]))
{
dst[1][1] = d;
}
}
float cvOffset[2] = { dst[0][1] + (dst[0][0] - dst[0][1]) * overlapping,
dst[1][1] + (dst[1][0] - dst[1][1]) * overlapping };
for (uint32_t i = 0; i < cHull0.polygonDataCount; ++i)
{
auto& pd = cHull0.polygonData[i];
NvVec3 result;
for (uint32_t j = 0; j < pd.vertexCount; ++j)
{
uint32_t nxj = (j + 1) % pd.vertexCount;
const uint32_t* ind = cHull0.indices;
NvVec3 a = hull0[ind[j + pd.indexBase]] - pl.n * cvOffset[0];
NvVec3 b = hull0[ind[nxj + pd.indexBase]] - pl.n * cvOffset[0];
if (getPlaneSegmentIntersection(pl, a, b, result))
{
ifsPoints[0].push_back(result);
}
}
}
for (uint32_t i = 0; i < cHull1.polygonDataCount; ++i)
{
auto& pd = cHull1.polygonData[i];
NvVec3 result;
for (uint32_t j = 0; j < pd.vertexCount; ++j)
{
uint32_t nxj = (j + 1) % pd.vertexCount;
const uint32_t* ind = cHull1.indices;
NvVec3 a = hull1[ind[j + pd.indexBase]] - pl.n * cvOffset[1];
NvVec3 b = hull1[ind[nxj + pd.indexBase]] - pl.n * cvOffset[1];
if (getPlaneSegmentIntersection(pl, a, b, result))
{
ifsPoints[1].push_back(result);
}
}
}
std::vector<NvVec3> convexes[2];
trProcessor.buildConvexHull(ifsPoints[0], convexes[0], pl.n);
trProcessor.buildConvexHull(ifsPoints[1], convexes[1], pl.n);
float areas[2] = { 0, 0 };
NvVec3 centroids[2] = { NvVec3(0, 0, 0), NvVec3(0, 0, 0) };
for (uint32_t cv = 0; cv < 2; ++cv)
{
if (convexes[cv].size() == 0)
{
continue;
}
centroids[cv] = convexes[cv][0] + convexes[cv].back();
for (uint32_t i = 1; i < convexes[cv].size() - 1; ++i)
{
centroids[cv] += convexes[cv][i];
areas[cv] += (convexes[cv][i + 1] - convexes[cv][0]).cross(convexes[cv][i] - convexes[cv][0]).magnitude();
#ifdef DEBUG_OUTPUT
intersectionBuffer.push_back(convexes[cv][0]);
intersectionBuffer.push_back(convexes[cv][i]);
intersectionBuffer.push_back(convexes[cv][i + 1]);
#endif
}
centroids[cv] *= (1.0f / convexes[cv].size());
areas[cv] = NvAbs(areas[cv]);
}
resultBond.area = (areas[0] + areas[1]) * 0.5f;
resultBond.centroid[0] = (centroids[0][0] + centroids[1][0]) * 0.5f;
resultBond.centroid[1] = (centroids[0][1] + centroids[1][1]) * 0.5f;
resultBond.centroid[2] = (centroids[0][2] + centroids[1][2]) * 0.5f;
resultBond.normal[0] = pl.n[0];
resultBond.normal[1] = pl.n[1];
resultBond.normal[2] = pl.n[2];
resultBond.userData = 0;
#ifdef DEBUG_OUTPUT
saveGeometryToObj(meshBuffer, "ArbitMeshes.obj");
saveGeometryToObj(intersectionBuffer, "inter.obj");
#endif
return 0;
}
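// Builds NvBlastChunkDesc / NvBlastBondDesc arrays from the FractureTool's internal state.
// Interior triangles carry a cutting-plane id in their userData; triangles are grouped by
// (chunk, plane) and groups on opposite sides of the same plane are paired into bonds.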
int32_t BlastBondGeneratorImpl::buildDescFromInternalFracture(FractureTool* tool, const bool* chunkIsSupport,
NvBlastBondDesc*& resultBondDescs,
NvBlastChunkDesc*& resultChunkDescriptors)
{
uint32_t chunkCount = tool->getChunkCount();
std::vector<uint32_t> trianglesCount(chunkCount);
std::vector<std::shared_ptr<Triangle> > trianglesBuffer;
for (uint32_t i = 0; i < chunkCount; ++i)
{
Triangle* t;
trianglesCount[i] = tool->getBaseMesh(i, t);
trianglesBuffer.push_back(std::shared_ptr<Triangle>(t, [](Triangle* t) { delete[] t; }));
}
if (chunkCount == 0)
{
return 0;
}
resultChunkDescriptors = SAFE_ARRAY_NEW(NvBlastChunkDesc, trianglesBuffer.size());
std::vector<Bond> bondDescriptors;
bool hasApproximateBonding = false;
for (uint32_t i = 0; i < chunkCount; ++i)
{
NvBlastChunkDesc& desc = resultChunkDescriptors[i];
desc.userData = tool->getChunkId(i);
desc.parentChunkDescIndex = tool->getChunkInfoIndex(tool->getChunkInfo(i).parentChunkId);
desc.flags = NvBlastChunkDesc::NoFlags;
hasApproximateBonding |= !!(tool->getChunkInfo(i).flags & ChunkInfo::APPROXIMATE_BONDING);
if (chunkIsSupport[i])
{
desc.flags = NvBlastChunkDesc::SupportFlag;
}
NvVec3 chunkCentroid(0, 0, 0);
for (uint32_t tr = 0; tr < trianglesCount[i]; ++tr)
{
auto& trRef = trianglesBuffer[i].get()[tr];
chunkCentroid += toNvShared(trRef.a.p);
chunkCentroid += toNvShared(trRef.b.p);
chunkCentroid += toNvShared(trRef.c.p);
int32_t id = trRef.userData;
if (id == 0)
continue;
bondDescriptors.push_back(Bond());
Bond& bond = bondDescriptors.back();
bond.m_chunkId = i;
bond.m_planeIndex = id;
bond.triangleIndex = tr;
}
chunkCentroid *= (1.0f / (3 * trianglesCount[i]));
desc.centroid[0] = chunkCentroid[0];
desc.centroid[1] = chunkCentroid[1];
desc.centroid[2] = chunkCentroid[2];
}
std::sort(bondDescriptors.begin(), bondDescriptors.end());
std::vector<NvBlastBondDesc> mResultBondDescs;
if (!bondDescriptors.empty())
{
int32_t chunkId, planeId;
chunkId = bondDescriptors[0].m_chunkId;
planeId = bondDescriptors[0].m_planeIndex;
std::vector<BondInfo> forwardChunks;
std::vector<BondInfo> backwardChunks;
float area = 0;
NvVec3 normal(0, 0, 0);
NvVec3 centroid(0, 0, 0);
int32_t collected = 0;
NvBounds3 bb = NvBounds3::empty();
chunkId = -1;
planeId = bondDescriptors[0].m_planeIndex;
for (uint32_t i = 0; i <= bondDescriptors.size(); ++i)
{
if (i == bondDescriptors.size() ||
(chunkId != bondDescriptors[i].m_chunkId || abs(planeId) != abs(bondDescriptors[i].m_planeIndex)))
{
if (chunkId != -1)
{
area = 0.5f * normal.normalize();
centroid /= 3.0f * collected;
if (bondDescriptors[i - 1].m_planeIndex > 0)
{
forwardChunks.push_back(BondInfo());
forwardChunks.back().area = area;
forwardChunks.back().normal = normal;
forwardChunks.back().centroid = centroid;
forwardChunks.back().m_chunkId = chunkId;
forwardChunks.back().m_bb = bb;
}
else
{
backwardChunks.push_back(BondInfo());
backwardChunks.back().area = area;
backwardChunks.back().normal = normal;
backwardChunks.back().centroid = centroid;
backwardChunks.back().m_chunkId = chunkId;
backwardChunks.back().m_bb = bb;
}
}
bb.setEmpty();
collected = 0;
area = 0;
normal = NvVec3(0, 0, 0);
centroid = NvVec3(0, 0, 0);
if (i != bondDescriptors.size())
chunkId = bondDescriptors[i].m_chunkId;
}
if (i == bondDescriptors.size() || abs(planeId) != abs(bondDescriptors[i].m_planeIndex))
{
for (uint32_t fchunk = 0; fchunk < forwardChunks.size(); ++fchunk)
{
const BondInfo& fInfo = forwardChunks[fchunk];
if (chunkIsSupport[fInfo.m_chunkId] == false)
{
continue;
}
for (uint32_t bchunk = 0; bchunk < backwardChunks.size(); ++bchunk)
{
const BondInfo& bInfo = backwardChunks[bchunk];
if (weakBoundingBoxIntersection(fInfo.m_bb, bInfo.m_bb) == 0)
{
continue;
}
if (chunkIsSupport[bInfo.m_chunkId] == false)
{
continue;
}
mResultBondDescs.push_back(NvBlastBondDesc());
NvBlastBondDesc& bondDesc = mResultBondDescs.back();
// Use the minimum-area patch for the bond area and centroid
if (fInfo.area < bInfo.area)
{
bondDesc.bond.area = fInfo.area;
bondDesc.bond.centroid[0] = fInfo.centroid.x;
bondDesc.bond.centroid[1] = fInfo.centroid.y;
bondDesc.bond.centroid[2] = fInfo.centroid.z;
bondDesc.bond.normal[0] = fInfo.normal.x;
bondDesc.bond.normal[1] = fInfo.normal.y;
bondDesc.bond.normal[2] = fInfo.normal.z;
}
else
{
bondDesc.bond.area = bInfo.area;
bondDesc.bond.centroid[0] = bInfo.centroid.x;
bondDesc.bond.centroid[1] = bInfo.centroid.y;
bondDesc.bond.centroid[2] = bInfo.centroid.z;
bondDesc.bond.normal[0] = -bInfo.normal.x;
bondDesc.bond.normal[1] = -bInfo.normal.y;
bondDesc.bond.normal[2] = -bInfo.normal.z;
}
bondDesc.chunkIndices[0] = fInfo.m_chunkId;
bondDesc.chunkIndices[1] = bInfo.m_chunkId;
}
}
forwardChunks.clear();
backwardChunks.clear();
if (i != bondDescriptors.size())
{
planeId = bondDescriptors[i].m_planeIndex;
}
else
{
break;
}
}
collected++;
auto& trRef = trianglesBuffer[chunkId].get()[bondDescriptors[i].triangleIndex];
normal += getNormal(trRef);
centroid += toNvShared(trRef.a.p);
centroid += toNvShared(trRef.b.p);
centroid += toNvShared(trRef.c.p);
bb.include(toNvShared(trRef.a.p));
bb.include(toNvShared(trRef.b.p));
bb.include(toNvShared(trRef.c.p));
}
}
if (hasApproximateBonding)
{
std::vector<Triangle> chunkTriangles;
std::vector<uint32_t> chunkTrianglesOffsets;
std::set<std::pair<uint32_t, uint32_t> > pairsAlreadyCreated;
for (uint32_t i = 0; i < mResultBondDescs.size(); ++i)
{
auto pr = (mResultBondDescs[i].chunkIndices[0] < mResultBondDescs[i].chunkIndices[1]) ?
std::make_pair(mResultBondDescs[i].chunkIndices[0], mResultBondDescs[i].chunkIndices[1]) :
std::make_pair(mResultBondDescs[i].chunkIndices[1], mResultBondDescs[i].chunkIndices[0]);
pairsAlreadyCreated.insert(pr);
}
const float EXPANSION = 0.01f;
chunkTrianglesOffsets.push_back(0);
for (uint32_t i = 0; i < chunkCount; ++i)
{
const float SCALE_FACTOR = 1.001f;
NvcVec3 centroid = {resultChunkDescriptors[i].centroid[0], resultChunkDescriptors[i].centroid[1],
resultChunkDescriptors[i].centroid[2]};
for (uint32_t k = 0; k < trianglesCount[i]; ++k)
{
chunkTriangles.push_back(trianglesBuffer[i].get()[k]);
// inflate mesh a bit
chunkTriangles.back().a.p = chunkTriangles.back().a.p + (chunkTriangles.back().a.p - centroid) * EXPANSION;
chunkTriangles.back().b.p = chunkTriangles.back().b.p + (chunkTriangles.back().b.p - centroid) * EXPANSION;
chunkTriangles.back().c.p = chunkTriangles.back().c.p + (chunkTriangles.back().c.p - centroid) * EXPANSION;
}
chunkTrianglesOffsets.push_back(chunkTriangles.size());
}
NvBlastBondDesc* adsc;
BondGenerationConfig cfg;
cfg.bondMode = BondGenerationConfig::AVERAGE;
cfg.maxSeparation = EXPANSION;
uint32_t nbListSize =
createFullBondListAveraged(chunkCount, chunkTrianglesOffsets.data(), chunkTriangles.data(), nullptr,
chunkIsSupport, nullptr, adsc, cfg, &pairsAlreadyCreated);
for (uint32_t i = 0; i < nbListSize; ++i)
{
mResultBondDescs.push_back(adsc[i]);
}
NVBLAST_FREE(adsc);
}
resultBondDescs = SAFE_ARRAY_NEW(NvBlastBondDesc, mResultBondDescs.size());
memcpy(resultBondDescs, mResultBondDescs.data(), sizeof(NvBlastBondDesc) * mResultBondDescs.size());
return mResultBondDescs.size();
}
int32_t BlastBondGeneratorImpl::createBondBetweenMeshes(uint32_t meshCount, const uint32_t* geometryOffset,
const Triangle* geometry, uint32_t overlapsCount,
const uint32_t* overlapsA, const uint32_t* overlapsB,
NvBlastBondDesc*& resultBond, BondGenerationConfig cfg)
{
if (cfg.bondMode == BondGenerationConfig::AVERAGE)
{
resetGeometryCache();
buildGeometryCache(meshCount, geometryOffset, geometry);
}
resultBond = SAFE_ARRAY_NEW(NvBlastBondDesc, overlapsCount);
if (cfg.bondMode == BondGenerationConfig::EXACT)
{
for (uint32_t i = 0; i < overlapsCount; ++i)
{
NvBlastBondDesc& desc = resultBond[i];
desc.chunkIndices[0] = overlapsA[i];
desc.chunkIndices[1] = overlapsB[i];
uint32_t meshACount = geometryOffset[overlapsA[i] + 1] - geometryOffset[overlapsA[i]];
uint32_t meshBCount = geometryOffset[overlapsB[i] + 1] - geometryOffset[overlapsB[i]];
createBondBetweenMeshes(meshACount, geometry + geometryOffset[overlapsA[i]], meshBCount,
geometry + geometryOffset[overlapsB[i]], desc.bond, cfg);
}
}
else
{
for (uint32_t i = 0; i < overlapsCount; ++i)
{
NvBlastBondDesc& desc = resultBond[i];
desc.chunkIndices[0] = overlapsA[i];
desc.chunkIndices[1] = overlapsB[i];
createBondForcedInternal(mHullsPointsCache[overlapsA[i]], mHullsPointsCache[overlapsB[i]],
*mCHullCache[overlapsA[i]], *mCHullCache[overlapsB[i]], mBoundsCache[overlapsA[i]],
mBoundsCache[overlapsB[i]], desc.bond, 0.3f);
}
}
return overlapsCount;
}
int32_t BlastBondGeneratorImpl::createBondBetweenMeshes(uint32_t meshACount, const Triangle* meshA, uint32_t meshBCount,
const Triangle* meshB, NvBlastBond& resultBond,
BondGenerationConfig conf)
{
float overlapping = 0.3f;
if (conf.bondMode == BondGenerationConfig::EXACT)
{
std::vector<uint32_t> chunksOffsets = { 0, meshACount, meshACount + meshBCount };
std::vector<Triangle> chunks;
chunks.resize(meshACount + meshBCount);
memcpy(chunks.data(), meshA, sizeof(Triangle) * meshACount);
memcpy(chunks.data() + meshACount, meshB, sizeof(Triangle) * meshBCount);
std::shared_ptr<bool> isSupport(new bool[2]{ true, true }, [](bool* b) { delete[] b; });
NvBlastBondDesc* desc;
uint32_t descSize = createFullBondListExact(2, chunksOffsets.data(), chunks.data(), isSupport.get(), desc, conf);
if (descSize > 0)
{
resultBond = desc->bond;
}
else
{
memset(&resultBond, 0, sizeof(NvBlastBond));
return 1;
}
return 0;
}
std::vector<NvcVec3> chunksPoints1(meshACount * 3);
std::vector<NvcVec3> chunksPoints2(meshBCount * 3);
int32_t sp = 0;
for (uint32_t i = 0; i < meshACount; ++i)
{
chunksPoints1[sp++] = meshA[i].a.p;
chunksPoints1[sp++] = meshA[i].b.p;
chunksPoints1[sp++] = meshA[i].c.p;
#ifdef DEBUG_OUTPUT
meshBuffer.push_back(meshA[i].a.p);
meshBuffer.push_back(meshA[i].b.p);
meshBuffer.push_back(meshA[i].c.p);
#endif
}
sp = 0;
for (uint32_t i = 0; i < meshBCount; ++i)
{
chunksPoints2[sp++] = meshB[i].a.p;
chunksPoints2[sp++] = meshB[i].b.p;
chunksPoints2[sp++] = meshB[i].c.p;
#ifdef DEBUG_OUTPUT
meshBuffer.push_back(meshB[i].a.p);
meshBuffer.push_back(meshB[i].b.p);
meshBuffer.push_back(meshB[i].c.p);
#endif
}
CollisionHull* cHull[2];
cHull[0] = mConvexMeshBuilder->buildCollisionGeometry(chunksPoints1.size(), chunksPoints1.data());
cHull[1] = mConvexMeshBuilder->buildCollisionGeometry(chunksPoints2.size(), chunksPoints2.data());
std::vector<NvVec3> hullPoints[2];
hullPoints[0].resize(cHull[0]->pointsCount);
hullPoints[1].resize(cHull[1]->pointsCount);
NvBounds3 bb[2];
bb[0].setEmpty();
bb[1].setEmpty();
for (uint32_t cv = 0; cv < 2; ++cv)
{
for (uint32_t i = 0; i < cHull[cv]->pointsCount; ++i)
{
hullPoints[cv][i] = toNvShared(cHull[cv]->points[i]);
bb[cv].include(hullPoints[cv][i]);
}
}
auto ret = createBondForcedInternal(hullPoints[0], hullPoints[1], *cHull[0], *cHull[1], bb[0], bb[1], resultBond,
overlapping);
mConvexMeshBuilder->releaseCollisionHull(cHull[0]);
mConvexMeshBuilder->releaseCollisionHull(cHull[1]);
return ret;
}
int32_t BlastBondGeneratorImpl::bondsFromPrefractured(uint32_t meshCount, const uint32_t* geometryCount,
const Triangle* geometry, const bool* chunkIsSupport,
NvBlastBondDesc*& resultBondDescs, BondGenerationConfig conf)
{
int32_t ret_val = 0;
switch (conf.bondMode)
{
case BondGenerationConfig::AVERAGE:
ret_val = createFullBondListAveraged(meshCount, geometryCount, geometry, nullptr, chunkIsSupport, nullptr,
resultBondDescs, conf);
break;
case BondGenerationConfig::EXACT:
ret_val = createFullBondListExact(meshCount, geometryCount, geometry, chunkIsSupport, resultBondDescs, conf);
break;
}
return ret_val;
}
int32_t BlastBondGeneratorImpl::bondsFromPrefractured(uint32_t meshCount, const uint32_t* convexHullOffset,
const CollisionHull** chunkHulls, const bool* chunkIsSupport,
const uint32_t* meshGroups, NvBlastBondDesc*& resultBondDescs,
float maxSeparation)
{
BondGenerationConfig conf;
conf.maxSeparation = maxSeparation;
conf.bondMode = BondGenerationConfig::AVERAGE;
return createFullBondListAveraged(meshCount, convexHullOffset, nullptr, chunkHulls, chunkIsSupport, meshGroups,
resultBondDescs, conf);
}
void BlastBondGeneratorImpl::release()
{
delete this;
}
} // namespace Blast
} // namespace Nv
| 56,069 | C++ | 37.091033 | 153 | 0.547433 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringCutoutImpl.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include "NvBlastGlobals.h"
#include <NvBlastAssert.h>
#include "NvBounds3.h"
#include "NvMath.h"
#include "NvAssert.h"
#include <NvBlastNvSharedHelpers.h>
#include "NvBlastExtAuthoringCutoutImpl.h"
#include <algorithm>
#include <set>
#include <map>
#include <stack>
#define CUTOUT_DISTANCE_THRESHOLD (0.7f)
#define CUTOUT_DISTANCE_EPS (0.01f)
using namespace Nv::Blast;
// Unsigned modulus
uint32_t mod(int32_t n, uint32_t modulus)
{
const int32_t d = n/(int32_t)modulus;
const int32_t m = n - d*(int32_t)modulus;
return m >= 0 ? (uint32_t)m : (uint32_t)m + modulus;
}
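// Note (illustrative, not in the original source): unlike the built-in % operator, mod() always returns a
// value in [0, modulus); e.g. mod(-1, 8) == 7, whereas (-1 % 8) evaluates to -1 in C++.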
float square(float x)
{
return x * x;
}
// 2D cross product
float dotXY(const nvidia::NvVec3& v, const nvidia::NvVec3& w)
{
return v.x * w.x + v.y * w.y;
}
// Z-component of cross product
float crossZ(const nvidia::NvVec3& v, const nvidia::NvVec3& w)
{
return v.x * w.y - v.y * w.x;
}
// z coordinates may be used to store extra info - only deal with x and y
float perpendicularDistanceSquared(const nvidia::NvVec3& v0, const nvidia::NvVec3& v1, const nvidia::NvVec3& v2)
{
const nvidia::NvVec3 base = v2 - v0;
const nvidia::NvVec3 leg = v1 - v0;
const float baseLen2 = dotXY(base, base);
return baseLen2 > NV_EPS_F32 * dotXY(leg, leg) ? square(crossZ(base, leg)) / baseLen2 : 0.0f;
}
// z coordinates may be used to store extra info - only deal with x and y
float perpendicularDistanceSquared(const std::vector< nvidia::NvVec3 >& cutout, uint32_t index)
{
const uint32_t size = cutout.size();
return perpendicularDistanceSquared(cutout[(index + size - 1) % size], cutout[index], cutout[(index + 1) % size]);
}
////////////////////////////////////////////////
// ApexShareUtils - Begin
////////////////////////////////////////////////
struct BoundsRep
{
BoundsRep() : type(0)
{
aabb.setEmpty();
}
nvidia::NvBounds3 aabb;
uint32_t type; // By default only reports if subtypes are the same, configurable. Valid range {0...7}
};
struct IntPair
{
void set(int32_t _i0, int32_t _i1)
{
i0 = _i0;
i1 = _i1;
}
int32_t i0, i1;
static int compare(const void* a, const void* b)
{
const int32_t diff0 = ((IntPair*)a)->i0 - ((IntPair*)b)->i0;
return diff0 ? diff0 : (((IntPair*)a)->i1 - ((IntPair*)b)->i1);
}
};
struct BoundsInteractions
{
BoundsInteractions() : bits(0x8040201008040201ULL) {}
BoundsInteractions(bool setAll) : bits(setAll ? 0xFFFFFFFFFFFFFFFFULL : 0x0000000000000000ULL) {}
bool set(unsigned group1, unsigned group2, bool interacts)
{
if (group1 >= 8 || group2 >= 8)
{
return false;
}
const uint64_t mask = (uint64_t)1 << ((group1 << 3) + group2) | (uint64_t)1 << ((group2 << 3) + group1);
if (interacts)
{
bits |= mask;
}
else
{
bits &= ~mask;
}
return true;
}
uint64_t bits;
};
enum Bounds3Axes
{
Bounds3X = 1,
Bounds3Y = 2,
Bounds3Z = 4,
Bounds3XY = Bounds3X | Bounds3Y,
Bounds3YZ = Bounds3Y | Bounds3Z,
Bounds3ZX = Bounds3Z | Bounds3X,
Bounds3XYZ = Bounds3X | Bounds3Y | Bounds3Z
};
void boundsCalculateOverlaps(std::vector<IntPair>& overlaps, Bounds3Axes axesToUse, const BoundsRep* bounds, uint32_t boundsCount, uint32_t boundsByteStride,
const BoundsInteractions& interactions = BoundsInteractions(), bool append = false);
void createIndexStartLookup(std::vector<uint32_t>& lookup, int32_t indexBase, uint32_t indexRange, int32_t* indexSource, uint32_t indexCount, uint32_t indexByteStride);
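// Illustrative note (not part of the original source): BoundsInteractions' default bit pattern
// 0x8040201008040201ULL sets only the diagonal of the 8x8 type/type matrix, i.e. each bounds type
// interacts only with itself. A minimal usage sketch, where boundsArray/boundsCount stand for any
// caller-provided BoundsRep array:
//
//     BoundsInteractions interactions;    // diagonal-only by default
//     interactions.set(0, 1, true);       // additionally allow type 0 to overlap with type 1
//     std::vector<IntPair> overlaps;
//     boundsCalculateOverlaps(overlaps, Bounds3XY, boundsArray, boundsCount, sizeof(BoundsRep), interactions);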
/*
Index bank - double-sided free list for O(1) borrow/return of unique IDs
Type IndexType should be an unsigned integer type or something that can be cast to and from
an integer
*/
template <class IndexType>
class IndexBank
{
public:
IndexBank<IndexType>(uint32_t capacity = 0) : indexCount(0), capacityLocked(false)
{
maxCapacity = calculateMaxCapacity();
reserve_internal(capacity);
}
// Copy constructor
IndexBank<IndexType>(const IndexBank<IndexType>& other)
{
*this = other;
}
virtual ~IndexBank<IndexType>() {}
// Assignment operator
IndexBank<IndexType>& operator = (const IndexBank<IndexType>& other)
{
indices = other.indices;
ranks = other.ranks;
maxCapacity = other.maxCapacity;
indexCount = other.indexCount;
capacityLocked = other.capacityLocked;
return *this;
}
void setIndicesAndRanks(uint16_t* indicesIn, uint16_t* ranksIn, uint32_t capacityIn, uint32_t usedCountIn)
{
indexCount = usedCountIn;
reserve_internal(capacityIn);
for (uint32_t i = 0; i < capacityIn; ++i)
{
indices[i] = indicesIn[i];
ranks[i] = ranksIn[i];
}
}
void clear(uint32_t capacity = 0, bool used = false)
{
capacityLocked = false;
        indices.clear();
        ranks.clear();
reserve_internal(capacity);
if (used)
{
indexCount = capacity;
indices.resize(capacity);
for (IndexType i = (IndexType)0; i < (IndexType)capacity; ++i)
{
indices[i] = i;
}
}
else
{
indexCount = 0;
}
}
// Equivalent to calling freeLastUsed() until the used list is empty.
void clearFast()
{
indexCount = 0;
}
// This is the reserve size. The bank can only grow, due to shuffling of indices
virtual void reserve(uint32_t capacity)
{
reserve_internal(capacity);
}
// If lock = true, keeps bank from automatically resizing
void lockCapacity(bool lock)
{
capacityLocked = lock;
}
bool isCapacityLocked() const
{
return capacityLocked;
}
void setMaxCapacity(uint32_t inMaxCapacity)
{
// Cannot drop below current capacity, nor above max set by data types
maxCapacity = nvidia::NvClamp(inMaxCapacity, capacity(), calculateMaxCapacity());
}
uint32_t capacity() const
{
return indices.size();
}
uint32_t usedCount() const
{
return indexCount;
}
uint32_t freeCount() const
{
return capacity() - usedCount();
}
// valid from [0] to [size()-1]
const IndexType* usedIndices() const
{
return indices.data();
}
// valid from [0] to [free()-1]
const IndexType* freeIndices() const
{
return indices.begin() + usedCount();
}
bool isValid(IndexType index) const
{
return index < (IndexType)capacity();
}
bool isUsed(IndexType index) const
{
return isValid(index) && (ranks[index] < (IndexType)usedCount());
}
bool isFree(IndexType index) const
{
        return isValid(index) && !isUsed(index);
}
IndexType getRank(IndexType index) const
{
return ranks[index];
}
// Gets the next available index, if any
bool useNextFree(IndexType& index)
{
if (freeCount() == 0)
{
if (capacityLocked)
{
return false;
}
if (capacity() >= maxCapacity)
{
return false;
}
reserve(nvidia::NvClamp(capacity() * 2, (uint32_t)1, maxCapacity));
NVBLAST_ASSERT(freeCount() > 0);
}
index = indices[indexCount++];
return true;
}
// Frees the last used index, if any
bool freeLastUsed(IndexType& index)
{
if (usedCount() == 0)
{
return false;
}
index = indices[--indexCount];
return true;
}
// Requests a particular index. If that index is available, it is borrowed and the function
// returns true. Otherwise nothing happens and the function returns false.
bool use(IndexType index)
{
if (!indexIsValidForUse(index))
{
return false;
}
IndexType oldRank;
placeIndexAtRank(index, (IndexType)indexCount++, oldRank);
return true;
}
bool free(IndexType index)
{
if (!indexIsValidForFreeing(index))
{
return false;
}
IndexType oldRank;
placeIndexAtRank(index, (IndexType)--indexCount, oldRank);
return true;
}
bool useAndReturnRanks(IndexType index, IndexType& newRank, IndexType& oldRank)
{
if (!indexIsValidForUse(index))
{
return false;
}
newRank = (IndexType)indexCount++;
placeIndexAtRank(index, newRank, oldRank);
return true;
}
bool freeAndReturnRanks(IndexType index, IndexType& newRank, IndexType& oldRank)
{
if (!indexIsValidForFreeing(index))
{
return false;
}
newRank = (IndexType)--indexCount;
placeIndexAtRank(index, newRank, oldRank);
return true;
}
protected:
bool indexIsValidForUse(IndexType index)
{
if (!isValid(index))
{
if (capacityLocked)
{
return false;
}
if (capacity() >= maxCapacity)
{
return false;
}
reserve(nvidia::NvClamp(2 * (uint32_t)index, (uint32_t)1, maxCapacity));
NVBLAST_ASSERT(isValid(index));
}
return !isUsed(index);
}
bool indexIsValidForFreeing(IndexType index)
{
if (!isValid(index))
{
// Invalid index
return false;
}
return isUsed(index);
}
// This is the reserve size. The bank can only grow, due to shuffling of indices
void reserve_internal(uint32_t capacity)
{
capacity = std::min(capacity, maxCapacity);
const uint32_t oldCapacity = indices.size();
if (capacity > oldCapacity)
{
indices.resize(capacity);
ranks.resize(capacity);
for (IndexType i = (IndexType)oldCapacity; i < (IndexType)capacity; ++i)
{
indices[i] = i;
ranks[i] = i;
}
}
}
private:
void placeIndexAtRank(IndexType index, IndexType newRank, IndexType& oldRank) // returns old rank
{
const IndexType replacementIndex = indices[newRank];
oldRank = ranks[index];
indices[oldRank] = replacementIndex;
indices[newRank] = index;
ranks[replacementIndex] = oldRank;
ranks[index] = newRank;
}
uint32_t calculateMaxCapacity()
{
#pragma warning(push)
#pragma warning(disable: 4127) // conditional expression is constant
if (sizeof(IndexType) >= sizeof(uint32_t))
{
return 0xFFFFFFFF; // Limited by data type we use to report capacity
}
else
{
return (1u << (8 * std::min((uint32_t)sizeof(IndexType), 3u))) - 1; // Limited by data type we use for indices
}
#pragma warning(pop)
}
protected:
std::vector<IndexType> indices;
std::vector<IndexType> ranks;
uint32_t maxCapacity;
uint32_t indexCount;
bool capacityLocked;
};
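// A minimal IndexBank usage sketch (illustrative only, not part of the original source):
//
//     IndexBank<uint32_t> bank(16);   // reserve room for 16 ids
//     uint32_t id;
//     if (bank.useNextFree(id))       // borrow the next free id in O(1)
//     {
//         // ... use id ...
//         bank.free(id);              // return it in O(1); the used/free partition stays consistent
//     }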
struct Marker
{
float pos;
uint32_t id; // lsb = type (0 = max, 1 = min), other bits used for object index
void set(float _pos, int32_t _id)
{
pos = _pos;
id = (uint32_t)_id;
}
};
static int compareMarkers(const void* A, const void* B)
{
    // Sorts by value. If values are equal, sorts min types after max types to reduce the number of overlaps
const float delta = ((Marker*)A)->pos - ((Marker*)B)->pos;
return delta != 0 ? (delta < 0 ? -1 : 1) : ((int)(((Marker*)A)->id & 1) - (int)(((Marker*)B)->id & 1));
}
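// Broad-phase overlap finder: for each requested axis the interval endpoints are sorted and the number of
// potential overlaps is counted; the axis with the fewest candidates is then swept while the remaining axes
// (and the type-interaction mask) are tested directly. Overlapping bounds are reported as index pairs.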
void boundsCalculateOverlaps(std::vector<IntPair>& overlaps, Bounds3Axes axesToUse, const BoundsRep* bounds, uint32_t boundsCount, uint32_t boundsByteStride,
const BoundsInteractions& interactions, bool append)
{
if (!append)
{
overlaps.clear();
}
uint32_t D = 0;
uint32_t axisNums[3];
for (unsigned i = 0; i < 3; ++i)
{
if ((axesToUse >> i) & 1)
{
axisNums[D++] = i;
}
}
if (D == 0 || D > 3)
{
return;
}
std::vector< std::vector<Marker> > axes;
axes.resize(D);
uint32_t overlapCount[3];
for (uint32_t n = 0; n < D; ++n)
{
const uint32_t axisNum = axisNums[n];
std::vector<Marker>& axis = axes[n];
overlapCount[n] = 0;
axis.resize(2 * boundsCount);
uint8_t* boundsPtr = (uint8_t*)bounds;
for (uint32_t i = 0; i < boundsCount; ++i, boundsPtr += boundsByteStride)
{
const BoundsRep& boundsRep = *(const BoundsRep*)boundsPtr;
const nvidia::NvBounds3& box = boundsRep.aabb;
float min = box.minimum[axisNum];
float max = box.maximum[axisNum];
if (min >= max)
{
const float mid = 0.5f * (min + max);
float pad = 0.000001f * fabsf(mid);
min = mid - pad;
max = mid + pad;
}
axis[i << 1].set(min, (int32_t)i << 1 | 1);
axis[i << 1 | 1].set(max, (int32_t)i << 1);
}
qsort(axis.data(), axis.size(), sizeof(Marker), compareMarkers);
uint32_t localOverlapCount = 0;
for (uint32_t i = 0; i < axis.size(); ++i)
{
Marker& marker = axis[i];
if (marker.id & 1)
{
overlapCount[n] += localOverlapCount;
++localOverlapCount;
}
else
{
--localOverlapCount;
}
}
}
unsigned int axis0;
unsigned int axis1;
unsigned int axis2;
unsigned int maxBin;
if (D == 1)
{
maxBin = 0;
axis0 = axisNums[0];
axis1 = axis0;
axis2 = axis0;
}
else if (D == 2)
{
if (overlapCount[0] < overlapCount[1])
{
maxBin = 0;
axis0 = axisNums[0];
axis1 = axisNums[1];
axis2 = axis0;
}
else
{
maxBin = 1;
axis0 = axisNums[1];
axis1 = axisNums[0];
axis2 = axis0;
}
}
else
{
maxBin = overlapCount[0] < overlapCount[1] ? (overlapCount[0] < overlapCount[2] ? 0U : 2U) : (overlapCount[1] < overlapCount[2] ? 1U : 2U);
axis0 = axisNums[maxBin];
axis1 = (axis0 + 1) % 3;
axis2 = (axis0 + 2) % 3;
}
const uint64_t interactionBits = interactions.bits;
IndexBank<uint32_t> localOverlaps(boundsCount);
std::vector<Marker>& axis = axes[maxBin];
float boxMin1 = 0.0f;
float boxMax1 = 0.0f;
float boxMin2 = 0.0f;
float boxMax2 = 0.0f;
for (uint32_t i = 0; i < axis.size(); ++i)
{
Marker& marker = axis[i];
const uint32_t index = marker.id >> 1;
if (marker.id & 1)
{
const BoundsRep& boundsRep = *(const BoundsRep*)((uint8_t*)bounds + index*boundsByteStride);
const uint8_t interaction = (uint8_t)((interactionBits >> (boundsRep.type << 3)) & 0xFF);
const nvidia::NvBounds3& box = boundsRep.aabb;
// These conditionals compile out with optimization:
if (D > 1)
{
boxMin1 = box.minimum[axis1];
boxMax1 = box.maximum[axis1];
if (D == 3)
{
boxMin2 = box.minimum[axis2];
boxMax2 = box.maximum[axis2];
}
}
const uint32_t localOverlapCount = localOverlaps.usedCount();
const uint32_t* localOverlapIndices = localOverlaps.usedIndices();
for (uint32_t j = 0; j < localOverlapCount; ++j)
{
const uint32_t overlapIndex = localOverlapIndices[j];
const BoundsRep& overlapBoundsRep = *(const BoundsRep*)((uint8_t*)bounds + overlapIndex*boundsByteStride);
if ((interaction >> overlapBoundsRep.type) & 1)
{
const nvidia::NvBounds3& overlapBox = overlapBoundsRep.aabb;
// These conditionals compile out with optimization:
if (D > 1)
{
if (boxMin1 >= overlapBox.maximum[axis1] || boxMax1 <= overlapBox.minimum[axis1])
{
continue;
}
if (D == 3)
{
if (boxMin2 >= overlapBox.maximum[axis2] || boxMax2 <= overlapBox.minimum[axis2])
{
continue;
}
}
}
// Add overlap
IntPair pair;
pair.i0 = (int32_t)index;
pair.i1 = (int32_t)overlapIndex;
overlaps.push_back(pair);
}
}
NVBLAST_ASSERT(localOverlaps.isValid(index));
NVBLAST_ASSERT(!localOverlaps.isUsed(index));
localOverlaps.use(index);
}
else
{
// Remove local overlap
NVBLAST_ASSERT(localOverlaps.isValid(index));
localOverlaps.free(index);
}
}
}
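// Builds a start-position lookup for an ascending-sorted index array: lookup[i] is the first position in
// 'indexSource' whose value is >= indexBase + i, and lookup[indexRange] == indexCount, so entries with value
// indexBase + i occupy the range [lookup[i], lookup[i + 1]).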
void createIndexStartLookup(std::vector<uint32_t>& lookup, int32_t indexBase, uint32_t indexRange, int32_t* indexSource, uint32_t indexCount, uint32_t indexByteStride)
{
if (indexRange == 0)
{
lookup.resize(std::max(indexRange + 1, 2u));
lookup[0] = 0;
lookup[1] = indexCount;
}
else
{
lookup.resize(indexRange + 1);
uint32_t indexPos = 0;
for (uint32_t i = 0; i < indexRange; ++i)
{
for (; indexPos < indexCount; ++indexPos, indexSource = (int32_t*)((uintptr_t)indexSource + indexByteStride))
{
if (*indexSource >= (int32_t)i + indexBase)
{
lookup[i] = indexPos;
break;
}
}
if (indexPos == indexCount)
{
lookup[i] = indexPos;
}
}
lookup[indexRange] = indexCount;
}
}
////////////////////////////////////////////////
// ApexShareUtils - End
////////////////////////////////////////////////
struct CutoutVert
{
int32_t cutoutIndex;
int32_t vertIndex;
void set(int32_t _cutoutIndex, int32_t _vertIndex)
{
cutoutIndex = _cutoutIndex;
vertIndex = _vertIndex;
}
};
struct NewVertex
{
CutoutVert vertex;
float edgeProj;
};
static int compareNewVertices(const void* a, const void* b)
{
const int32_t cutoutDiff = ((NewVertex*)a)->vertex.cutoutIndex - ((NewVertex*)b)->vertex.cutoutIndex;
if (cutoutDiff)
{
return cutoutDiff;
}
const int32_t vertDiff = ((NewVertex*)a)->vertex.vertIndex - ((NewVertex*)b)->vertex.vertIndex;
if (vertDiff)
{
return vertDiff;
}
const float projDiff = ((NewVertex*)a)->edgeProj - ((NewVertex*)b)->edgeProj;
return projDiff ? (projDiff < 0.0f ? -1 : 1) : 0;
}
template<typename T>
class Map2d
{
public:
Map2d(uint32_t width, uint32_t height)
{
create_internal(width, height, NULL);
}
Map2d(uint32_t width, uint32_t height, T fillValue)
{
create_internal(width, height, &fillValue);
}
Map2d(const Map2d& map)
{
*this = map;
}
Map2d& operator = (const Map2d& map)
{
mMem.clear();
create_internal(map.mWidth, map.mHeight, NULL);
return *this;
}
void create(uint32_t width, uint32_t height)
{
return create_internal(width, height, NULL);
}
void create(uint32_t width, uint32_t height, T fillValue)
{
create_internal(width, height, &fillValue);
}
//void clear(const T value)
//{
// for (auto it = mMem.begin(); it != mMem.end(); it++)
// {
// for (auto it2 = it->begin(); it2 != it->end(); it2++)
// {
// *it2 = value;
// }
// }
//}
void setOrigin(uint32_t x, uint32_t y)
{
mOriginX = x;
mOriginY = y;
}
const T& operator()(int32_t x, int32_t y) const
{
x = (int32_t)mod(x+(int32_t)mOriginX, mWidth);
y = (int32_t)mod(y+(int32_t)mOriginY, mHeight);
return mMem[y][x];
}
T& operator()(int32_t x, int32_t y)
{
x = (int32_t)mod(x+(int32_t)mOriginX, mWidth);
y = (int32_t)mod(y+(int32_t)mOriginY, mHeight);
return mMem[y][x];
}
private:
void create_internal(uint32_t width, uint32_t height, T* val)
{
mMem.clear();
mWidth = width;
mHeight = height;
mMem.resize(mHeight);
for (auto it = mMem.begin(); it != mMem.end(); it++)
{
it->resize(mWidth, val ? *val : 0);
}
mOriginX = 0;
mOriginY = 0;
}
std::vector<std::vector<T>> mMem;
uint32_t mWidth;
uint32_t mHeight;
uint32_t mOriginX;
uint32_t mOriginY;
};
class BitMap
{
public:
BitMap() : mMem(NULL) {}
BitMap(uint32_t width, uint32_t height) : mMem(NULL)
{
create_internal(width, height, NULL);
}
BitMap(uint32_t width, uint32_t height, bool fillValue) : mMem(NULL)
{
create_internal(width, height, &fillValue);
}
BitMap(const BitMap& map)
{
*this = map;
}
~BitMap()
{
delete [] mMem;
}
BitMap& operator = (const BitMap& map)
{
delete [] mMem;
mMem = NULL;
if (map.mMem)
{
create_internal(map.mWidth, map.mHeight, NULL);
memcpy(mMem, map.mMem, mHeight * mRowBytes);
}
return *this;
}
void create(uint32_t width, uint32_t height)
{
return create_internal(width, height, NULL);
}
void create(uint32_t width, uint32_t height, bool fillValue)
{
create_internal(width, height, &fillValue);
}
void clear(bool value)
{
memset(mMem, value ? 0xFF : 0x00, mRowBytes * mHeight);
}
void setOrigin(uint32_t x, uint32_t y)
{
mOriginX = x;
mOriginY = y;
}
bool read(int32_t x, int32_t y) const
{
x = (int32_t)mod(x+(int32_t)mOriginX, mWidth);
y = (int32_t)mod(y+(int32_t)mOriginY, mHeight);
return ((mMem[(x >> 3) + y * mRowBytes] >> (x & 7)) & 1) != 0;
}
void set(int32_t x, int32_t y)
{
x = (int32_t)mod(x+(int32_t)mOriginX, mWidth);
y = (int32_t)mod(y+(int32_t)mOriginY, mHeight);
mMem[(x >> 3) + y * mRowBytes] |= 1 << (x & 7);
}
void reset(int32_t x, int32_t y)
{
x = (int32_t)mod(x+(int32_t)mOriginX, mWidth);
y = (int32_t)mod(y+(int32_t)mOriginY, mHeight);
mMem[(x >> 3) + y * mRowBytes] &= ~(1 << (x & 7));
}
private:
void create_internal(uint32_t width, uint32_t height, bool* val)
{
delete [] mMem;
mRowBytes = (width + 7) >> 3;
const uint32_t bytes = mRowBytes * height;
if (bytes == 0)
{
mWidth = mHeight = 0;
mMem = NULL;
return;
}
mWidth = width;
mHeight = height;
mMem = new uint8_t[bytes];
mOriginX = 0;
mOriginY = 0;
if (val)
{
clear(*val);
}
}
uint8_t* mMem;
uint32_t mWidth;
uint32_t mHeight;
uint32_t mRowBytes;
uint32_t mOriginX;
uint32_t mOriginY;
};
int32_t taxicabSine(int32_t i)
{
// 0 1 1 1 0 -1 -1 -1
return (int32_t)((0x01A9 >> ((i & 7) << 1)) & 3) - 1;
}
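// Illustrative note (not in the original source): pairing taxicabSine(i + 2) with taxicabSine(i), as the
// region tracer below does, steps through the 8 neighbouring pixel directions:
//   i :       0       1       2        3        4        5        6       7
//   (dx,dy): (1,0)   (1,1)   (0,1)   (-1,1)   (-1,0)  (-1,-1)   (0,-1)  (1,-1)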
// Only looks at x and y components
bool directionsXYOrderedCCW(const nvidia::NvVec3& d0, const nvidia::NvVec3& d1, const nvidia::NvVec3& d2)
{
const bool ccw02 = crossZ(d0, d2) > 0.0f;
const bool ccw01 = crossZ(d0, d1) > 0.0f;
const bool ccw21 = crossZ(d2, d1) > 0.0f;
return ccw02 ? ccw01 && ccw21 : ccw01 || ccw21;
}
std::pair<float, float> compareTraceSegmentToLineSegment(const std::vector<POINT2D>& trace, int _start, int delta, float distThreshold, uint32_t width, uint32_t height, bool hasBorder)
{
if (delta < 2)
{
return std::make_pair(0.0f, 0.0f);
}
const uint32_t size = trace.size();
uint32_t start = (uint32_t)_start, end = (uint32_t)(_start + delta) % size;
const bool startIsOnBorder = hasBorder && (trace[start].x == -1 || trace[start].x == (int)width || trace[start].y == -1 || trace[start].y == (int)height);
const bool endIsOnBorder = hasBorder && (trace[end].x == -1 || trace[end].x == (int)width || trace[end].y == -1 || trace[end].y == (int)height);
if (startIsOnBorder || endIsOnBorder)
{
if ((trace[start].x == -1 && trace[end].x == -1) ||
(trace[start].y == -1 && trace[end].y == -1) ||
(trace[start].x == (int)width && trace[end].x == (int)width) ||
(trace[start].y == (int)height && trace[end].y == (int)height))
{
return std::make_pair(0.0f, 0.0f);
}
return std::make_pair(NV_MAX_F32, NV_MAX_F32);
}
nvidia::NvVec3 orig((float)trace[start].x, (float)trace[start].y, 0);
nvidia::NvVec3 dest((float)trace[end].x, (float)trace[end].y, 0);
nvidia::NvVec3 dir = dest - orig;
dir.normalize();
float aveError = 0.0f;
float aveError2 = 0.0f;
for (;;)
{
if (++start >= size)
{
start = 0;
}
if (start == end)
{
break;
}
nvidia::NvVec3 testDisp((float)trace[start].x, (float)trace[start].y, 0);
testDisp -= orig;
aveError += (float)(nvidia::NvAbs(testDisp.x * dir.y - testDisp.y * dir.x) >= distThreshold);
aveError2 += nvidia::NvAbs(testDisp.x * dir.y - testDisp.y * dir.x);
}
aveError /= delta - 1;
aveError2 /= delta - 1;
return std::make_pair(aveError, aveError2);
}
// Segment i starts at vi and ends at vi+ei
// Tests for overlap in segments' projection onto xy plane
// Returns distance between line segments. (Negative value indicates overlap.)
float segmentsIntersectXY(const nvidia::NvVec3& v0, const nvidia::NvVec3& e0, const nvidia::NvVec3& v1, const nvidia::NvVec3& e1)
{
const nvidia::NvVec3 dv = v1 - v0;
nvidia::NvVec3 d0 = e0;
d0.normalize();
nvidia::NvVec3 d1 = e1;
d1.normalize();
const float c10 = crossZ(dv, d0);
const float d10 = crossZ(e1, d0);
float a1 = nvidia::NvAbs(c10);
float b1 = nvidia::NvAbs(c10 + d10);
if (c10 * (c10 + d10) < 0.0f)
{
if (a1 < b1)
{
a1 = -a1;
}
else
{
b1 = -b1;
}
}
const float c01 = crossZ(d1, dv);
const float d01 = crossZ(e0, d1);
float a2 = nvidia::NvAbs(c01);
float b2 = nvidia::NvAbs(c01 + d01);
if (c01 * (c01 + d01) < 0.0f)
{
if (a2 < b2)
{
a2 = -a2;
}
else
{
b2 = -b2;
}
}
return nvidia::NvMax(nvidia::NvMin(a1, b1), nvidia::NvMin(a2, b2));
}
// If point projects onto segment, returns true and proj is set to a
// value in the range [0,1], indicating where along the segment (from v0 to v1)
// the projection lies, and dist2 is set to the distance squared from point to
// the line segment. Otherwise, returns false.
// Note: if v1 = v0 the function returns true with proj = 0 only when margin is 0; a positive margin rejects it.
bool projectOntoSegmentXY(float& proj, float& dist2, const nvidia::NvVec3& point, const nvidia::NvVec3& v0, const nvidia::NvVec3& v1, float margin)
{
const nvidia::NvVec3 seg = v1 - v0;
const nvidia::NvVec3 x = point - v0;
const float seg2 = dotXY(seg, seg);
const float d = dotXY(x, seg);
if (d < 0.0f || d > seg2)
{
return false;
}
const float margin2 = margin * margin;
const float p = seg2 > 0.0f ? d / seg2 : 0.0f;
const float lineDist2 = d * p;
if (lineDist2 < margin2)
{
return false;
}
const float pPrime = 1.0f - p;
const float dPrime = seg2 - d;
const float lineDistPrime2 = dPrime * pPrime;
if (lineDistPrime2 < margin2)
{
return false;
}
proj = p;
dist2 = dotXY(x, x) - lineDist2;
return true;
}
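// A minimal usage sketch (illustrative only, not part of the original source; point/segStart/segEnd are
// placeholder names):
//
//     float proj, dist2;
//     if (projectOntoSegmentXY(proj, dist2, point, segStart, segEnd, 0.25f))
//     {
//         // proj in [0,1] locates the projection along segStart->segEnd (at least 'margin' away from either end),
//         // and dist2 is the squared XY distance from point to the segment.
//     }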
bool isOnBorder(const nvidia::NvVec3& v, uint32_t width, uint32_t height)
{
return v.x < -0.5f || v.x >= width - 0.5f || v.y < -0.5f || v.y >= height - 0.5f;
}
static void createCutout(Nv::Blast::Cutout& cutout, const std::vector<POINT2D>& trace, float segmentationErrorThreshold, float snapThreshold, uint32_t width, uint32_t height, bool hasBorder)
{
cutout.vertices.clear();
cutout.smoothingGroups.clear();
std::vector<int> smoothingGroups;
const uint32_t traceSize = trace.size();
if (traceSize == 0)
{
return; // Nothing to do
}
uint32_t size = traceSize;
std::vector<int> vertexIndices;
const float pixelCenterOffset = hasBorder ? 0.5f : 0.0f;
// Find best segment
uint32_t start = 0;
uint32_t delta = 0;
float err2 = 0.f;
for (uint32_t iStart = 0; iStart < size; ++iStart)
{
uint32_t iDelta = (size >> 1) + (size & 1);
for (; iDelta > 1; --iDelta)
{
auto fit = compareTraceSegmentToLineSegment(trace, (int32_t)iStart, (int32_t)iDelta, CUTOUT_DISTANCE_THRESHOLD, width, height, hasBorder);
if (fit.first < segmentationErrorThreshold)
{
err2 = fit.second;
break;
}
}
if (iDelta > delta)
{
start = iStart;
delta = iDelta;
}
}
if (err2 < segmentationErrorThreshold)
{
smoothingGroups.push_back(cutout.vertices.size());
}
cutout.vertices.push_back(nvidia::NvVec3((float)trace[start].x + pixelCenterOffset, (float)trace[start].y + pixelCenterOffset, 0));
// Now complete the loop
while ((size -= delta) > 0)
{
start = (start + delta) % traceSize;
cutout.vertices.push_back(nvidia::NvVec3((float)trace[start].x + pixelCenterOffset, (float)trace[start].y + pixelCenterOffset, 0));
if (size == 1)
{
delta = 1;
break;
}
bool sg = true;
for (delta = size - 1; delta > 1; --delta)
{
auto fit = compareTraceSegmentToLineSegment(trace, (int32_t)start, (int32_t)delta, CUTOUT_DISTANCE_THRESHOLD, width, height, hasBorder);
if (fit.first < segmentationErrorThreshold)
{
if (fit.second > segmentationErrorThreshold)
{
sg = false;
}
break;
}
}
if (sg)
{
smoothingGroups.push_back(cutout.vertices.size());
}
}
const float snapThresh2 = square(snapThreshold);
// Use the snapThreshold to clean up
while ((size = cutout.vertices.size()) >= 4)
{
bool reduced = false;
for (uint32_t i = 0; i < size; ++i)
{
const uint32_t i1 = (i + 1) % size;
const uint32_t i2 = (i + 2) % size;
const uint32_t i3 = (i + 3) % size;
nvidia::NvVec3& v0 = cutout.vertices[i];
nvidia::NvVec3& v1 = cutout.vertices[i1];
nvidia::NvVec3& v2 = cutout.vertices[i2];
nvidia::NvVec3& v3 = cutout.vertices[i3];
const nvidia::NvVec3 d0 = v1 - v0;
const nvidia::NvVec3 d1 = v2 - v1;
const nvidia::NvVec3 d2 = v3 - v2;
const float den = crossZ(d0, d2);
if (den != 0)
{
const float recipDen = 1.0f / den;
const float s0 = crossZ(d1, d2) * recipDen;
const float s2 = crossZ(d0, d1) * recipDen;
if (s0 >= 0 || s2 >= 0)
{
                    if (d0.magnitudeSquared() * s0 * s0 <= snapThresh2 && d2.magnitudeSquared() * s2 * s2 <= snapThresh2)
{
v1 += d0 * s0;
//uint32_t index = (uint32_t)(&v2 - cutout.vertices.begin());
int dist = std::distance(cutout.vertices.data(), &v2);
cutout.vertices.erase(cutout.vertices.begin() + dist);
for (auto& idx : smoothingGroups)
{
if (idx > dist)
{
idx--;
}
}
reduced = true;
break;
}
}
}
}
if (!reduced)
{
break;
}
}
for (size_t i = 0; i < smoothingGroups.size(); i++)
{
if (i > 0 && smoothingGroups[i] == smoothingGroups[i - 1])
{
continue;
}
if (smoothingGroups[i] < static_cast<int>(cutout.vertices.size()))
{
cutout.smoothingGroups.push_back(cutout.vertices[smoothingGroups[i]]);
}
}
}
static void splitTJunctions(Nv::Blast::CutoutSetImpl& cutoutSet, float threshold)
{
// Set bounds reps
std::vector<BoundsRep> bounds;
std::vector<CutoutVert> cutoutMap; // maps bounds # -> ( cutout #, vertex # ).
std::vector<IntPair> overlaps;
const float distThreshold2 = threshold * threshold;
// Split T-junctions
uint32_t edgeCount = 0;
for (uint32_t i = 0; i < cutoutSet.cutoutLoops.size(); ++i)
{
edgeCount += cutoutSet.cutoutLoops[i].vertices.size();
}
bounds.resize(edgeCount);
cutoutMap.resize(edgeCount);
edgeCount = 0;
for (uint32_t i = 0; i < cutoutSet.cutoutLoops.size(); ++i)
{
Nv::Blast::Cutout& cutout = cutoutSet.cutoutLoops[i];
const uint32_t cutoutSize = cutout.vertices.size();
for (uint32_t j = 0; j < cutoutSize; ++j)
{
bounds[edgeCount].aabb.include(cutout.vertices[j]);
bounds[edgeCount].aabb.include(cutout.vertices[(j + 1) % cutoutSize]);
NVBLAST_ASSERT(!bounds[edgeCount].aabb.isEmpty());
bounds[edgeCount].aabb.fattenFast(threshold);
cutoutMap[edgeCount].set((int32_t)i, (int32_t)j);
++edgeCount;
}
}
// Find bounds overlaps
if (bounds.size() > 0)
{
boundsCalculateOverlaps(overlaps, Bounds3XY, &bounds[0], bounds.size(), sizeof(bounds[0]));
}
std::vector<NewVertex> newVertices;
for (uint32_t overlapIndex = 0; overlapIndex < overlaps.size(); ++overlapIndex)
{
const IntPair& mapPair = overlaps[overlapIndex];
const CutoutVert& seg0Map = cutoutMap[(uint32_t)mapPair.i0];
const CutoutVert& seg1Map = cutoutMap[(uint32_t)mapPair.i1];
if (seg0Map.cutoutIndex == seg1Map.cutoutIndex)
{
// Only split based on vertex/segment junctions from different cutouts
continue;
}
NewVertex newVertex;
float dist2 = 0;
const Nv::Blast::Cutout& cutout0 = cutoutSet.cutoutLoops[(uint32_t)seg0Map.cutoutIndex];
const uint32_t cutoutSize0 = cutout0.vertices.size();
const Nv::Blast::Cutout& cutout1 = cutoutSet.cutoutLoops[(uint32_t)seg1Map.cutoutIndex];
const uint32_t cutoutSize1 = cutout1.vertices.size();
if (projectOntoSegmentXY(newVertex.edgeProj, dist2, cutout0.vertices[(uint32_t)seg0Map.vertIndex], cutout1.vertices[(uint32_t)seg1Map.vertIndex],
cutout1.vertices[(uint32_t)(seg1Map.vertIndex + 1) % cutoutSize1], 0.25f))
{
if (dist2 <= distThreshold2)
{
newVertex.vertex = seg1Map;
newVertices.push_back(newVertex);
}
}
if (projectOntoSegmentXY(newVertex.edgeProj, dist2, cutout1.vertices[(uint32_t)seg1Map.vertIndex], cutout0.vertices[(uint32_t)seg0Map.vertIndex],
cutout0.vertices[(uint32_t)(seg0Map.vertIndex + 1) % cutoutSize0], 0.25f))
{
if (dist2 <= distThreshold2)
{
newVertex.vertex = seg0Map;
newVertices.push_back(newVertex);
}
}
}
if (newVertices.size())
{
// Sort new vertices
qsort(newVertices.data(), newVertices.size(), sizeof(NewVertex), compareNewVertices);
// Insert new vertices
uint32_t lastCutoutIndex = 0xFFFFFFFF;
uint32_t lastVertexIndex = 0xFFFFFFFF;
float lastProj = 1.0f;
for (uint32_t newVertexIndex = newVertices.size(); newVertexIndex--;)
{
const NewVertex& newVertex = newVertices[newVertexIndex];
if (newVertex.vertex.cutoutIndex != (int32_t)lastCutoutIndex)
{
lastCutoutIndex = (uint32_t)newVertex.vertex.cutoutIndex;
lastVertexIndex = 0xFFFFFFFF;
}
if (newVertex.vertex.vertIndex != (int32_t)lastVertexIndex)
{
lastVertexIndex = (uint32_t)newVertex.vertex.vertIndex;
lastProj = 1.0f;
}
Nv::Blast::Cutout& cutout = cutoutSet.cutoutLoops[(uint32_t)newVertex.vertex.cutoutIndex];
const float proj = lastProj > 0.0f ? newVertex.edgeProj / lastProj : 0.0f;
const nvidia::NvVec3 pos = (1.0f - proj) * cutout.vertices[(uint32_t)newVertex.vertex.vertIndex]
+ proj * cutout.vertices[(uint32_t)(newVertex.vertex.vertIndex + 1) % cutout.vertices.size()];
cutout.vertices.push_back(nvidia::NvVec3());
for (uint32_t n = cutout.vertices.size(); --n > (uint32_t)newVertex.vertex.vertIndex + 1;)
{
cutout.vertices[n] = cutout.vertices[n - 1];
}
cutout.vertices[(uint32_t)newVertex.vertex.vertIndex + 1] = pos;
lastProj = newVertex.edgeProj;
}
}
}
static void mergeVertices(Nv::Blast::CutoutSetImpl& cutoutSet, float threshold, uint32_t width, uint32_t height)
{
// Set bounds reps
uint32_t vertexCount = 0;
for (uint32_t i = 0; i < cutoutSet.cutoutLoops.size(); ++i)
{
vertexCount += cutoutSet.cutoutLoops[i].vertices.size();
}
std::vector<BoundsRep> bounds;
std::vector<CutoutVert> cutoutMap; // maps bounds # -> ( cutout #, vertex # ).
bounds.resize(vertexCount);
cutoutMap.resize(vertexCount);
vertexCount = 0;
for (uint32_t i = 0; i < cutoutSet.cutoutLoops.size(); ++i)
{
Nv::Blast::Cutout& cutout = cutoutSet.cutoutLoops[i];
for (uint32_t j = 0; j < cutout.vertices.size(); ++j)
{
nvidia::NvVec3& vertex = cutout.vertices[j];
nvidia::NvVec3 min(vertex.x - threshold, vertex.y - threshold, 0.0f);
nvidia::NvVec3 max(vertex.x + threshold, vertex.y + threshold, 0.0f);
bounds[vertexCount].aabb = nvidia::NvBounds3(min, max);
cutoutMap[vertexCount].set((int32_t)i, (int32_t)j);
++vertexCount;
}
}
// Find bounds overlaps
std::vector<IntPair> overlaps;
if (bounds.size() > 0)
{
boundsCalculateOverlaps(overlaps, Bounds3XY, &bounds[0], bounds.size(), sizeof(bounds[0]));
}
uint32_t overlapCount = overlaps.size();
if (overlapCount == 0)
{
return;
}
// Sort by first index
qsort(overlaps.data(), overlapCount, sizeof(IntPair), IntPair::compare);
const float threshold2 = threshold * threshold;
std::vector<IntPair> pairs;
// Group by first index
std::vector<uint32_t> lookup;
createIndexStartLookup(lookup, 0, vertexCount, &overlaps.begin()->i0, overlapCount, sizeof(IntPair));
for (uint32_t i = 0; i < vertexCount; ++i)
{
const uint32_t start = lookup[i];
const uint32_t stop = lookup[i + 1];
if (start == stop)
{
continue;
}
const CutoutVert& cutoutVert0 = cutoutMap[(uint32_t)overlaps[start].i0];
const nvidia::NvVec3& vert0 = cutoutSet.cutoutLoops[(uint32_t)cutoutVert0.cutoutIndex].vertices[(uint32_t)cutoutVert0.vertIndex];
const bool isOnBorder0 = !cutoutSet.periodic && isOnBorder(vert0, width, height);
for (uint32_t j = start; j < stop; ++j)
{
const CutoutVert& cutoutVert1 = cutoutMap[(uint32_t)overlaps[j].i1];
if (cutoutVert0.cutoutIndex == cutoutVert1.cutoutIndex)
{
// No pairs from the same cutout
continue;
}
const nvidia::NvVec3& vert1 = cutoutSet.cutoutLoops[(uint32_t)cutoutVert1.cutoutIndex].vertices[(uint32_t)cutoutVert1.vertIndex];
const bool isOnBorder1 = !cutoutSet.periodic && isOnBorder(vert1, width, height);
if (isOnBorder0 != isOnBorder1)
{
// No border/non-border pairs
continue;
}
if ((vert0 - vert1).magnitudeSquared() > threshold2)
{
// Distance outside threshold
continue;
}
// A keeper. Keep a symmetric list
IntPair overlap = overlaps[j];
pairs.push_back(overlap);
const int32_t i0 = overlap.i0;
overlap.i0 = overlap.i1;
overlap.i1 = i0;
pairs.push_back(overlap);
}
}
if (pairs.size() == 0)
{
return;
}
// Sort by first index
qsort(pairs.data(), pairs.size(), sizeof(IntPair), IntPair::compare);
// For every vertex, only keep closest neighbor from each cutout
createIndexStartLookup(lookup, 0, vertexCount, &pairs.begin()->i0, pairs.size(), sizeof(IntPair));
for (uint32_t i = 0; i < vertexCount; ++i)
{
const uint32_t start = lookup[i];
const uint32_t stop = lookup[i + 1];
if (start == stop)
{
continue;
}
const CutoutVert& cutoutVert0 = cutoutMap[(uint32_t)pairs[start].i0];
const nvidia::NvVec3& vert0 = cutoutSet.cutoutLoops[(uint32_t)cutoutVert0.cutoutIndex].vertices[(uint32_t)cutoutVert0.vertIndex];
uint32_t groupStart = start;
while (groupStart < stop)
{
uint32_t next = groupStart;
const CutoutVert& cutoutVert1 = cutoutMap[(uint32_t)pairs[next].i1];
int32_t currentOtherCutoutIndex = cutoutVert1.cutoutIndex;
const nvidia::NvVec3& vert1 = cutoutSet.cutoutLoops[(uint32_t)currentOtherCutoutIndex].vertices[(uint32_t)cutoutVert1.vertIndex];
uint32_t keep = groupStart;
float minDist2 = (vert0 - vert1).magnitudeSquared();
while (++next < stop)
{
const CutoutVert& cutoutVertNext = cutoutMap[(uint32_t)pairs[next].i1];
if (currentOtherCutoutIndex != cutoutVertNext.cutoutIndex)
{
break;
}
const nvidia::NvVec3& vertNext = cutoutSet.cutoutLoops[(uint32_t)cutoutVertNext.cutoutIndex].vertices[(uint32_t)cutoutVertNext.vertIndex];
const float dist2 = (vert0 - vertNext).magnitudeSquared();
if (dist2 < minDist2)
{
pairs[keep].set(-1, -1); // Invalidate
keep = next;
minDist2 = dist2;
}
else
{
pairs[next].set(-1, -1); // Invalidate
}
}
groupStart = next;
}
}
// Eliminate invalid pairs (compactify)
uint32_t pairCount = 0;
for (uint32_t i = 0; i < pairs.size(); ++i)
{
if (pairs[i].i0 >= 0 && pairs[i].i1 >= 0)
{
pairs[pairCount++] = pairs[i];
}
}
pairs.resize(pairCount);
// Snap points together
std::vector<bool> pinned(vertexCount, false);
for (uint32_t i = 0; i < pairCount; ++i)
{
const uint32_t i0 = (uint32_t)pairs[i].i0;
if (pinned[i0])
{
continue;
}
const CutoutVert& cutoutVert0 = cutoutMap[i0];
nvidia::NvVec3& vert0 = cutoutSet.cutoutLoops[(uint32_t)cutoutVert0.cutoutIndex].vertices[(uint32_t)cutoutVert0.vertIndex];
const uint32_t i1 = (uint32_t)pairs[i].i1;
const CutoutVert& cutoutVert1 = cutoutMap[i1];
nvidia::NvVec3& vert1 = cutoutSet.cutoutLoops[(uint32_t)cutoutVert1.cutoutIndex].vertices[(uint32_t)cutoutVert1.vertIndex];
const nvidia::NvVec3 disp = vert1 - vert0;
// Move and pin
pinned[i0] = true;
if (pinned[i1])
{
vert0 = vert1;
}
else
{
vert0 += 0.5f * disp;
vert1 = vert0;
pinned[i1] = true;
}
}
}
static void eliminateStraightAngles(Nv::Blast::CutoutSetImpl& cutoutSet)
{
// Eliminate straight angles
for (uint32_t i = 0; i < cutoutSet.cutoutLoops.size(); ++i)
{
Nv::Blast::Cutout& cutout = cutoutSet.cutoutLoops[i];
uint32_t oldSize;
do
{
oldSize = cutout.vertices.size();
for (uint32_t j = 0; j < cutout.vertices.size();)
{
// if( isOnBorder( cutout.vertices[j], width, height ) )
// { // Don't eliminate border vertices
// ++j;
// continue;
// }
if (perpendicularDistanceSquared(cutout.vertices, j) < CUTOUT_DISTANCE_EPS * CUTOUT_DISTANCE_EPS)
{
cutout.vertices.erase(cutout.vertices.begin() + j);
}
else
{
++j;
}
}
}
while (cutout.vertices.size() != oldSize);
}
}
static void removeTheSamePoints(Nv::Blast::CutoutSetImpl& cutoutSet)
{
for (uint32_t i = 0; i < cutoutSet.cutoutLoops.size(); ++i)
{
Nv::Blast::Cutout& cutout = cutoutSet.cutoutLoops[i];
uint32_t oldSize;
do
{
oldSize = cutout.vertices.size();
for (uint32_t j = 0; j < cutout.vertices.size();)
{
if ((cutout.vertices[(j + cutout.vertices.size() - 1) % cutout.vertices.size()] - cutout.vertices[j]).magnitudeSquared() < CUTOUT_DISTANCE_EPS * CUTOUT_DISTANCE_EPS)
{
cutout.vertices.erase(cutout.vertices.begin() + j);
}
else
{
++j;
}
}
} while (cutout.vertices.size() != oldSize);
}
}
static void simplifyCutoutSetImpl(Nv::Blast::CutoutSetImpl& cutoutSet, float threshold, uint32_t width, uint32_t height)
{
splitTJunctions(cutoutSet, 1.0f);
mergeVertices(cutoutSet, threshold, width, height);
eliminateStraightAngles(cutoutSet);
splitTJunctions(cutoutSet, 1.0f);
removeTheSamePoints(cutoutSet);
}
//static void cleanCutout(Nv::Blast::Cutout& cutout, uint32_t loopIndex, float tolerance)
//{
// Nv::Blast::ConvexLoop& loop = cutout.convexLoops[loopIndex];
// const float tolerance2 = tolerance * tolerance;
// uint32_t oldSize;
// do
// {
// oldSize = loop.polyVerts.size();
// uint32_t size = oldSize;
// for (uint32_t i = 0; i < size; ++i)
// {
// Nv::Blast::PolyVert& v0 = loop.polyVerts[(i + size - 1) % size];
// Nv::Blast::PolyVert& v1 = loop.polyVerts[i];
// Nv::Blast::PolyVert& v2 = loop.polyVerts[(i + 1) % size];
// if (perpendicularDistanceSquared(cutout.vertices[v0.index], cutout.vertices[v1.index], cutout.vertices[v2.index]) <= tolerance2)
// {
// loop.polyVerts.erase(loop.polyVerts.begin() + i);
// --size;
// --i;
// }
// }
// }
// while (loop.polyVerts.size() != oldSize);
//}
//static bool decomposeCutoutIntoConvexLoops(Nv::Blast::Cutout& cutout, float cleanupTolerance = 0.0f)
//{
// const uint32_t size = cutout.vertices.size();
//
// if (size < 3)
// {
// return false;
// }
//
// // Initialize to one loop, which may not be convex
// cutout.convexLoops.resize(1);
// cutout.convexLoops[0].polyVerts.resize(size);
//
// // See if the winding is ccw:
//
// // Scale to normalized size to avoid overflows
// nvidia::NvBounds3 bounds;
// bounds.setEmpty();
// for (uint32_t i = 0; i < size; ++i)
// {
// bounds.include(cutout.vertices[i]);
// }
// nvidia::NvVec3 center = bounds.getCenter();
// nvidia::NvVec3 extent = bounds.getExtents();
// if (extent[0] < NV_EPS_F32 || extent[1] < NV_EPS_F32)
// {
// return false;
// }
// const nvidia::NvVec3 scale(1.0f / extent[0], 1.0f / extent[1], 0.0f);
//
// // Find "area" (it will only be correct in sign!)
// nvidia::NvVec3 prevV = (cutout.vertices[size - 1] - center).multiply(scale);
// float area = 0.0f;
// for (uint32_t i = 0; i < size; ++i)
// {
// const nvidia::NvVec3 v = (cutout.vertices[i] - center).multiply(scale);
// area += crossZ(prevV, v);
// prevV = v;
// }
//
// if (nvidia::NvAbs(area) < NV_EPS_F32 * NV_EPS_F32)
// {
// return false;
// }
//
// const bool ccw = area > 0.0f;
//
// for (uint32_t i = 0; i < size; ++i)
// {
// Nv::Blast::PolyVert& vert = cutout.convexLoops[0].polyVerts[i];
// vert.index = (uint16_t)(ccw ? i : size - i - 1);
// vert.flags = 0;
// }
//
// const float cleanupTolerance2 = square(cleanupTolerance);
//
// // Find reflex vertices
// for (uint32_t i = 0; i < cutout.convexLoops.size();)
// {
// Nv::Blast::ConvexLoop& loop = cutout.convexLoops[i];
// const uint32_t loopSize = loop.polyVerts.size();
// if (loopSize <= 3)
// {
// ++i;
// continue;
// }
// uint32_t j = 0;
// for (; j < loopSize; ++j)
// {
// const nvidia::NvVec3& v0 = cutout.vertices[loop.polyVerts[(j + loopSize - 1) % loopSize].index];
// const nvidia::NvVec3& v1 = cutout.vertices[loop.polyVerts[j].index];
// const nvidia::NvVec3& v2 = cutout.vertices[loop.polyVerts[(j + 1) % loopSize].index];
// const nvidia::NvVec3 e0 = v1 - v0;
// if (crossZ(e0, v2 - v1) < 0.0f)
// {
// // reflex
// break;
// }
// }
// if (j < loopSize)
// {
// // Find a vertex
// float minLen2 = NV_MAX_F32;
// float maxMinDist = -NV_MAX_F32;
// uint32_t kToUse = 0;
// uint32_t mToUse = 2;
// bool cleanSliceFound = false; // A transversal is parallel with an edge
// for (uint32_t k = 0; k < loopSize; ++k)
// {
// const nvidia::NvVec3& vkPrev = cutout.vertices[loop.polyVerts[(k + loopSize - 1) % loopSize].index];
// const nvidia::NvVec3& vk = cutout.vertices[loop.polyVerts[k].index];
// const nvidia::NvVec3& vkNext = cutout.vertices[loop.polyVerts[(k + 1) % loopSize].index];
// const uint32_t mStop = k ? loopSize : loopSize - 1;
// for (uint32_t m = k + 2; m < mStop; ++m)
// {
// const nvidia::NvVec3& vmPrev = cutout.vertices[loop.polyVerts[(m + loopSize - 1) % loopSize].index];
// const nvidia::NvVec3& vm = cutout.vertices[loop.polyVerts[m].index];
// const nvidia::NvVec3& vmNext = cutout.vertices[loop.polyVerts[(m + 1) % loopSize].index];
// const nvidia::NvVec3 newEdge = vm - vk;
// if (!directionsXYOrderedCCW(vk - vkPrev, newEdge, vkNext - vk) ||
// !directionsXYOrderedCCW(vm - vmPrev, -newEdge, vmNext - vm))
// {
// continue;
// }
// const float len2 = newEdge.magnitudeSquared();
// float minDist = NV_MAX_F32;
// for (uint32_t l = 0; l < loopSize; ++l)
// {
// const uint32_t l1 = (l + 1) % loopSize;
// if (l == k || l1 == k || l == m || l1 == m)
// {
// continue;
// }
// const nvidia::NvVec3& vl = cutout.vertices[loop.polyVerts[l].index];
// const nvidia::NvVec3& vl1 = cutout.vertices[loop.polyVerts[l1].index];
// const float dist = segmentsIntersectXY(vl, vl1 - vl, vk, newEdge);
// if (dist < minDist)
// {
// minDist = dist;
// }
// }
// if (minDist <= 0.0f)
// {
// if (minDist > maxMinDist)
// {
// maxMinDist = minDist;
// kToUse = k;
// mToUse = m;
// }
// }
// else
// {
// if (perpendicularDistanceSquared(vkPrev, vk, vm) <= cleanupTolerance2 ||
// perpendicularDistanceSquared(vk, vm, vmNext) <= cleanupTolerance2)
// {
// if (!cleanSliceFound)
// {
// minLen2 = len2;
// kToUse = k;
// mToUse = m;
// }
// else
// {
// if (len2 < minLen2)
// {
// minLen2 = len2;
// kToUse = k;
// mToUse = m;
// }
// }
// cleanSliceFound = true;
// }
// else if (!cleanSliceFound && len2 < minLen2)
// {
// minLen2 = len2;
// kToUse = k;
// mToUse = m;
// }
// }
// }
// }
// cutout.convexLoops.push_back(Nv::Blast::ConvexLoop());
// Nv::Blast::ConvexLoop& newLoop = cutout.convexLoops.back();
// Nv::Blast::ConvexLoop& oldLoop = cutout.convexLoops[i];
// newLoop.polyVerts.resize(mToUse - kToUse + 1);
// for (uint32_t n = 0; n <= mToUse - kToUse; ++n)
// {
// newLoop.polyVerts[n] = oldLoop.polyVerts[kToUse + n];
// }
// newLoop.polyVerts[mToUse - kToUse].flags = 1; // Mark this vertex (and edge that follows) as a split edge
// oldLoop.polyVerts[kToUse].flags = 1; // Mark this vertex (and edge that follows) as a split edge
// oldLoop.polyVerts.erase(oldLoop.polyVerts.begin() + kToUse + 1, oldLoop.polyVerts.begin() + (mToUse - (kToUse + 1)));
// if (cleanupTolerance > 0.0f)
// {
// cleanCutout(cutout, i, cleanupTolerance);
// cleanCutout(cutout, cutout.convexLoops.size() - 1, cleanupTolerance);
// }
// }
// else
// {
// if (cleanupTolerance > 0.0f)
// {
// cleanCutout(cutout, i, cleanupTolerance);
// }
// ++i;
// }
// }
//
// return true;
//}
static void traceRegion(std::vector<POINT2D>& trace, Map2d<uint32_t>& regions, Map2d<uint8_t>& pathCounts, uint32_t regionIndex, const POINT2D& startPoint)
{
POINT2D t = startPoint;
trace.clear();
trace.push_back(t);
++pathCounts(t.x, t.y); // Increment path count
// Find initial path direction
int32_t dirN;
uint32_t previousRegion = 0xFFFFFFFF;
for (dirN = 0; dirN < 8; ++dirN) //TODO Should we start from dirN = 0?
{
const POINT2D t1 = POINT2D(t.x + taxicabSine(dirN + 2), t.y + taxicabSine(dirN));
if (regions(t1.x, t1.y) != regionIndex && previousRegion == regionIndex)
{
break;
}
previousRegion = regions(t1.x, t1.y);
}
bool done = false;
do
{
for (int32_t i = 1; i < 8; ++i) // Skip direction we just came from
{
--dirN;
const POINT2D t1 = POINT2D(t.x + taxicabSine(dirN + 2), t.y + taxicabSine(dirN));
if (regions(t1.x, t1.y) != regionIndex)
{
if (t1.x == trace[0].x && t1.y == trace[0].y)
{
done = true;
break;
}
trace.push_back(t1);
t = t1;
++pathCounts(t.x, t.y); // Increment path count
dirN += 4;
break;
}
}
} while (!done && dirN >= 0);
//NvBlast GWD-399: Try to fix bad corners
int32_t sz = (int32_t)trace.size();
if (sz > 4)
{
struct CornerPixel
{
int32_t id;
POINT2D p;
CornerPixel(int32_t id, int32_t x, int32_t y) : id(id), p(x, y) { }
};
std::vector <CornerPixel> cp;
int32_t xb = 0, yb = 0; // bit buffer: stores 1 if the value did not change from the previous point and 0 otherwise (5 bits are used)
for (int32_t i = -4; i < sz; i++) //fill buffer with 4 elements from the end of trace
{
//idx, idx - 1, idx - 2, idx - 3 values with correct indexing to trace
int32_t idx = (sz + i) % sz, idx_ = (sz + i - 1) % sz, idx__ = (sz + i - 2) % sz, idx___ = (sz + i - 3) % sz;
//update buffer
xb <<= 1;
yb <<= 1;
xb += (trace[idx].x - trace[idx_].x) == 0;
yb += (trace[idx].y - trace[idx_].y) == 0;
//filter buffer for 11100-00111 or 00111-11100 corner patterns
if (i >= 0 && ((xb & 0x1F) ^ (yb & 0x1F)) == 0x1B)
{
if ((xb & 3) == 3)
{
if (((yb >> 3) & 3) == 3)
{
cp.push_back(CornerPixel(idx__, trace[idx].x, trace[idx___].y));
}
}
else if ((yb & 3) == 3)
{
if (((xb >> 3) & 3) == 3)
{
cp.push_back(CornerPixel(idx__, trace[idx___].x, trace[idx].y));
}
}
}
}
std::sort(cp.begin(), cp.end(), [](const CornerPixel& cp1, const CornerPixel& cp2) -> bool
{
return cp1.id > cp2.id;
});
for (auto it = cp.begin(); it != cp.end(); it++)
{
trace.insert(trace.begin() + it->id, it->p);
++pathCounts(it->p.x, it->p.y);
}
}
}
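// Builds a cutout set from an RGB8 pixel buffer:
// 1. Threshold the pixels into a bitmap; non-periodic maps get a one-pixel solid border.
// 2. Flood-fill each empty area into a region and trace its outer and inner boundaries with traceRegion.
// 3. If expandGaps is set, grow the regions until neighboring traces overlap.
// 4. Convert each trace into a cutout loop via createCutout and, when gaps were expanded, simplify the
//    set; a single non-periodic cutout that covers the whole buffer is discarded at the end.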
void Nv::Blast::createCutoutSet(Nv::Blast::CutoutSetImpl& cutoutSet, const uint8_t* pixelBuffer, uint32_t bufferWidth, uint32_t bufferHeight,
float segmentationErrorThreshold, float snapThreshold, bool periodic, bool expandGaps)
{
cutoutSet.cutouts.clear();
cutoutSet.cutoutLoops.clear();
cutoutSet.periodic = periodic;
cutoutSet.dimensions = nvidia::NvVec2((float)bufferWidth, (float)bufferHeight);
if (!periodic)
{
cutoutSet.dimensions[0] += 1.0f;
cutoutSet.dimensions[1] += 1.0f;
}
if (pixelBuffer == NULL || bufferWidth == 0 || bufferHeight == 0)
{
return;
}
const int borderPad = periodic ? 0 : 2; // Padded for borders if not periodic
const int originCoord = periodic ? 0 : 1;
BitMap map(bufferWidth + borderPad, bufferHeight + borderPad, 0);
map.setOrigin((uint32_t)originCoord, (uint32_t)originCoord);
bool hasBorder = false;
for (uint32_t y = 0; y < bufferHeight; ++y)
{
for (uint32_t x = 0; x < bufferWidth; ++x)
{
const uint32_t pix = 5033165 * (uint32_t)pixelBuffer[0] + 9898557 * (uint32_t)pixelBuffer[1] + 1845494 * (uint32_t)pixelBuffer[2];
pixelBuffer += 3;
if ((pix >> 28) != 0)
{
map.set((int32_t)x, (int32_t)y);
hasBorder = true;
}
}
}
// Add borders if not tiling
if (!periodic)
{
for (int32_t x = -1; x <= (int32_t)bufferWidth; ++x)
{
map.set(x, -1);
map.set(x, (int32_t)bufferHeight);
}
for (int32_t y = -1; y <= (int32_t)bufferHeight; ++y)
{
map.set(-1, y);
map.set((int32_t)bufferWidth, y);
}
}
// Now search for regions
// Create a region map
Map2d<uint32_t> regions(bufferWidth + borderPad, bufferHeight + borderPad, 0xFFFFFFFF); // Initially an invalid value
regions.setOrigin((uint32_t)originCoord, (uint32_t)originCoord);
// Create a path counting map
Map2d<uint8_t> pathCounts(bufferWidth + borderPad, bufferHeight + borderPad, 0);
pathCounts.setOrigin((uint32_t)originCoord, (uint32_t)originCoord);
// Bump path counts on borders
if (!periodic)
{
for (int32_t x = -1; x <= (int32_t)bufferWidth; ++x)
{
pathCounts(x, -1) = 1;
pathCounts(x, (int32_t)bufferHeight) = 1;
}
for (int32_t y = -1; y <= (int32_t)bufferHeight; ++y)
{
pathCounts(-1, y) = 1;
pathCounts((int32_t)bufferWidth, y) = 1;
}
}
std::vector<POINT2D> stack;
std::vector<uint32_t> newCutout;
std::vector<POINT2D> traceStarts;
std::vector<std::vector<POINT2D>* > traces;
std::set<uint64_t> regionBoundary;
// Initial fill of region maps and path maps
for (int32_t y = 0; y < (int32_t)bufferHeight; ++y)
{
for (int32_t x = 0; x < (int32_t)bufferWidth; ++x)
{
if (map.read(x - 1, y) && !map.read(x, y))
{
// Found an empty spot next to a filled spot
POINT2D t(x - 1, y);
const uint32_t regionIndex = traceStarts.size();
newCutout.push_back(traces.size());
traceStarts.push_back(t); // Save off initial point
traces.push_back(new std::vector<POINT2D>());
NVBLAST_ASSERT(traces.size() == traceStarts.size()); // This must be the same size as traceStarts
//traces.back() = (std::vector<POINT2D>*)NVBLAST_ALLOC(sizeof(std::vector<POINT2D>), NV_DEBUG_EXP("CutoutPoint2DSet"));
//new(traces.back()) std::vector<POINT2D>;
// Flood fill region map
std::set<uint64_t> visited;
stack.push_back(POINT2D(x, y));
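// COMPRESS packs a 2D coordinate into a single 64-bit key so std::set can be used for the
// 'visited' and 'regionBoundary' lookups below; it is #undef'd once this region has been processed.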
#define COMPRESS(x, y) (((uint64_t)(x) << 32) + (y))
visited.insert(COMPRESS(x, y));
do
{
const POINT2D s = stack.back();
stack.pop_back();
map.set(s.x, s.y);
regions(s.x, s.y) = regionIndex;
POINT2D n;
for (int32_t i = 0; i < 4; ++i)
{
const int32_t i0 = i & 1;
const int32_t i1 = (i >> 1) & 1;
n.x = s.x + i0 - i1;
n.y = s.y + i0 + i1 - 1;
if (visited.find(COMPRESS(n.x, n.y)) == visited.end())
{
if (!map.read(n.x, n.y))
{
stack.push_back(n);
visited.insert(COMPRESS(n.x, n.y));
}
else
{
regionBoundary.insert(COMPRESS(n.x, n.y));
}
}
}
} while (stack.size());
// Trace region
NVBLAST_ASSERT(map.read(t.x, t.y));
std::vector<POINT2D>* trace = traces.back();
traceRegion(*trace, regions, pathCounts, regionIndex, t);
// Find inner traces
while(true)
{
for (auto& point : *trace)
{
regionBoundary.erase(COMPRESS(point.x, point.y));
}
if (trace->size() < 4)
{
delete trace; // delete already invokes ~vector()
traces.pop_back();
traceStarts.pop_back();
}
if (!regionBoundary.empty())
{
auto it = regionBoundary.begin();
t.x = *it >> 32;
t.y = *it & 0xFFFFFFFF;
traces.push_back(new std::vector<POINT2D>());
traceStarts.push_back(t);
trace = traces.back();
traceRegion(*trace, regions, pathCounts, regionIndex, t);
continue;
}
break;
}
#undef COMPRESS
}
}
}
uint32_t cutoutCount = traces.size();
//find internal traces
// Now expand regions until the paths completely overlap
if (expandGaps)
{
bool somePathChanged;
int sanityCounter = 1000;
bool abort = false;
do
{
somePathChanged = false;
for (uint32_t i = 0; i < cutoutCount; ++i)
{
if (traces[i] == nullptr)
{
continue;
}
uint32_t regionIndex = 0;
for (uint32_t c : newCutout)
{
if (i >= c)
{
regionIndex = c;
}
else
{
break;
}
}
bool pathChanged = false;
std::vector<POINT2D>& trace = *traces[i];
for (size_t j = 0; j < trace.size(); ++j)
{
const POINT2D& t = trace[j];
if (pathCounts(t.x, t.y) == 1)
{
if (regions(t.x, t.y) == 0xFFFFFFFF)
{
regions(t.x, t.y) = regionIndex;
pathChanged = true;
}
else
{
trace.erase(trace.begin() + j--);
}
}
}
if (pathChanged)
{
// Recalculate cutout
// Decrement pathCounts
for (uint32_t j = 0; j < trace.size(); ++j)
{
const POINT2D& t = trace[j];
--pathCounts(t.x, t.y);
}
// Erase trace
// Calculate new start point
POINT2D& t = traceStarts[i];
POINT2D t1 = t;
abort = true;
for (int32_t dirN = 0; dirN < 8; ++dirN)
{
t1 = POINT2D(t.x + taxicabSine(dirN + 2), t.y + taxicabSine(dirN));
if (regions(t1.x, t1.y) != regionIndex)
{
t = t1;
abort = false;
break;
}
}
if (abort)
{
break;
}
traceRegion(trace, regions, pathCounts, regionIndex, t);
somePathChanged = true;
}
}
if (--sanityCounter <= 0)
{
abort = true;
break;
}
} while (somePathChanged);
if (abort)
{
for (uint32_t i = 0; i < cutoutCount; ++i)
{
delete traces[i]; // delete already invokes ~vector()
}
cutoutCount = 0;
}
}
// Create cutouts
cutoutSet.cutouts = newCutout;
cutoutSet.cutouts.push_back(cutoutCount);
cutoutSet.cutoutLoops.resize(cutoutCount);
for (uint32_t i = 0; i < cutoutCount; ++i)
{
createCutout(cutoutSet.cutoutLoops[i], *traces[i], segmentationErrorThreshold, snapThreshold, bufferWidth, bufferHeight, !cutoutSet.periodic);
}
if (expandGaps)
{
simplifyCutoutSetImpl(cutoutSet, snapThreshold, bufferWidth, bufferHeight);
}
// Release traces
for (uint32_t i = 0; i < cutoutCount; ++i)
{
if (traces[i] != nullptr)
{
delete traces[i]; // delete already invokes ~vector()
}
}
// Decompose each cutout in the set into convex loops
//uint32_t cutoutSetSize = 0;
//for (uint32_t i = 0; i < cutoutSet.cutoutLoops.size(); ++i)
//{
// bool success = decomposeCutoutIntoConvexLoops(cutoutSet.cutoutLoops[i]);
// if (success)
// {
// if (cutoutSetSize != i)
// {
// cutoutSet.cutouts[cutoutSetSize] = cutoutSet.cutoutLoops[i];
// }
// ++cutoutSetSize;
// }
//}
//cutoutSet.cutoutLoops.resize(cutoutSetSize);
//Check if single cutout spread to the whole area for non periodic (no need to cutout then)
if (!periodic && cutoutSet.cutoutLoops.size() == 1 && (expandGaps || !hasBorder))
{
cutoutSet.cutoutLoops.clear();
}
}
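// Minimal 2x2 matrix stored as two column vectors (column0, column1). operator()(row, col) indexes the
// column-major storage as [col][row], and transform(v) returns column0*v.x + column1*v.y, i.e. M*v.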
class Matrix22
{
public:
//! Default constructor
Matrix22()
{}
//! Construct from two base vectors
Matrix22(const nvidia::NvVec2& col0, const nvidia::NvVec2& col1)
: column0(col0), column1(col1)
{}
//! Construct from float[4]
explicit Matrix22(float values[]):
column0(values[0],values[1]),
column1(values[2],values[3])
{
}
//! Copy constructor
Matrix22(const Matrix22& other)
: column0(other.column0), column1(other.column1)
{}
//! Assignment operator
Matrix22& operator=(const Matrix22& other)
{
column0 = other.column0;
column1 = other.column1;
return *this;
}
//! Set to identity matrix
static Matrix22 createIdentity()
{
return Matrix22(nvidia::NvVec2(1,0), nvidia::NvVec2(0,1));
}
//! Set to zero matrix
static Matrix22 createZero()
{
return Matrix22(nvidia::NvVec2(0.0f), nvidia::NvVec2(0.0f));
}
//! Construct from diagonal, off-diagonals are zero.
static Matrix22 createDiagonal(const nvidia::NvVec2& d)
{
return Matrix22(nvidia::NvVec2(d.x,0.0f), nvidia::NvVec2(0.0f,d.y));
}
//! Get transposed matrix
Matrix22 getTranspose() const
{
const nvidia::NvVec2 v0(column0.x, column1.x);
const nvidia::NvVec2 v1(column0.y, column1.y);
return Matrix22(v0,v1);
}
//! Get the real inverse
Matrix22 getInverse() const
{
const float det = getDeterminant();
Matrix22 inverse;
if(det != 0)
{
const float invDet = 1.0f/det;
inverse.column0[0] = invDet * column1[1];
inverse.column0[1] = invDet * (-column0[1]);
inverse.column1[0] = invDet * (-column1[0]);
inverse.column1[1] = invDet * column0[0];
return inverse;
}
else
{
return createIdentity();
}
}
//! Get determinant
float getDeterminant() const
{
return column0[0] * column1[1] - column0[1] * column1[0];
}
//! Unary minus
Matrix22 operator-() const
{
return Matrix22(-column0, -column1);
}
//! Add
Matrix22 operator+(const Matrix22& other) const
{
return Matrix22( column0+other.column0,
column1+other.column1);
}
//! Subtract
Matrix22 operator-(const Matrix22& other) const
{
return Matrix22( column0-other.column0,
column1-other.column1);
}
//! Scalar multiplication
Matrix22 operator*(float scalar) const
{
return Matrix22(column0*scalar, column1*scalar);
}
//! Matrix vector multiplication (returns 'this->transform(vec)')
nvidia::NvVec2 operator*(const nvidia::NvVec2& vec) const
{
return transform(vec);
}
//! Matrix multiplication
Matrix22 operator*(const Matrix22& other) const
{
//Rows from this <dot> columns from other
//column0 = transform(other.column0) etc
return Matrix22(transform(other.column0), transform(other.column1));
}
// a <op>= b operators
//! Equals-add
Matrix22& operator+=(const Matrix22& other)
{
column0 += other.column0;
column1 += other.column1;
return *this;
}
//! Equals-sub
Matrix22& operator-=(const Matrix22& other)
{
column0 -= other.column0;
column1 -= other.column1;
return *this;
}
//! Equals scalar multiplication
Matrix22& operator*=(float scalar)
{
column0 *= scalar;
column1 *= scalar;
return *this;
}
//! Element access, mathematical way!
float operator()(unsigned int row, unsigned int col) const
{
return (*this)[col][(int)row];
}
//! Element access, mathematical way!
float& operator()(unsigned int row, unsigned int col)
{
return (*this)[col][(int)row];
}
// Transform etc
//! Transform vector by matrix, equal to v' = M*v
nvidia::NvVec2 transform(const nvidia::NvVec2& other) const
{
return column0*other.x + column1*other.y;
}
nvidia::NvVec2& operator[](unsigned int num) {return (&column0)[num];}
const nvidia::NvVec2& operator[](unsigned int num) const {return (&column0)[num];}
//Data, see above for format!
nvidia::NvVec2 column0, column1; //the two base vectors
};
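// Builds the 3x3 matrix that maps homogeneous UV coordinates to positions: with P = [p0 p1 p2] and
// T = [(u0,v0,1) (u1,v1,1) (u2,v2,1)] as column matrices, the result is M = P * T^-1, so that
// M * (u, v, 1) reproduces each vertex position. Fails (returns false) when the UVs are degenerate,
// i.e. det(T) == 0.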
bool calculateUVMapping(const Nv::Blast::Triangle& triangle, nvidia::NvMat33& theResultMapping)
{
nvidia::NvMat33 rMat;
nvidia::NvMat33 uvMat;
for (unsigned col = 0; col < 3; ++col)
{
auto v = (&triangle.a)[col];
rMat[col] = toNvShared(v.p);
uvMat[col] = nvidia::NvVec3(v.uv[0].x, v.uv[0].y, 1.0f);
}
if (uvMat.getDeterminant() == 0.0f)
{
return false;
}
theResultMapping = rMat*uvMat.getInverse();
return true;
}
//static bool calculateUVMapping(ExplicitHierarchicalMesh& theHMesh, const nvidia::NvVec3& theDir, nvidia::NvMat33& theResultMapping)
//{
// nvidia::NvVec3 cutoutDir( theDir );
// cutoutDir.normalize( );
//
// const float cosineThreshold = nvidia::NvCos(3.141593f / 180); // 1 degree
//
// ExplicitRenderTriangle* triangleToUse = NULL;
// float greatestCosine = -NV_MAX_F32;
// float greatestArea = 0.0f; // for normals within the threshold
// for ( uint32_t partIndex = 0; partIndex < theHMesh.partCount(); ++partIndex )
// {
// ExplicitRenderTriangle* theTriangles = theHMesh.meshTriangles( partIndex );
// uint32_t triangleCount = theHMesh.meshTriangleCount( partIndex );
// for ( uint32_t tIndex = 0; tIndex < triangleCount; ++tIndex )
// {
// ExplicitRenderTriangle& theTriangle = theTriangles[tIndex];
// nvidia::NvVec3 theEdge1 = theTriangle.vertices[1].position - theTriangle.vertices[0].position;
// nvidia::NvVec3 theEdge2 = theTriangle.vertices[2].position - theTriangle.vertices[0].position;
// nvidia::NvVec3 theNormal = theEdge1.cross( theEdge2 );
// float theArea = theNormal.normalize(); // twice the area, but that's ok
//
// if (theArea == 0.0f)
// {
// continue;
// }
//
// const float cosine = cutoutDir.dot(theNormal);
//
// if (cosine < cosineThreshold)
// {
// if (cosine > greatestCosine && greatestArea == 0.0f)
// {
// greatestCosine = cosine;
// triangleToUse = &theTriangle;
// }
// }
// else
// {
// if (theArea > greatestArea)
// {
// greatestArea = theArea;
// triangleToUse = &theTriangle;
// }
// }
// }
// }
//
// if (triangleToUse == NULL)
// {
// return false;
// }
//
// return calculateUVMapping(*triangleToUse, theResultMapping);
//}
//bool calculateCutoutUVMapping(ExplicitHierarchicalMesh& hMesh, const nvidia::NvVec3& targetDirection, nvidia::NvMat33& theMapping)
//{
// return ::calculateUVMapping(hMesh, targetDirection, theMapping);
//}
//bool calculateCutoutUVMapping(const Nv::Blast::Triangle& targetDirection, nvidia::NvMat33& theMapping)
//{
// return ::calculateUVMapping(targetDirection, theMapping);
//}
const NvcVec3& CutoutSetImpl::getCutoutVertex(uint32_t cutoutIndex, uint32_t loopIndex, uint32_t vertexIndex) const
{
return fromNvShared(cutoutLoops[cutouts[cutoutIndex] + loopIndex].vertices[vertexIndex]);
}
const NvcVec2& CutoutSetImpl::getDimensions() const
{
return fromNvShared(dimensions);
}
| 80,008 | C++ | 30.787445 | 190 | 0.519748 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringCutoutImpl.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTAUTHORINGFCUTOUTIMPL_H
#define NVBLASTAUTHORINGFCUTOUTIMPL_H
#include "NvBlastExtAuthoringCutout.h"
#include <vector>
#include "NvVec2.h"
#include "NvVec3.h"
#include "NvMat44.h"
namespace Nv
{
namespace Blast
{
struct PolyVert
{
uint16_t index;
uint16_t flags;
};
struct ConvexLoop
{
std::vector<PolyVert> polyVerts;
};
struct Cutout
{
std::vector<nvidia::NvVec3> vertices;
//std::vector<ConvexLoop> convexLoops;
std::vector<nvidia::NvVec3> smoothingGroups;
};
struct POINT2D
{
POINT2D() {}
POINT2D(int32_t _x, int32_t _y) : x(_x), y(_y) {}
int32_t x;
int32_t y;
bool operator==(const POINT2D& other) const
{
return x == other.x && y == other.y;
}
bool operator<(const POINT2D& other) const
{
if (x == other.x) return y < other.y;
return x < other.x;
}
};
struct CutoutSetImpl : public CutoutSet
{
CutoutSetImpl() : periodic(false), dimensions(0.0f)
{
}
uint32_t getCutoutCount() const
{
return (uint32_t)cutouts.size() - 1;
}
uint32_t getCutoutVertexCount(uint32_t cutoutIndex, uint32_t loopIndex) const
{
return (uint32_t)cutoutLoops[cutouts[cutoutIndex] + loopIndex].vertices.size();
}
uint32_t getCutoutLoopCount(uint32_t cutoutIndex) const
{
return (uint32_t)cutouts[cutoutIndex + 1] - cutouts[cutoutIndex];
}
const NvcVec3& getCutoutVertex(uint32_t cutoutIndex, uint32_t loopIndex, uint32_t vertexIndex) const;
bool isCutoutVertexToggleSmoothingGroup(uint32_t cutoutIndex, uint32_t loopIndex, uint32_t vertexIndex) const
{
auto& vRef = cutoutLoops[cutouts[cutoutIndex] + loopIndex].vertices[vertexIndex];
for (auto& v : cutoutLoops[cutouts[cutoutIndex] + loopIndex].smoothingGroups)
{
if ((vRef - v).magnitudeSquared() < 1e-5)
{
return true;
}
}
return false;
}
bool isPeriodic() const
{
return periodic;
}
const NvcVec2& getDimensions() const;
//void serialize(nvidia::NvFileBuf& stream) const;
//void deserialize(nvidia::NvFileBuf& stream);
void release()
{
delete this;
}
std::vector<Cutout> cutoutLoops;
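// Offset table into cutoutLoops: cutouts[i] is the index of the first loop of cutout i, and the vector
// ends with a sentinel equal to cutoutLoops.size(), which is why getCutoutCount() returns size() - 1.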
std::vector<uint32_t> cutouts;
bool periodic;
nvidia::NvVec2 dimensions;
};
void createCutoutSet(Nv::Blast::CutoutSetImpl& cutoutSet, const uint8_t* pixelBuffer, uint32_t bufferWidth, uint32_t bufferHeight,
float segmentationErrorThreshold, float snapThreshold, bool periodic, bool expandGaps);
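// Illustrative usage sketch (the buffer, dimensions and threshold values below are hypothetical):
//
// Nv::Blast::CutoutSetImpl cutoutSet;
// const uint8_t* pixels = ...; // RGB8 buffer, 3 bytes per pixel, bufferWidth * bufferHeight pixels
// Nv::Blast::createCutoutSet(cutoutSet, pixels, 512, 512,
//     0.001f /*segmentationErrorThreshold*/, 1.0f /*snapThreshold*/,
//     false /*periodic*/, true /*expandGaps*/);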
} // namespace Blast
} // namespace Nv
#endif // ifndef NVBLASTAUTHORINGFCUTOUTIMPL_H
| 4,391 | C | 29.929577 | 130 | 0.670462 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringCollisionBuilderImpl.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include <NvBlastGlobals.h>
#include "NvBlastExtAuthoringCollisionBuilderImpl.h"
#include <NvBlastExtApexSharedParts.h>
#include <NvBlastExtAuthoringInternalCommon.h>
#include <NvBlastExtAuthoringBooleanToolImpl.h>
#include <NvBlastExtAuthoringMeshImpl.h>
#include <NvBlastExtAuthoringMeshUtils.h>
#include <NvBlastNvSharedHelpers.h>
#include <VHACD.h>
#include <vector>
using namespace nvidia;
namespace Nv
{
namespace Blast
{
#define SAFE_ARRAY_NEW(T, x) ((x) > 0) ? reinterpret_cast<T*>(NVBLAST_ALLOC(sizeof(T) * (x))) : nullptr;
#define SAFE_ARRAY_DELETE(x) if (x != nullptr) {NVBLAST_FREE(x); x = nullptr;}
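// For every pair of overlapping hulls at the same chunk depth, builds a separating midplane halfway
// between the closest vertices of the two hulls, then clips each hull by its accumulated midplanes
// (boolean difference against a large cutting box) and rebuilds its collision geometry from the
// clipped vertices.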
void trimCollisionGeometry(ConvexMeshBuilder& cmb, uint32_t chunksCount, CollisionHull** in, const uint32_t* chunkDepth)
{
std::vector<std::vector<NvPlane> > chunkMidplanes(chunksCount);
std::vector<NvVec3> centers(chunksCount);
std::vector<NvBounds3> hullsBounds(chunksCount);
for (uint32_t i = 0; i < chunksCount; ++i)
{
hullsBounds[i].setEmpty();
centers[i] = NvVec3(0, 0, 0);
for (uint32_t p = 0; p < in[i]->pointsCount; ++p)
{
centers[i] += toNvShared(in[i]->points[p]);
hullsBounds[i].include(toNvShared(in[i]->points[p]));
}
centers[i] = hullsBounds[i].getCenter();
}
Separation params;
for (uint32_t hull = 0; hull < chunksCount; ++hull)
{
for (uint32_t hull2 = hull + 1; hull2 < chunksCount; ++hull2)
{
if (chunkDepth[hull] != chunkDepth[hull2])
{
continue;
}
if (importerHullsInProximityApexFree(in[hull]->pointsCount, toNvShared(in[hull]->points), hullsBounds[hull],
NvTransform(NvIdentity), NvVec3(1, 1, 1), in[hull2]->pointsCount,
toNvShared(in[hull2]->points), hullsBounds[hull2], NvTransform(NvIdentity),
NvVec3(1, 1, 1), 0.0, ¶ms) == false)
{
continue;
}
NvVec3 c1 = centers[hull];
NvVec3 c2 = centers[hull2];
float d = FLT_MAX;
NvVec3 n1;
NvVec3 n2;
for (uint32_t p = 0; p < in[hull]->pointsCount; ++p)
{
float ld = (toNvShared(in[hull]->points[p]) - c2).magnitude();
if (ld < d)
{
n1 = toNvShared(in[hull]->points[p]);
d = ld;
}
}
d = FLT_MAX;
for (uint32_t p = 0; p < in[hull2]->pointsCount; ++p)
{
float ld = (toNvShared(in[hull2]->points[p]) - c1).magnitude();
if (ld < d)
{
n2 = toNvShared(in[hull2]->points[p]);
d = ld;
}
}
NvVec3 dir = c2 - c1;
NvPlane pl = NvPlane((n1 + n2) * 0.5, dir.getNormalized());
chunkMidplanes[hull].push_back(pl);
NvPlane pl2 = NvPlane((n1 + n2) * 0.5, -dir.getNormalized());
chunkMidplanes[hull2].push_back(pl2);
}
}
std::vector<NvVec3> hPoints;
for (uint32_t i = 0; i < chunksCount; ++i)
{
std::vector<Facet> facets;
std::vector<Vertex> vertices;
std::vector<Edge> edges;
for (uint32_t fc = 0; fc < in[i]->polygonDataCount; ++fc)
{
Facet nFc;
nFc.firstEdgeNumber = edges.size();
auto& pd = in[i]->polygonData[fc];
uint32_t n = pd.vertexCount;
for (uint32_t ed = 0; ed < n; ++ed)
{
uint32_t vr1 = in[i]->indices[(ed) + pd.indexBase];
uint32_t vr2 = in[i]->indices[(ed + 1) % n + pd.indexBase];
edges.push_back({vr1, vr2});
}
nFc.edgesCount = n;
facets.push_back(nFc);
}
vertices.resize(in[i]->pointsCount);
for (uint32_t vr = 0; vr < in[i]->pointsCount; ++vr)
{
vertices[vr].p = in[i]->points[vr];
}
Mesh* hullMesh = new MeshImpl(vertices.data(), edges.data(), facets.data(), vertices.size(), edges.size(), facets.size());
BooleanEvaluator evl;
// I think the material ID is unused for collision meshes, so hardcoding kMaterialInteriorId is ok
Mesh* cuttingMesh = getCuttingBox(NvVec3(0, 0, 0), NvVec3(0, 0, 1), 40, 0, kMaterialInteriorId);
for (uint32_t p = 0; p < chunkMidplanes[i].size(); ++p)
{
NvPlane& pl = chunkMidplanes[i][p];
setCuttingBox(pl.pointInPlane(), pl.n.getNormalized(), cuttingMesh, 60, 0);
evl.performFastCutting(hullMesh, cuttingMesh, BooleanConfigurations::BOOLEAN_DIFFERENCE());
Mesh* result = evl.createNewMesh();
if (result == nullptr)
{
break;
}
delete hullMesh;
hullMesh = result;
}
delete cuttingMesh;
if (hullMesh == nullptr)
{
continue;
}
hPoints.clear();
hPoints.resize(hullMesh->getVerticesCount());
for (uint32_t v = 0; v < hullMesh->getVerticesCount(); ++v)
{
hPoints[v] = toNvShared(hullMesh->getVertices()[v].p);
}
delete hullMesh;
if (in[i] != nullptr)
{
delete in[i];
}
in[i] = cmb.buildCollisionGeometry(hPoints.size(), fromNvShared(hPoints.data()));
}
}
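// Decomposes the chunk triangles with V-HACD: the coordinates are first normalized into the unit cube
// of the chunk's bounding box, each resulting convex hull is scaled back into the original space, and
// the hulls are converted to CollisionHull objects through the supplied ConvexMeshBuilder. Returns the
// number of hulls written to 'convexes'.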
int32_t buildMeshConvexDecomposition(ConvexMeshBuilder& cmb, const Triangle* mesh, uint32_t triangleCount,
const ConvexDecompositionParams& iparams, CollisionHull**& convexes)
{
std::vector<float> coords(triangleCount * 9);
std::vector<uint32_t> indices(triangleCount * 3);
uint32_t indx = 0;
uint32_t indxCoord = 0;
NvBounds3 chunkBound = NvBounds3::empty();
for (uint32_t i = 0; i < triangleCount; ++i)
{
for (auto& t : { mesh[i].a.p , mesh[i].b.p , mesh[i].c.p })
{
chunkBound.include(toNvShared(t));
coords[indxCoord] = t.x;
coords[indxCoord + 1] = t.y;
coords[indxCoord + 2] = t.z;
indxCoord += 3;
}
indices[indx] = indx;
indices[indx + 1] = indx + 1;
indices[indx + 2] = indx + 2;
indx += 3;
}
NvVec3 rsc = chunkBound.getDimensions();
for (uint32_t i = 0; i < coords.size(); i += 3)
{
coords[i] = (coords[i] - chunkBound.minimum.x) / rsc.x;
coords[i + 1] = (coords[i + 1] - chunkBound.minimum.y) / rsc.y;
coords[i + 2] = (coords[i + 2] - chunkBound.minimum.z) / rsc.z;
}
VHACD::IVHACD* decomposer = VHACD::CreateVHACD();
VHACD::IVHACD::Parameters vhacdParam;
vhacdParam.m_maxConvexHulls = iparams.maximumNumberOfHulls;
vhacdParam.m_resolution = iparams.voxelGridResolution;
vhacdParam.m_concavity = iparams.concavity;
vhacdParam.m_oclAcceleration = false;
//TODO vhacdParam.m_callback
vhacdParam.m_minVolumePerCH = 0.003f; // 1.f / (3 * vhacdParam.m_resolution ^ (1 / 3));
decomposer->Compute(coords.data(), triangleCount * 3, indices.data(), triangleCount, vhacdParam);
const uint32_t nConvexHulls = decomposer->GetNConvexHulls();
convexes = SAFE_ARRAY_NEW(CollisionHull*, nConvexHulls);
for (uint32_t i = 0; i < nConvexHulls; ++i)
{
VHACD::IVHACD::ConvexHull hl;
decomposer->GetConvexHull(i, hl);
std::vector<NvVec3> vertices;
for (uint32_t v = 0; v < hl.m_nPoints; ++v)
{
vertices.push_back(NvVec3(hl.m_points[v * 3], hl.m_points[v * 3 + 1], hl.m_points[v * 3 + 2]));
vertices.back().x = vertices.back().x * rsc.x + chunkBound.minimum.x;
vertices.back().y = vertices.back().y * rsc.y + chunkBound.minimum.y;
vertices.back().z = vertices.back().z * rsc.z + chunkBound.minimum.z;
}
convexes[i] = cmb.buildCollisionGeometry(vertices.size(), fromNvShared(vertices.data()));
}
// VHACD::~VHACD called from Release() does nothing and does not call Clean()
decomposer->Clean();
decomposer->Release();
return nConvexHulls;
}
} // namespace Blast
} // namespace Nv
| 9,910 | C++ | 37.866667 | 130 | 0.580626 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringMeshUtils.h | #include <cinttypes>
#include <map>
#include <set>
#include <vector>
#include "NvBlastExtAuthoringTypes.h"
namespace nvidia
{
class NvVec3;
};
namespace Nv
{
namespace Blast
{
class Mesh;
/**
Helper functions
*/
/**
Set cutting box at some particular position.
\param[in] point Cutting face center
\param[in] normal Cutting face normal
\param[in] mesh Cutting box mesh
\param[in] size Cutting box size
\param[in] id Cutting box ID
*/
void setCuttingBox(const nvidia::NvVec3& point, const nvidia::NvVec3& normal, Mesh* mesh, float size, int64_t id);
/**
Create cutting box at some particular position.
\param[in] point Cutting face center
\param[in] normal Cutting face normal
\param[in] size Cutting box size
\param[in] id Cutting box ID
*/
Mesh* getCuttingBox(const nvidia::NvVec3& point, const nvidia::NvVec3& normal, float size, int64_t id, int32_t interiorMaterialId);
/**
Create box at some particular position.
\param[in] point Cutting face center
\param[in] size Cutting box size
*/
Mesh* getBigBox(const nvidia::NvVec3& point, float size, int32_t interiorMaterialId);
/**
Create slicing box with noisy cutting surface.
\param[in] point Cutting face center
\param[in] normal Cutting face normal
\param[in] size Cutting box size
\param[in] jaggedPlaneSize Noisy surface size
\param[in] resolution Noisy surface resolution
\param[in] id Cutting box ID
\param[in] amplitude Noise amplitude
\param[in] frequency Noise frequency
\param[in] octaves Noise octaves
\param[in] seed Random generator seed, used for noise generation.
*/
Mesh* getNoisyCuttingBoxPair(const nvidia::NvVec3& point, const nvidia::NvVec3& normal, float size, float jaggedPlaneSize, nvidia::NvVec3 resolution, int64_t id, float amplitude, float frequency, int32_t octaves, int32_t seed, int32_t interiorMaterialId);
/**
Inverses normals of cutting box and sets indices.
\param[in] mesh Cutting box mesh
*/
void inverseNormalAndIndices(Mesh* mesh);
struct CmpVec
{
bool operator()(const nvidia::NvVec3& v1, const nvidia::NvVec3& v2) const;
};
typedef std::map<nvidia::NvVec3, std::map<uint32_t, uint32_t>, CmpVec> PointMap;
struct SharedFace
{
SharedFace() {}
SharedFace(uint32_t inW, uint32_t inH, int64_t inUD, int32_t inMatId) : w(inW), h(inH), f(Facet( 0, 3, inUD, inMatId ))
{
vertices.reserve((w + 1) * (h + 1));
}
uint32_t w, h;
Facet f;
std::vector<Nv::Blast::Vertex> vertices;
std::vector<Nv::Blast::Edge> edges;
std::vector<Nv::Blast::Facet> facets;
};
struct CmpSharedFace
{
bool operator()(const std::pair<nvidia::NvVec3, nvidia::NvVec3>& pv1, const std::pair<nvidia::NvVec3, nvidia::NvVec3>& pv2) const;
};
typedef std::map<std::pair<nvidia::NvVec3, nvidia::NvVec3>, SharedFace, CmpSharedFace> SharedFacesMap;
struct CutoutConfiguration;
void buildCuttingConeFaces(const CutoutConfiguration& conf, const std::vector<std::vector<nvidia::NvVec3>>& points,
float heightBot, float heightTop, float conicityBot, float conicityTop,
int64_t& id, int32_t seed, int32_t interiorMaterialId, SharedFacesMap& sharedFacesMap);
/**
Create cutting cone at some particular position.
\param[in] conf Cutout configuration parameters and data
\param[in] meshId Cutout index
\param[in] points Array of points for loop
\param[in] smoothingGroups Array of point indices at which smoothing group should be toggled
\param[in] heightBot Cutting cone bottom height (below z = 0)
\param[in] heightTop Cutting cone top height (above z = 0)
\param[in] conicityBot Cutting cone bottom points multiplier
\param[in] conicityTop Cutting cone top points multiplier
\param[in] id Cutting cylinder ID
\param[in] seed Seed for RNG
\param[in] interiorMaterialId Interior material index
\param[in] sharedFacesMap Shared faces for noisy fracture
*/
Mesh* getCuttingCone(const CutoutConfiguration& conf,
const std::vector<nvidia::NvVec3>& points, const std::set<int32_t>& smoothingGroups,
float heightBot, float heightTop, float conicityBot, float conicityTop,
int64_t& id, int32_t seed, int32_t interiorMaterialId, const SharedFacesMap& sharedFacesMap, bool inverseNormals = false);
};
}; | 4,321 | C | 33.576 | 255 | 0.714881 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/src/vhacdVolume.cpp | /* Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com)
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define _CRT_SECURE_NO_WARNINGS
#include "btConvexHullComputer.h"
#include "vhacdVolume.h"
#include <algorithm>
#include <float.h>
#include <math.h>
#include <queue>
#include <string.h>
#ifdef _MSC_VER
#pragma warning(disable:4458 4100)
#endif
namespace VHACD {
/********************************************************/
/* AABB-triangle overlap test code */
/* by Tomas Akenine-Möller                               */
/* Function: int32_t triBoxOverlap(float boxcenter[3], */
/* float boxhalfsize[3],float triverts[3][3]); */
/* History: */
/* 2001-03-05: released the code in its first version */
/* 2001-06-18: changed the order of the tests, faster */
/* */
/* Acknowledgement: Many thanks to Pierre Terdiman for */
/* suggestions and discussions on how to optimize code. */
/* Thanks to David Hunt for finding a ">="-bug! */
/********************************************************/
#define X 0
#define Y 1
#define Z 2
#define FINDMINMAX(x0, x1, x2, min, max) \
min = max = x0; \
if (x1 < min) \
min = x1; \
if (x1 > max) \
max = x1; \
if (x2 < min) \
min = x2; \
if (x2 > max) \
max = x2;
#define AXISTEST_X01(a, b, fa, fb) \
p0 = a * v0[Y] - b * v0[Z]; \
p2 = a * v2[Y] - b * v2[Z]; \
if (p0 < p2) { \
min = p0; \
max = p2; \
} \
else { \
min = p2; \
max = p0; \
} \
rad = fa * boxhalfsize[Y] + fb * boxhalfsize[Z]; \
if (min > rad || max < -rad) \
return 0;
#define AXISTEST_X2(a, b, fa, fb) \
p0 = a * v0[Y] - b * v0[Z]; \
p1 = a * v1[Y] - b * v1[Z]; \
if (p0 < p1) { \
min = p0; \
max = p1; \
} \
else { \
min = p1; \
max = p0; \
} \
rad = fa * boxhalfsize[Y] + fb * boxhalfsize[Z]; \
if (min > rad || max < -rad) \
return 0;
#define AXISTEST_Y02(a, b, fa, fb) \
p0 = -a * v0[X] + b * v0[Z]; \
p2 = -a * v2[X] + b * v2[Z]; \
if (p0 < p2) { \
min = p0; \
max = p2; \
} \
else { \
min = p2; \
max = p0; \
} \
rad = fa * boxhalfsize[X] + fb * boxhalfsize[Z]; \
if (min > rad || max < -rad) \
return 0;
#define AXISTEST_Y1(a, b, fa, fb) \
p0 = -a * v0[X] + b * v0[Z]; \
p1 = -a * v1[X] + b * v1[Z]; \
if (p0 < p1) { \
min = p0; \
max = p1; \
} \
else { \
min = p1; \
max = p0; \
} \
rad = fa * boxhalfsize[X] + fb * boxhalfsize[Z]; \
if (min > rad || max < -rad) \
return 0;
#define AXISTEST_Z12(a, b, fa, fb) \
p1 = a * v1[X] - b * v1[Y]; \
p2 = a * v2[X] - b * v2[Y]; \
if (p2 < p1) { \
min = p2; \
max = p1; \
} \
else { \
min = p1; \
max = p2; \
} \
rad = fa * boxhalfsize[X] + fb * boxhalfsize[Y]; \
if (min > rad || max < -rad) \
return 0;
#define AXISTEST_Z0(a, b, fa, fb) \
p0 = a * v0[X] - b * v0[Y]; \
p1 = a * v1[X] - b * v1[Y]; \
if (p0 < p1) { \
min = p0; \
max = p1; \
} \
else { \
min = p1; \
max = p0; \
} \
rad = fa * boxhalfsize[X] + fb * boxhalfsize[Y]; \
if (min > rad || max < -rad) \
return 0;
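/* Tests the triangle's plane (defined by 'normal' and a point 'vert' on it) against an axis-aligned */
/* box of half-extents 'maxbox' centered at the origin, using the box corners that lie farthest      */
/* along the negative and positive plane normal. Returns 1 on overlap, 0 otherwise.                  */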
int32_t PlaneBoxOverlap(const Vec3<double>& normal,
const Vec3<double>& vert,
const Vec3<double>& maxbox)
{
int32_t q;
Vec3<double> vmin, vmax;
double v;
for (q = X; q <= Z; q++) {
v = vert[q];
if (normal[q] > 0.0) {
vmin[q] = -maxbox[q] - v;
vmax[q] = maxbox[q] - v;
}
else {
vmin[q] = maxbox[q] - v;
vmax[q] = -maxbox[q] - v;
}
}
if (normal * vmin > 0.0)
return 0;
if (normal * vmax >= 0.0)
return 1;
return 0;
}
int32_t TriBoxOverlap(const Vec3<double>& boxcenter,
const Vec3<double>& boxhalfsize,
const Vec3<double>& triver0,
const Vec3<double>& triver1,
const Vec3<double>& triver2)
{
/* use separating axis theorem to test overlap between triangle and box */
/* need to test for overlap in these directions: */
/* 1) the {x,y,z}-directions (actually, since we use the AABB of the triangle */
/* we do not even need to test these) */
/* 2) normal of the triangle */
/* 3) crossproduct(edge from tri, {x,y,z}-direction) */
/* this gives 3x3=9 more tests */
Vec3<double> v0, v1, v2;
double min, max, p0, p1, p2, rad, fex, fey, fez; // -NJMP- "d" local variable removed
Vec3<double> normal, e0, e1, e2;
/* This is the fastest branch on Sun */
/* move everything so that the boxcenter is in (0,0,0) */
v0 = triver0 - boxcenter;
v1 = triver1 - boxcenter;
v2 = triver2 - boxcenter;
/* compute triangle edges */
e0 = v1 - v0; /* tri edge 0 */
e1 = v2 - v1; /* tri edge 1 */
e2 = v0 - v2; /* tri edge 2 */
/* Bullet 3: */
/* test the 9 tests first (this was faster) */
fex = fabs(e0[X]);
fey = fabs(e0[Y]);
fez = fabs(e0[Z]);
AXISTEST_X01(e0[Z], e0[Y], fez, fey);
AXISTEST_Y02(e0[Z], e0[X], fez, fex);
AXISTEST_Z12(e0[Y], e0[X], fey, fex);
fex = fabs(e1[X]);
fey = fabs(e1[Y]);
fez = fabs(e1[Z]);
AXISTEST_X01(e1[Z], e1[Y], fez, fey);
AXISTEST_Y02(e1[Z], e1[X], fez, fex);
AXISTEST_Z0(e1[Y], e1[X], fey, fex);
fex = fabs(e2[X]);
fey = fabs(e2[Y]);
fez = fabs(e2[Z]);
AXISTEST_X2(e2[Z], e2[Y], fez, fey);
AXISTEST_Y1(e2[Z], e2[X], fez, fex);
AXISTEST_Z12(e2[Y], e2[X], fey, fex);
/* Bullet 1: */
/* first test overlap in the {x,y,z}-directions */
/* find min, max of the triangle each direction, and test for overlap in */
/* that direction -- this is equivalent to testing a minimal AABB around */
/* the triangle against the AABB */
/* test in X-direction */
FINDMINMAX(v0[X], v1[X], v2[X], min, max);
if (min > boxhalfsize[X] || max < -boxhalfsize[X])
return 0;
/* test in Y-direction */
FINDMINMAX(v0[Y], v1[Y], v2[Y], min, max);
if (min > boxhalfsize[Y] || max < -boxhalfsize[Y])
return 0;
/* test in Z-direction */
FINDMINMAX(v0[Z], v1[Z], v2[Z], min, max);
if (min > boxhalfsize[Z] || max < -boxhalfsize[Z])
return 0;
/* Bullet 2: */
/* test if the box intersects the plane of the triangle */
/* compute plane equation of triangle: normal*x+d=0 */
normal = e0 ^ e1;
if (!PlaneBoxOverlap(normal, v0, boxhalfsize))
return 0;
return 1; /* box and triangle overlaps */
}
// Slightly modified version of Stan Melax's code for 3x3 matrix diagonalization (Thanks Stan!)
// source: http://www.melax.com/diag.html?attredirects=0
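// The rotation is accumulated in the quaternion q through repeated Jacobi-style rotations that zero the
// largest off-diagonal element; iteration stops when the off-diagonals vanish or machine precision is hit.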
void Diagonalize(const double (&A)[3][3], double (&Q)[3][3], double (&D)[3][3])
{
// A must be a symmetric matrix.
// returns Q and D such that
// Diagonal matrix D = QT * A * Q; and A = Q*D*QT
const int32_t maxsteps = 24; // certainly wont need that many.
int32_t k0, k1, k2;
double o[3], m[3];
double q[4] = { 0.0, 0.0, 0.0, 1.0 };
double jr[4];
double sqw, sqx, sqy, sqz;
double tmp1, tmp2, mq;
double AQ[3][3];
double thet, sgn, t, c;
for (int32_t i = 0; i < maxsteps; ++i) {
// quat to matrix
sqx = q[0] * q[0];
sqy = q[1] * q[1];
sqz = q[2] * q[2];
sqw = q[3] * q[3];
Q[0][0] = (sqx - sqy - sqz + sqw);
Q[1][1] = (-sqx + sqy - sqz + sqw);
Q[2][2] = (-sqx - sqy + sqz + sqw);
tmp1 = q[0] * q[1];
tmp2 = q[2] * q[3];
Q[1][0] = 2.0 * (tmp1 + tmp2);
Q[0][1] = 2.0 * (tmp1 - tmp2);
tmp1 = q[0] * q[2];
tmp2 = q[1] * q[3];
Q[2][0] = 2.0 * (tmp1 - tmp2);
Q[0][2] = 2.0 * (tmp1 + tmp2);
tmp1 = q[1] * q[2];
tmp2 = q[0] * q[3];
Q[2][1] = 2.0 * (tmp1 + tmp2);
Q[1][2] = 2.0 * (tmp1 - tmp2);
// AQ = A * Q
AQ[0][0] = Q[0][0] * A[0][0] + Q[1][0] * A[0][1] + Q[2][0] * A[0][2];
AQ[0][1] = Q[0][1] * A[0][0] + Q[1][1] * A[0][1] + Q[2][1] * A[0][2];
AQ[0][2] = Q[0][2] * A[0][0] + Q[1][2] * A[0][1] + Q[2][2] * A[0][2];
AQ[1][0] = Q[0][0] * A[0][1] + Q[1][0] * A[1][1] + Q[2][0] * A[1][2];
AQ[1][1] = Q[0][1] * A[0][1] + Q[1][1] * A[1][1] + Q[2][1] * A[1][2];
AQ[1][2] = Q[0][2] * A[0][1] + Q[1][2] * A[1][1] + Q[2][2] * A[1][2];
AQ[2][0] = Q[0][0] * A[0][2] + Q[1][0] * A[1][2] + Q[2][0] * A[2][2];
AQ[2][1] = Q[0][1] * A[0][2] + Q[1][1] * A[1][2] + Q[2][1] * A[2][2];
AQ[2][2] = Q[0][2] * A[0][2] + Q[1][2] * A[1][2] + Q[2][2] * A[2][2];
// D = Qt * AQ
D[0][0] = AQ[0][0] * Q[0][0] + AQ[1][0] * Q[1][0] + AQ[2][0] * Q[2][0];
D[0][1] = AQ[0][0] * Q[0][1] + AQ[1][0] * Q[1][1] + AQ[2][0] * Q[2][1];
D[0][2] = AQ[0][0] * Q[0][2] + AQ[1][0] * Q[1][2] + AQ[2][0] * Q[2][2];
D[1][0] = AQ[0][1] * Q[0][0] + AQ[1][1] * Q[1][0] + AQ[2][1] * Q[2][0];
D[1][1] = AQ[0][1] * Q[0][1] + AQ[1][1] * Q[1][1] + AQ[2][1] * Q[2][1];
D[1][2] = AQ[0][1] * Q[0][2] + AQ[1][1] * Q[1][2] + AQ[2][1] * Q[2][2];
D[2][0] = AQ[0][2] * Q[0][0] + AQ[1][2] * Q[1][0] + AQ[2][2] * Q[2][0];
D[2][1] = AQ[0][2] * Q[0][1] + AQ[1][2] * Q[1][1] + AQ[2][2] * Q[2][1];
D[2][2] = AQ[0][2] * Q[0][2] + AQ[1][2] * Q[1][2] + AQ[2][2] * Q[2][2];
o[0] = D[1][2];
o[1] = D[0][2];
o[2] = D[0][1];
m[0] = fabs(o[0]);
m[1] = fabs(o[1]);
m[2] = fabs(o[2]);
k0 = (m[0] > m[1] && m[0] > m[2]) ? 0 : (m[1] > m[2]) ? 1 : 2; // index of largest element of offdiag
k1 = (k0 + 1) % 3;
k2 = (k0 + 2) % 3;
if (o[k0] == 0.0) {
break; // diagonal already
}
thet = (D[k2][k2] - D[k1][k1]) / (2.0 * o[k0]);
sgn = (thet > 0.0) ? 1.0 : -1.0;
thet *= sgn; // make it positive
t = sgn / (thet + ((thet < 1.E6) ? sqrt(thet * thet + 1.0) : thet)); // sign(T)/(|T|+sqrt(T^2+1))
c = 1.0 / sqrt(t * t + 1.0); // c= 1/(t^2+1) , t=s/c
if (c == 1.0) {
break; // no room for improvement - reached machine precision.
}
jr[0] = jr[1] = jr[2] = jr[3] = 0.0;
jr[k0] = sgn * sqrt((1.0 - c) / 2.0); // using 1/2 angle identity sin(a/2) = sqrt((1-cos(a))/2)
jr[k0] *= -1.0; // since our quat-to-matrix convention was for v*M instead of M*v
jr[3] = sqrt(1.0 - jr[k0] * jr[k0]);
if (jr[3] == 1.0) {
break; // reached limits of floating point precision
}
q[0] = (q[3] * jr[0] + q[0] * jr[3] + q[1] * jr[2] - q[2] * jr[1]);
q[1] = (q[3] * jr[1] - q[0] * jr[2] + q[1] * jr[3] + q[2] * jr[0]);
q[2] = (q[3] * jr[2] + q[0] * jr[1] - q[1] * jr[0] + q[2] * jr[3]);
q[3] = (q[3] * jr[3] - q[0] * jr[0] - q[1] * jr[1] - q[2] * jr[2]);
mq = sqrt(q[0] * q[0] + q[1] * q[1] + q[2] * q[2] + q[3] * q[3]);
q[0] /= mq;
q[1] /= mq;
q[2] /= mq;
q[3] /= mq;
}
}
const double TetrahedronSet::EPS = 0.0000000000001;
VoxelSet::VoxelSet()
{
m_minBB[0] = m_minBB[1] = m_minBB[2] = 0.0;
m_minBBVoxels[0] = m_minBBVoxels[1] = m_minBBVoxels[2] = 0;
m_maxBBVoxels[0] = m_maxBBVoxels[1] = m_maxBBVoxels[2] = 1;
m_minBBPts[0] = m_minBBPts[1] = m_minBBPts[2] = 0;
m_maxBBPts[0] = m_maxBBPts[1] = m_maxBBPts[2] = 1;
m_barycenter[0] = m_barycenter[1] = m_barycenter[2] = 0;
m_barycenterPCA[0] = m_barycenterPCA[1] = m_barycenterPCA[2] = 0.0;
m_scale = 1.0;
m_unitVolume = 1.0;
m_numVoxelsOnSurface = 0;
m_numVoxelsInsideSurface = 0;
memset(m_Q, 0, sizeof(double) * 9);
memset(m_D, 0, sizeof(double) * 9);
}
VoxelSet::~VoxelSet(void)
{
}
void VoxelSet::ComputeBB()
{
const size_t nVoxels = m_voxels.Size();
if (nVoxels == 0)
return;
for (int32_t h = 0; h < 3; ++h) {
m_minBBVoxels[h] = m_voxels[0].m_coord[h];
m_maxBBVoxels[h] = m_voxels[0].m_coord[h];
}
Vec3<double> bary(0.0);
for (size_t p = 0; p < nVoxels; ++p) {
for (int32_t h = 0; h < 3; ++h) {
bary[h] += m_voxels[p].m_coord[h];
if (m_minBBVoxels[h] > m_voxels[p].m_coord[h])
m_minBBVoxels[h] = m_voxels[p].m_coord[h];
if (m_maxBBVoxels[h] < m_voxels[p].m_coord[h])
m_maxBBVoxels[h] = m_voxels[p].m_coord[h];
}
}
bary /= (double)nVoxels;
for (int32_t h = 0; h < 3; ++h) {
m_minBBPts[h] = m_minBBVoxels[h] * m_scale + m_minBB[h];
m_maxBBPts[h] = m_maxBBVoxels[h] * m_scale + m_minBB[h];
m_barycenter[h] = (short)(bary[h] + 0.5);
}
}
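// Computes the convex hull of the surface voxels, sampling every 'sampling'-th surface voxel: the eight
// corner points of each sampled voxel are gathered in clusters of up to CLUSTER_SIZE, a hull is computed
// per cluster, and a final hull is computed over the per-cluster hull vertices.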
void VoxelSet::ComputeConvexHull(Mesh& meshCH, const size_t sampling) const
{
const size_t CLUSTER_SIZE = 65536;
const size_t nVoxels = m_voxels.Size();
if (nVoxels == 0)
return;
SArray<Vec3<double> > cpoints;
Vec3<double>* points = new Vec3<double>[CLUSTER_SIZE];
size_t p = 0;
size_t s = 0;
short i, j, k;
while (p < nVoxels) {
size_t q = 0;
while (q < CLUSTER_SIZE && p < nVoxels) {
if (m_voxels[p].m_data == PRIMITIVE_ON_SURFACE) {
++s;
if (s == sampling) {
s = 0;
i = m_voxels[p].m_coord[0];
j = m_voxels[p].m_coord[1];
k = m_voxels[p].m_coord[2];
Vec3<double> p0((i - 0.5) * m_scale, (j - 0.5) * m_scale, (k - 0.5) * m_scale);
Vec3<double> p1((i + 0.5) * m_scale, (j - 0.5) * m_scale, (k - 0.5) * m_scale);
Vec3<double> p2((i + 0.5) * m_scale, (j + 0.5) * m_scale, (k - 0.5) * m_scale);
Vec3<double> p3((i - 0.5) * m_scale, (j + 0.5) * m_scale, (k - 0.5) * m_scale);
Vec3<double> p4((i - 0.5) * m_scale, (j - 0.5) * m_scale, (k + 0.5) * m_scale);
Vec3<double> p5((i + 0.5) * m_scale, (j - 0.5) * m_scale, (k + 0.5) * m_scale);
Vec3<double> p6((i + 0.5) * m_scale, (j + 0.5) * m_scale, (k + 0.5) * m_scale);
Vec3<double> p7((i - 0.5) * m_scale, (j + 0.5) * m_scale, (k + 0.5) * m_scale);
points[q++] = p0 + m_minBB;
points[q++] = p1 + m_minBB;
points[q++] = p2 + m_minBB;
points[q++] = p3 + m_minBB;
points[q++] = p4 + m_minBB;
points[q++] = p5 + m_minBB;
points[q++] = p6 + m_minBB;
points[q++] = p7 + m_minBB;
}
}
++p;
}
btConvexHullComputer ch;
ch.compute((double*)points, 3 * sizeof(double), (int32_t)q, -1.0, -1.0);
for (int32_t v = 0; v < ch.vertices.size(); v++) {
cpoints.PushBack(Vec3<double>(ch.vertices[v].getX(), ch.vertices[v].getY(), ch.vertices[v].getZ()));
}
}
delete[] points;
points = cpoints.Data();
btConvexHullComputer ch;
ch.compute((double*)points, 3 * sizeof(double), (int32_t)cpoints.Size(), -1.0, -1.0);
meshCH.ResizePoints(0);
meshCH.ResizeTriangles(0);
for (int32_t v = 0; v < ch.vertices.size(); v++) {
meshCH.AddPoint(Vec3<double>(ch.vertices[v].getX(), ch.vertices[v].getY(), ch.vertices[v].getZ()));
}
const int32_t nt = ch.faces.size();
for (int32_t t = 0; t < nt; ++t) {
const btConvexHullComputer::Edge* sourceEdge = &(ch.edges[ch.faces[t]]);
int32_t a = sourceEdge->getSourceVertex();
int32_t b = sourceEdge->getTargetVertex();
const btConvexHullComputer::Edge* edge = sourceEdge->getNextEdgeOfFace();
int32_t c = edge->getTargetVertex();
while (c != a) {
meshCH.AddTriangle(Vec3<int32_t>(a, b, c));
edge = edge->getNextEdgeOfFace();
b = c;
c = edge->getTargetVertex();
}
}
}
void VoxelSet::GetPoints(const Voxel& voxel,
Vec3<double>* const pts) const
{
short i = voxel.m_coord[0];
short j = voxel.m_coord[1];
short k = voxel.m_coord[2];
pts[0][0] = (i - 0.5) * m_scale + m_minBB[0];
pts[1][0] = (i + 0.5) * m_scale + m_minBB[0];
pts[2][0] = (i + 0.5) * m_scale + m_minBB[0];
pts[3][0] = (i - 0.5) * m_scale + m_minBB[0];
pts[4][0] = (i - 0.5) * m_scale + m_minBB[0];
pts[5][0] = (i + 0.5) * m_scale + m_minBB[0];
pts[6][0] = (i + 0.5) * m_scale + m_minBB[0];
pts[7][0] = (i - 0.5) * m_scale + m_minBB[0];
pts[0][1] = (j - 0.5) * m_scale + m_minBB[1];
pts[1][1] = (j - 0.5) * m_scale + m_minBB[1];
pts[2][1] = (j + 0.5) * m_scale + m_minBB[1];
pts[3][1] = (j + 0.5) * m_scale + m_minBB[1];
pts[4][1] = (j - 0.5) * m_scale + m_minBB[1];
pts[5][1] = (j - 0.5) * m_scale + m_minBB[1];
pts[6][1] = (j + 0.5) * m_scale + m_minBB[1];
pts[7][1] = (j + 0.5) * m_scale + m_minBB[1];
pts[0][2] = (k - 0.5) * m_scale + m_minBB[2];
pts[1][2] = (k - 0.5) * m_scale + m_minBB[2];
pts[2][2] = (k - 0.5) * m_scale + m_minBB[2];
pts[3][2] = (k - 0.5) * m_scale + m_minBB[2];
pts[4][2] = (k + 0.5) * m_scale + m_minBB[2];
pts[5][2] = (k + 0.5) * m_scale + m_minBB[2];
pts[6][2] = (k + 0.5) * m_scale + m_minBB[2];
pts[7][2] = (k + 0.5) * m_scale + m_minBB[2];
}
void VoxelSet::Intersect(const Plane& plane,
SArray<Vec3<double> >* const positivePts,
SArray<Vec3<double> >* const negativePts,
const size_t sampling) const
{
const size_t nVoxels = m_voxels.Size();
if (nVoxels == 0)
return;
const double d0 = m_scale;
double d;
Vec3<double> pts[8];
Vec3<double> pt;
Voxel voxel;
size_t sp = 0;
size_t sn = 0;
for (size_t v = 0; v < nVoxels; ++v) {
voxel = m_voxels[v];
pt = GetPoint(voxel);
d = plane.m_a * pt[0] + plane.m_b * pt[1] + plane.m_c * pt[2] + plane.m_d;
// if (d >= 0.0 && d <= d0) positivePts->PushBack(pt);
// else if (d < 0.0 && -d <= d0) negativePts->PushBack(pt);
if (d >= 0.0) {
if (d <= d0) {
GetPoints(voxel, pts);
for (int32_t k = 0; k < 8; ++k) {
positivePts->PushBack(pts[k]);
}
}
else {
if (++sp == sampling) {
// positivePts->PushBack(pt);
GetPoints(voxel, pts);
for (int32_t k = 0; k < 8; ++k) {
positivePts->PushBack(pts[k]);
}
sp = 0;
}
}
}
else {
if (-d <= d0) {
GetPoints(voxel, pts);
for (int32_t k = 0; k < 8; ++k) {
negativePts->PushBack(pts[k]);
}
}
else {
if (++sn == sampling) {
// negativePts->PushBack(pt);
GetPoints(voxel, pts);
for (int32_t k = 0; k < 8; ++k) {
negativePts->PushBack(pts[k]);
}
sn = 0;
}
}
}
}
}
void VoxelSet::ComputeExteriorPoints(const Plane& plane,
const Mesh& mesh,
SArray<Vec3<double> >* const exteriorPts) const
{
const size_t nVoxels = m_voxels.Size();
if (nVoxels == 0)
return;
double d;
Vec3<double> pt;
Vec3<double> pts[8];
Voxel voxel;
for (size_t v = 0; v < nVoxels; ++v) {
voxel = m_voxels[v];
pt = GetPoint(voxel);
d = plane.m_a * pt[0] + plane.m_b * pt[1] + plane.m_c * pt[2] + plane.m_d;
if (d >= 0.0) {
if (!mesh.IsInside(pt)) {
GetPoints(voxel, pts);
for (int32_t k = 0; k < 8; ++k) {
exteriorPts->PushBack(pts[k]);
}
}
}
}
}
void VoxelSet::ComputeClippedVolumes(const Plane& plane,
double& positiveVolume,
double& negativeVolume) const
{
negativeVolume = 0.0;
positiveVolume = 0.0;
const size_t nVoxels = m_voxels.Size();
if (nVoxels == 0)
return;
double d;
Vec3<double> pt;
size_t nPositiveVoxels = 0;
for (size_t v = 0; v < nVoxels; ++v) {
pt = GetPoint(m_voxels[v]);
d = plane.m_a * pt[0] + plane.m_b * pt[1] + plane.m_c * pt[2] + plane.m_d;
nPositiveVoxels += (d >= 0.0);
}
size_t nNegativeVoxels = nVoxels - nPositiveVoxels;
positiveVolume = m_unitVolume * nPositiveVoxels;
negativeVolume = m_unitVolume * nNegativeVoxels;
}
void VoxelSet::SelectOnSurface(PrimitiveSet* const onSurfP) const
{
VoxelSet* const onSurf = (VoxelSet*)onSurfP;
const size_t nVoxels = m_voxels.Size();
if (nVoxels == 0)
return;
for (int32_t h = 0; h < 3; ++h) {
onSurf->m_minBB[h] = m_minBB[h];
}
onSurf->m_voxels.Resize(0);
onSurf->m_scale = m_scale;
onSurf->m_unitVolume = m_unitVolume;
onSurf->m_numVoxelsOnSurface = 0;
onSurf->m_numVoxelsInsideSurface = 0;
Voxel voxel;
for (size_t v = 0; v < nVoxels; ++v) {
voxel = m_voxels[v];
if (voxel.m_data == PRIMITIVE_ON_SURFACE) {
onSurf->m_voxels.PushBack(voxel);
++onSurf->m_numVoxelsOnSurface;
}
}
}
void VoxelSet::Clip(const Plane& plane,
PrimitiveSet* const positivePartP,
PrimitiveSet* const negativePartP) const
{
VoxelSet* const positivePart = (VoxelSet*)positivePartP;
VoxelSet* const negativePart = (VoxelSet*)negativePartP;
const size_t nVoxels = m_voxels.Size();
if (nVoxels == 0)
return;
for (int32_t h = 0; h < 3; ++h) {
negativePart->m_minBB[h] = positivePart->m_minBB[h] = m_minBB[h];
}
positivePart->m_voxels.Resize(0);
negativePart->m_voxels.Resize(0);
positivePart->m_voxels.Allocate(nVoxels);
negativePart->m_voxels.Allocate(nVoxels);
negativePart->m_scale = positivePart->m_scale = m_scale;
negativePart->m_unitVolume = positivePart->m_unitVolume = m_unitVolume;
negativePart->m_numVoxelsOnSurface = positivePart->m_numVoxelsOnSurface = 0;
negativePart->m_numVoxelsInsideSurface = positivePart->m_numVoxelsInsideSurface = 0;
double d;
Vec3<double> pt;
Voxel voxel;
const double d0 = m_scale;
for (size_t v = 0; v < nVoxels; ++v) {
voxel = m_voxels[v];
pt = GetPoint(voxel);
d = plane.m_a * pt[0] + plane.m_b * pt[1] + plane.m_c * pt[2] + plane.m_d;
if (d >= 0.0) {
if (voxel.m_data == PRIMITIVE_ON_SURFACE || d <= d0) {
voxel.m_data = PRIMITIVE_ON_SURFACE;
positivePart->m_voxels.PushBack(voxel);
++positivePart->m_numVoxelsOnSurface;
}
else {
positivePart->m_voxels.PushBack(voxel);
++positivePart->m_numVoxelsInsideSurface;
}
}
else {
if (voxel.m_data == PRIMITIVE_ON_SURFACE || -d <= d0) {
voxel.m_data = PRIMITIVE_ON_SURFACE;
negativePart->m_voxels.PushBack(voxel);
++negativePart->m_numVoxelsOnSurface;
}
else {
negativePart->m_voxels.PushBack(voxel);
++negativePart->m_numVoxelsInsideSurface;
}
}
}
}
void VoxelSet::Convert(Mesh& mesh, const VOXEL_VALUE value) const
{
const size_t nVoxels = m_voxels.Size();
if (nVoxels == 0)
return;
Voxel voxel;
Vec3<double> pts[8];
for (size_t v = 0; v < nVoxels; ++v) {
voxel = m_voxels[v];
if (voxel.m_data == value) {
GetPoints(voxel, pts);
int32_t s = (int32_t)mesh.GetNPoints();
for (int32_t k = 0; k < 8; ++k) {
mesh.AddPoint(pts[k]);
}
mesh.AddTriangle(Vec3<int32_t>(s + 0, s + 2, s + 1));
mesh.AddTriangle(Vec3<int32_t>(s + 0, s + 3, s + 2));
mesh.AddTriangle(Vec3<int32_t>(s + 4, s + 5, s + 6));
mesh.AddTriangle(Vec3<int32_t>(s + 4, s + 6, s + 7));
mesh.AddTriangle(Vec3<int32_t>(s + 7, s + 6, s + 2));
mesh.AddTriangle(Vec3<int32_t>(s + 7, s + 2, s + 3));
mesh.AddTriangle(Vec3<int32_t>(s + 4, s + 1, s + 5));
mesh.AddTriangle(Vec3<int32_t>(s + 4, s + 0, s + 1));
mesh.AddTriangle(Vec3<int32_t>(s + 6, s + 5, s + 1));
mesh.AddTriangle(Vec3<int32_t>(s + 6, s + 1, s + 2));
mesh.AddTriangle(Vec3<int32_t>(s + 7, s + 0, s + 4));
mesh.AddTriangle(Vec3<int32_t>(s + 7, s + 3, s + 0));
}
}
}
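// Builds the 3x3 covariance matrix of the voxel coordinates (relative to m_barycenter) and diagonalizes
// it with Diagonalize(), storing the rotation (eigenvector basis) in m_Q and the eigenvalues in m_D.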
void VoxelSet::ComputePrincipalAxes()
{
const size_t nVoxels = m_voxels.Size();
if (nVoxels == 0)
return;
m_barycenterPCA[0] = m_barycenterPCA[1] = m_barycenterPCA[2] = 0.0;
for (size_t v = 0; v < nVoxels; ++v) {
Voxel& voxel = m_voxels[v];
m_barycenterPCA[0] += voxel.m_coord[0];
m_barycenterPCA[1] += voxel.m_coord[1];
m_barycenterPCA[2] += voxel.m_coord[2];
}
m_barycenterPCA /= (double)nVoxels;
double covMat[3][3] = { { 0.0, 0.0, 0.0 },
{ 0.0, 0.0, 0.0 },
{ 0.0, 0.0, 0.0 } };
double x, y, z;
for (size_t v = 0; v < nVoxels; ++v) {
Voxel& voxel = m_voxels[v];
x = voxel.m_coord[0] - m_barycenter[0];
y = voxel.m_coord[1] - m_barycenter[1];
z = voxel.m_coord[2] - m_barycenter[2];
covMat[0][0] += x * x;
covMat[1][1] += y * y;
covMat[2][2] += z * z;
covMat[0][1] += x * y;
covMat[0][2] += x * z;
covMat[1][2] += y * z;
}
covMat[0][0] /= nVoxels;
covMat[1][1] /= nVoxels;
covMat[2][2] /= nVoxels;
covMat[0][1] /= nVoxels;
covMat[0][2] /= nVoxels;
covMat[1][2] /= nVoxels;
covMat[1][0] = covMat[0][1];
covMat[2][0] = covMat[0][2];
covMat[2][1] = covMat[1][2];
Diagonalize(covMat, m_Q, m_D);
}
Volume::Volume()
{
m_dim[0] = m_dim[1] = m_dim[2] = 0;
m_minBB[0] = m_minBB[1] = m_minBB[2] = 0.0;
m_maxBB[0] = m_maxBB[1] = m_maxBB[2] = 1.0;
m_numVoxelsOnSurface = 0;
m_numVoxelsInsideSurface = 0;
m_numVoxelsOutsideSurface = 0;
m_scale = 1.0;
m_data = 0;
}
Volume::~Volume(void)
{
delete[] m_data;
}
void Volume::Allocate()
{
delete[] m_data;
size_t size = m_dim[0] * m_dim[1] * m_dim[2];
m_data = new unsigned char[size];
memset(m_data, PRIMITIVE_UNDEFINED, sizeof(unsigned char) * size);
}
void Volume::Free()
{
delete[] m_data;
m_data = 0;
}
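// Breadth-first flood fill over the sub-grid [i0,i1) x [j0,j1) x [k0,k1): every undefined voxel reachable
// through the 6-neighborhood is marked PRIMITIVE_OUTSIDE_SURFACE and counted in m_numVoxelsOutsideSurface.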
void Volume::FillOutsideSurface(const size_t i0,
const size_t j0,
const size_t k0,
const size_t i1,
const size_t j1,
const size_t k1)
{
const short neighbours[6][3] = { { 1, 0, 0 },
{ 0, 1, 0 },
{ 0, 0, 1 },
{ -1, 0, 0 },
{ 0, -1, 0 },
{ 0, 0, -1 } };
std::queue<Vec3<short> > fifo;
Vec3<short> current;
short a, b, c;
for (size_t i = i0; i < i1; ++i) {
for (size_t j = j0; j < j1; ++j) {
for (size_t k = k0; k < k1; ++k) {
if (GetVoxel(i, j, k) == PRIMITIVE_UNDEFINED) {
current[0] = (short)i;
current[1] = (short)j;
current[2] = (short)k;
fifo.push(current);
GetVoxel(current[0], current[1], current[2]) = PRIMITIVE_OUTSIDE_SURFACE;
++m_numVoxelsOutsideSurface;
while (fifo.size() > 0) {
current = fifo.front();
fifo.pop();
for (int32_t h = 0; h < 6; ++h) {
a = current[0] + neighbours[h][0];
b = current[1] + neighbours[h][1];
c = current[2] + neighbours[h][2];
if (a < 0 || a >= (int32_t)m_dim[0] || b < 0 || b >= (int32_t)m_dim[1] || c < 0 || c >= (int32_t)m_dim[2]) {
continue;
}
unsigned char& v = GetVoxel(a, b, c);
if (v == PRIMITIVE_UNDEFINED) {
v = PRIMITIVE_OUTSIDE_SURFACE;
++m_numVoxelsOutsideSurface;
fifo.push(Vec3<short>(a, b, c));
}
}
}
}
}
}
}
}
void Volume::FillInsideSurface()
{
const size_t i0 = m_dim[0];
const size_t j0 = m_dim[1];
const size_t k0 = m_dim[2];
for (size_t i = 0; i < i0; ++i) {
for (size_t j = 0; j < j0; ++j) {
for (size_t k = 0; k < k0; ++k) {
unsigned char& v = GetVoxel(i, j, k);
if (v == PRIMITIVE_UNDEFINED) {
v = PRIMITIVE_INSIDE_SURFACE;
++m_numVoxelsInsideSurface;
}
}
}
}
}
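// Emits one axis-aligned cube (8 points, 12 triangles) per voxel whose label
// matches 'value'; voxel centers are scaled by m_scale and offset by m_minBB
// to return to the original mesh coordinate frame.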
void Volume::Convert(Mesh& mesh, const VOXEL_VALUE value) const
{
const size_t i0 = m_dim[0];
const size_t j0 = m_dim[1];
const size_t k0 = m_dim[2];
for (size_t i = 0; i < i0; ++i) {
for (size_t j = 0; j < j0; ++j) {
for (size_t k = 0; k < k0; ++k) {
const unsigned char& voxel = GetVoxel(i, j, k);
if (voxel == value) {
Vec3<double> p0((i - 0.5) * m_scale, (j - 0.5) * m_scale, (k - 0.5) * m_scale);
Vec3<double> p1((i + 0.5) * m_scale, (j - 0.5) * m_scale, (k - 0.5) * m_scale);
Vec3<double> p2((i + 0.5) * m_scale, (j + 0.5) * m_scale, (k - 0.5) * m_scale);
Vec3<double> p3((i - 0.5) * m_scale, (j + 0.5) * m_scale, (k - 0.5) * m_scale);
Vec3<double> p4((i - 0.5) * m_scale, (j - 0.5) * m_scale, (k + 0.5) * m_scale);
Vec3<double> p5((i + 0.5) * m_scale, (j - 0.5) * m_scale, (k + 0.5) * m_scale);
Vec3<double> p6((i + 0.5) * m_scale, (j + 0.5) * m_scale, (k + 0.5) * m_scale);
Vec3<double> p7((i - 0.5) * m_scale, (j + 0.5) * m_scale, (k + 0.5) * m_scale);
int32_t s = (int32_t)mesh.GetNPoints();
mesh.AddPoint(p0 + m_minBB);
mesh.AddPoint(p1 + m_minBB);
mesh.AddPoint(p2 + m_minBB);
mesh.AddPoint(p3 + m_minBB);
mesh.AddPoint(p4 + m_minBB);
mesh.AddPoint(p5 + m_minBB);
mesh.AddPoint(p6 + m_minBB);
mesh.AddPoint(p7 + m_minBB);
mesh.AddTriangle(Vec3<int32_t>(s + 0, s + 2, s + 1));
mesh.AddTriangle(Vec3<int32_t>(s + 0, s + 3, s + 2));
mesh.AddTriangle(Vec3<int32_t>(s + 4, s + 5, s + 6));
mesh.AddTriangle(Vec3<int32_t>(s + 4, s + 6, s + 7));
mesh.AddTriangle(Vec3<int32_t>(s + 7, s + 6, s + 2));
mesh.AddTriangle(Vec3<int32_t>(s + 7, s + 2, s + 3));
mesh.AddTriangle(Vec3<int32_t>(s + 4, s + 1, s + 5));
mesh.AddTriangle(Vec3<int32_t>(s + 4, s + 0, s + 1));
mesh.AddTriangle(Vec3<int32_t>(s + 6, s + 5, s + 1));
mesh.AddTriangle(Vec3<int32_t>(s + 6, s + 1, s + 2));
mesh.AddTriangle(Vec3<int32_t>(s + 7, s + 0, s + 4));
mesh.AddTriangle(Vec3<int32_t>(s + 7, s + 3, s + 0));
}
}
}
}
}
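// Copies the labelled voxel grid into a VoxelSet, keeping only the inside and
// on-surface voxels and recording the count of each category.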
void Volume::Convert(VoxelSet& vset) const
{
for (int32_t h = 0; h < 3; ++h) {
vset.m_minBB[h] = m_minBB[h];
}
vset.m_voxels.Allocate(m_numVoxelsInsideSurface + m_numVoxelsOnSurface);
vset.m_scale = m_scale;
vset.m_unitVolume = m_scale * m_scale * m_scale;
const short i0 = (short)m_dim[0];
const short j0 = (short)m_dim[1];
const short k0 = (short)m_dim[2];
Voxel voxel;
vset.m_numVoxelsOnSurface = 0;
vset.m_numVoxelsInsideSurface = 0;
for (short i = 0; i < i0; ++i) {
for (short j = 0; j < j0; ++j) {
for (short k = 0; k < k0; ++k) {
const unsigned char& value = GetVoxel(i, j, k);
if (value == PRIMITIVE_INSIDE_SURFACE) {
voxel.m_coord[0] = i;
voxel.m_coord[1] = j;
voxel.m_coord[2] = k;
voxel.m_data = PRIMITIVE_INSIDE_SURFACE;
vset.m_voxels.PushBack(voxel);
++vset.m_numVoxelsInsideSurface;
}
else if (value == PRIMITIVE_ON_SURFACE) {
voxel.m_coord[0] = i;
voxel.m_coord[1] = j;
voxel.m_coord[2] = k;
voxel.m_data = PRIMITIVE_ON_SURFACE;
vset.m_voxels.PushBack(voxel);
++vset.m_numVoxelsOnSurface;
}
}
}
}
}
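// Converts each occupied voxel into 5 tetrahedra that exactly tile the cube,
// so the tetrahedron-based pipeline (params.m_mode == 1) can reuse the same
// voxelized volume.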
void Volume::Convert(TetrahedronSet& tset) const
{
tset.m_tetrahedra.Allocate(5 * (m_numVoxelsInsideSurface + m_numVoxelsOnSurface));
tset.m_scale = m_scale;
const short i0 = (short)m_dim[0];
const short j0 = (short)m_dim[1];
const short k0 = (short)m_dim[2];
tset.m_numTetrahedraOnSurface = 0;
tset.m_numTetrahedraInsideSurface = 0;
Tetrahedron tetrahedron;
for (short i = 0; i < i0; ++i) {
for (short j = 0; j < j0; ++j) {
for (short k = 0; k < k0; ++k) {
const unsigned char& value = GetVoxel(i, j, k);
if (value == PRIMITIVE_INSIDE_SURFACE || value == PRIMITIVE_ON_SURFACE) {
tetrahedron.m_data = value;
Vec3<double> p1((i - 0.5) * m_scale + m_minBB[0], (j - 0.5) * m_scale + m_minBB[1], (k - 0.5) * m_scale + m_minBB[2]);
Vec3<double> p2((i + 0.5) * m_scale + m_minBB[0], (j - 0.5) * m_scale + m_minBB[1], (k - 0.5) * m_scale + m_minBB[2]);
Vec3<double> p3((i + 0.5) * m_scale + m_minBB[0], (j + 0.5) * m_scale + m_minBB[1], (k - 0.5) * m_scale + m_minBB[2]);
Vec3<double> p4((i - 0.5) * m_scale + m_minBB[0], (j + 0.5) * m_scale + m_minBB[1], (k - 0.5) * m_scale + m_minBB[2]);
Vec3<double> p5((i - 0.5) * m_scale + m_minBB[0], (j - 0.5) * m_scale + m_minBB[1], (k + 0.5) * m_scale + m_minBB[2]);
Vec3<double> p6((i + 0.5) * m_scale + m_minBB[0], (j - 0.5) * m_scale + m_minBB[1], (k + 0.5) * m_scale + m_minBB[2]);
Vec3<double> p7((i + 0.5) * m_scale + m_minBB[0], (j + 0.5) * m_scale + m_minBB[1], (k + 0.5) * m_scale + m_minBB[2]);
Vec3<double> p8((i - 0.5) * m_scale + m_minBB[0], (j + 0.5) * m_scale + m_minBB[1], (k + 0.5) * m_scale + m_minBB[2]);
tetrahedron.m_pts[0] = p2;
tetrahedron.m_pts[1] = p4;
tetrahedron.m_pts[2] = p7;
tetrahedron.m_pts[3] = p5;
tset.m_tetrahedra.PushBack(tetrahedron);
tetrahedron.m_pts[0] = p6;
tetrahedron.m_pts[1] = p2;
tetrahedron.m_pts[2] = p7;
tetrahedron.m_pts[3] = p5;
tset.m_tetrahedra.PushBack(tetrahedron);
tetrahedron.m_pts[0] = p3;
tetrahedron.m_pts[1] = p4;
tetrahedron.m_pts[2] = p7;
tetrahedron.m_pts[3] = p2;
tset.m_tetrahedra.PushBack(tetrahedron);
tetrahedron.m_pts[0] = p1;
tetrahedron.m_pts[1] = p4;
tetrahedron.m_pts[2] = p2;
tetrahedron.m_pts[3] = p5;
tset.m_tetrahedra.PushBack(tetrahedron);
tetrahedron.m_pts[0] = p8;
tetrahedron.m_pts[1] = p5;
tetrahedron.m_pts[2] = p7;
tetrahedron.m_pts[3] = p4;
tset.m_tetrahedra.PushBack(tetrahedron);
if (value == PRIMITIVE_INSIDE_SURFACE) {
tset.m_numTetrahedraInsideSurface += 5;
}
else {
tset.m_numTetrahedraOnSurface += 5;
}
}
}
}
}
}
void Volume::AlignToPrincipalAxes(double (&rot)[3][3]) const
{
const short i0 = (short)m_dim[0];
const short j0 = (short)m_dim[1];
const short k0 = (short)m_dim[2];
Vec3<double> barycenter(0.0);
size_t nVoxels = 0;
for (short i = 0; i < i0; ++i) {
for (short j = 0; j < j0; ++j) {
for (short k = 0; k < k0; ++k) {
const unsigned char& value = GetVoxel(i, j, k);
if (value == PRIMITIVE_INSIDE_SURFACE || value == PRIMITIVE_ON_SURFACE) {
barycenter[0] += i;
barycenter[1] += j;
barycenter[2] += k;
++nVoxels;
}
}
}
}
barycenter /= (double)nVoxels;
double covMat[3][3] = { { 0.0, 0.0, 0.0 },
{ 0.0, 0.0, 0.0 },
{ 0.0, 0.0, 0.0 } };
double x, y, z;
for (short i = 0; i < i0; ++i) {
for (short j = 0; j < j0; ++j) {
for (short k = 0; k < k0; ++k) {
const unsigned char& value = GetVoxel(i, j, k);
if (value == PRIMITIVE_INSIDE_SURFACE || value == PRIMITIVE_ON_SURFACE) {
x = i - barycenter[0];
y = j - barycenter[1];
z = k - barycenter[2];
covMat[0][0] += x * x;
covMat[1][1] += y * y;
covMat[2][2] += z * z;
covMat[0][1] += x * y;
covMat[0][2] += x * z;
covMat[1][2] += y * z;
}
}
}
}
covMat[1][0] = covMat[0][1];
covMat[2][0] = covMat[0][2];
covMat[2][1] = covMat[1][2];
double D[3][3];
Diagonalize(covMat, rot, D);
}
TetrahedronSet::TetrahedronSet()
{
m_minBB[0] = m_minBB[1] = m_minBB[2] = 0.0;
m_maxBB[0] = m_maxBB[1] = m_maxBB[2] = 1.0;
m_barycenter[0] = m_barycenter[1] = m_barycenter[2] = 0.0;
m_scale = 1.0;
m_numTetrahedraOnSurface = 0;
m_numTetrahedraInsideSurface = 0;
memset(m_Q, 0, sizeof(double) * 9);
memset(m_D, 0, sizeof(double) * 9);
}
TetrahedronSet::~TetrahedronSet(void)
{
}
void TetrahedronSet::ComputeBB()
{
const size_t nTetrahedra = m_tetrahedra.Size();
if (nTetrahedra == 0)
return;
for (int32_t h = 0; h < 3; ++h) {
m_minBB[h] = m_maxBB[h] = m_tetrahedra[0].m_pts[0][h];
m_barycenter[h] = 0.0;
}
for (size_t p = 0; p < nTetrahedra; ++p) {
for (int32_t i = 0; i < 4; ++i) {
for (int32_t h = 0; h < 3; ++h) {
if (m_minBB[h] > m_tetrahedra[p].m_pts[i][h])
m_minBB[h] = m_tetrahedra[p].m_pts[i][h];
if (m_maxBB[h] < m_tetrahedra[p].m_pts[i][h])
m_maxBB[h] = m_tetrahedra[p].m_pts[i][h];
m_barycenter[h] += m_tetrahedra[p].m_pts[i][h];
}
}
}
m_barycenter /= (double)(4 * nTetrahedra);
}
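// Convex hull of the on-surface tetrahedra vertices. Points are gathered in
// clusters of up to 65536 (optionally taking only every 'sampling'-th surface
// tetrahedron), a hull is computed per cluster, and a final hull is computed
// over the cluster hull vertices to keep btConvexHullComputer input sizes
// bounded.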
void TetrahedronSet::ComputeConvexHull(Mesh& meshCH, const size_t sampling) const
{
const size_t CLUSTER_SIZE = 65536;
const size_t nTetrahedra = m_tetrahedra.Size();
if (nTetrahedra == 0)
return;
SArray<Vec3<double> > cpoints;
Vec3<double>* points = new Vec3<double>[CLUSTER_SIZE];
size_t p = 0;
while (p < nTetrahedra) {
size_t q = 0;
size_t s = 0;
while (q < CLUSTER_SIZE && p < nTetrahedra) {
if (m_tetrahedra[p].m_data == PRIMITIVE_ON_SURFACE) {
++s;
if (s == sampling) {
s = 0;
for (int32_t a = 0; a < 4; ++a) {
points[q++] = m_tetrahedra[p].m_pts[a];
for (int32_t xx = 0; xx < 3; ++xx) {
assert(m_tetrahedra[p].m_pts[a][xx] + EPS >= m_minBB[xx]);
assert(m_tetrahedra[p].m_pts[a][xx] <= m_maxBB[xx] + EPS);
}
}
}
}
++p;
}
btConvexHullComputer ch;
ch.compute((double*)points, 3 * sizeof(double), (int32_t)q, -1.0, -1.0);
for (int32_t v = 0; v < ch.vertices.size(); v++) {
cpoints.PushBack(Vec3<double>(ch.vertices[v].getX(), ch.vertices[v].getY(), ch.vertices[v].getZ()));
}
}
delete[] points;
points = cpoints.Data();
btConvexHullComputer ch;
ch.compute((double*)points, 3 * sizeof(double), (int32_t)cpoints.Size(), -1.0, -1.0);
meshCH.ResizePoints(0);
meshCH.ResizeTriangles(0);
for (int32_t v = 0; v < ch.vertices.size(); v++) {
meshCH.AddPoint(Vec3<double>(ch.vertices[v].getX(), ch.vertices[v].getY(), ch.vertices[v].getZ()));
}
const int32_t nt = ch.faces.size();
for (int32_t t = 0; t < nt; ++t) {
const btConvexHullComputer::Edge* sourceEdge = &(ch.edges[ch.faces[t]]);
int32_t a = sourceEdge->getSourceVertex();
int32_t b = sourceEdge->getTargetVertex();
const btConvexHullComputer::Edge* edge = sourceEdge->getNextEdgeOfFace();
int32_t c = edge->getTargetVertex();
while (c != a) {
meshCH.AddTriangle(Vec3<int32_t>(a, b, c));
edge = edge->getNextEdgeOfFace();
b = c;
c = edge->getTargetVertex();
}
}
}
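// Rejects near-degenerate tetrahedra (|signed volume| < EPS) and swaps the
// first two vertices when the signed volume is negative, so every stored
// tetrahedron has a consistent positive orientation.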
inline bool TetrahedronSet::Add(Tetrahedron& tetrahedron)
{
double v = ComputeVolume4(tetrahedron.m_pts[0], tetrahedron.m_pts[1], tetrahedron.m_pts[2], tetrahedron.m_pts[3]);
const double EPS = 0.0000000001;
if (fabs(v) < EPS) {
return false;
}
else if (v < 0.0) {
Vec3<double> tmp = tetrahedron.m_pts[0];
tetrahedron.m_pts[0] = tetrahedron.m_pts[1];
tetrahedron.m_pts[1] = tmp;
}
for (int32_t a = 0; a < 4; ++a) {
for (int32_t xx = 0; xx < 3; ++xx) {
assert(tetrahedron.m_pts[a][xx] + EPS >= m_minBB[xx]);
assert(tetrahedron.m_pts[a][xx] <= m_maxBB[xx] + EPS);
}
}
m_tetrahedra.PushBack(tetrahedron);
return true;
}
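// Re-tetrahedralizes the convex polytope produced by clipping a tetrahedron:
// 4 points map to a single tetrahedron, while 5 or 6 points are decomposed
// greedily by repeatedly picking the candidate tetrahedron with the largest
// absolute volume and attaching the remaining points to its free faces.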
void TetrahedronSet::AddClippedTetrahedra(const Vec3<double> (&pts)[10], const int32_t nPts)
{
const int32_t tetF[4][3] = { { 0, 1, 2 }, { 2, 1, 3 }, { 3, 1, 0 }, { 3, 0, 2 } };
if (nPts < 4) {
return;
}
else if (nPts == 4) {
Tetrahedron tetrahedron;
tetrahedron.m_data = PRIMITIVE_ON_SURFACE;
tetrahedron.m_pts[0] = pts[0];
tetrahedron.m_pts[1] = pts[1];
tetrahedron.m_pts[2] = pts[2];
tetrahedron.m_pts[3] = pts[3];
if (Add(tetrahedron)) {
++m_numTetrahedraOnSurface;
}
}
else if (nPts == 5) {
const int32_t tet[15][4] = {
{ 0, 1, 2, 3 }, { 1, 2, 3, 4 }, { 0, 2, 3, 4 }, { 0, 1, 3, 4 }, { 0, 1, 2, 4 },
};
const int32_t rem[5] = { 4, 0, 1, 2, 3 };
double maxVol = 0.0;
int32_t h0 = -1;
Tetrahedron tetrahedron0;
tetrahedron0.m_data = PRIMITIVE_ON_SURFACE;
for (int32_t h = 0; h < 5; ++h) {
double v = ComputeVolume4(pts[tet[h][0]], pts[tet[h][1]], pts[tet[h][2]], pts[tet[h][3]]);
if (v > maxVol) {
h0 = h;
tetrahedron0.m_pts[0] = pts[tet[h][0]];
tetrahedron0.m_pts[1] = pts[tet[h][1]];
tetrahedron0.m_pts[2] = pts[tet[h][2]];
tetrahedron0.m_pts[3] = pts[tet[h][3]];
maxVol = v;
}
else if (-v > maxVol) {
h0 = h;
tetrahedron0.m_pts[0] = pts[tet[h][1]];
tetrahedron0.m_pts[1] = pts[tet[h][0]];
tetrahedron0.m_pts[2] = pts[tet[h][2]];
tetrahedron0.m_pts[3] = pts[tet[h][3]];
maxVol = -v;
}
}
if (h0 == -1)
return;
if (Add(tetrahedron0)) {
++m_numTetrahedraOnSurface;
}
else {
return;
}
int32_t a = rem[h0];
maxVol = 0.0;
int32_t h1 = -1;
Tetrahedron tetrahedron1;
tetrahedron1.m_data = PRIMITIVE_ON_SURFACE;
for (int32_t h = 0; h < 4; ++h) {
double v = ComputeVolume4(pts[a], tetrahedron0.m_pts[tetF[h][0]], tetrahedron0.m_pts[tetF[h][1]], tetrahedron0.m_pts[tetF[h][2]]);
if (v > maxVol) {
h1 = h;
tetrahedron1.m_pts[0] = pts[a];
tetrahedron1.m_pts[1] = tetrahedron0.m_pts[tetF[h][0]];
tetrahedron1.m_pts[2] = tetrahedron0.m_pts[tetF[h][1]];
tetrahedron1.m_pts[3] = tetrahedron0.m_pts[tetF[h][2]];
maxVol = v;
}
}
if (h1 != -1 && Add(tetrahedron1)) {
++m_numTetrahedraOnSurface;
}
}
else if (nPts == 6) {
const int32_t tet[15][4] = { { 2, 3, 4, 5 }, { 1, 3, 4, 5 }, { 1, 2, 4, 5 }, { 1, 2, 3, 5 }, { 1, 2, 3, 4 },
{ 0, 3, 4, 5 }, { 0, 2, 4, 5 }, { 0, 2, 3, 5 }, { 0, 2, 3, 4 }, { 0, 1, 4, 5 },
{ 0, 1, 3, 5 }, { 0, 1, 3, 4 }, { 0, 1, 2, 5 }, { 0, 1, 2, 4 }, { 0, 1, 2, 3 } };
const int32_t rem[15][2] = { { 0, 1 }, { 0, 2 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
{ 1, 2 }, { 1, 3 }, { 1, 4 }, { 1, 5 }, { 2, 3 },
{ 2, 4 }, { 2, 5 }, { 3, 4 }, { 3, 5 }, { 4, 5 } };
double maxVol = 0.0;
int32_t h0 = -1;
Tetrahedron tetrahedron0;
tetrahedron0.m_data = PRIMITIVE_ON_SURFACE;
for (int32_t h = 0; h < 15; ++h) {
double v = ComputeVolume4(pts[tet[h][0]], pts[tet[h][1]], pts[tet[h][2]], pts[tet[h][3]]);
if (v > maxVol) {
h0 = h;
tetrahedron0.m_pts[0] = pts[tet[h][0]];
tetrahedron0.m_pts[1] = pts[tet[h][1]];
tetrahedron0.m_pts[2] = pts[tet[h][2]];
tetrahedron0.m_pts[3] = pts[tet[h][3]];
maxVol = v;
}
else if (-v > maxVol) {
h0 = h;
tetrahedron0.m_pts[0] = pts[tet[h][1]];
tetrahedron0.m_pts[1] = pts[tet[h][0]];
tetrahedron0.m_pts[2] = pts[tet[h][2]];
tetrahedron0.m_pts[3] = pts[tet[h][3]];
maxVol = -v;
}
}
if (h0 == -1)
return;
if (Add(tetrahedron0)) {
++m_numTetrahedraOnSurface;
}
else {
return;
}
int32_t a0 = rem[h0][0];
int32_t a1 = rem[h0][1];
int32_t h1 = -1;
Tetrahedron tetrahedron1;
tetrahedron1.m_data = PRIMITIVE_ON_SURFACE;
maxVol = 0.0;
for (int32_t h = 0; h < 4; ++h) {
double v = ComputeVolume4(pts[a0], tetrahedron0.m_pts[tetF[h][0]], tetrahedron0.m_pts[tetF[h][1]], tetrahedron0.m_pts[tetF[h][2]]);
if (v > maxVol) {
h1 = h;
tetrahedron1.m_pts[0] = pts[a0];
tetrahedron1.m_pts[1] = tetrahedron0.m_pts[tetF[h][0]];
tetrahedron1.m_pts[2] = tetrahedron0.m_pts[tetF[h][1]];
tetrahedron1.m_pts[3] = tetrahedron0.m_pts[tetF[h][2]];
maxVol = v;
}
}
if (h1 != -1 && Add(tetrahedron1)) {
++m_numTetrahedraOnSurface;
}
else {
h1 = -1;
}
maxVol = 0.0;
int32_t h2 = -1;
Tetrahedron tetrahedron2;
tetrahedron2.m_data = PRIMITIVE_ON_SURFACE;
for (int32_t h = 0; h < 4; ++h) {
double v = ComputeVolume4(pts[a1], tetrahedron0.m_pts[tetF[h][0]], tetrahedron0.m_pts[tetF[h][1]], tetrahedron0.m_pts[tetF[h][2]]);
if (h == h1)
continue;
if (v > maxVol) {
h2 = h;
tetrahedron2.m_pts[0] = pts[a1];
tetrahedron2.m_pts[1] = tetrahedron0.m_pts[tetF[h][0]];
tetrahedron2.m_pts[2] = tetrahedron0.m_pts[tetF[h][1]];
tetrahedron2.m_pts[3] = tetrahedron0.m_pts[tetF[h][2]];
maxVol = v;
}
}
if (h1 != -1) {
for (int32_t h = 0; h < 4; ++h) {
double v = ComputeVolume4(pts[a1], tetrahedron1.m_pts[tetF[h][0]], tetrahedron1.m_pts[tetF[h][1]], tetrahedron1.m_pts[tetF[h][2]]);
if (h == 1)
continue;
if (v > maxVol) {
h2 = h;
tetrahedron2.m_pts[0] = pts[a1];
tetrahedron2.m_pts[1] = tetrahedron1.m_pts[tetF[h][0]];
tetrahedron2.m_pts[2] = tetrahedron1.m_pts[tetF[h][1]];
tetrahedron2.m_pts[3] = tetrahedron1.m_pts[tetF[h][2]];
maxVol = v;
}
}
}
if (h2 != -1 && Add(tetrahedron2)) {
++m_numTetrahedraOnSurface;
}
}
else {
assert(0);
}
}
void TetrahedronSet::Intersect(const Plane& plane,
SArray<Vec3<double> >* const positivePts,
SArray<Vec3<double> >* const negativePts,
const size_t sampling) const
{
const size_t nTetrahedra = m_tetrahedra.Size();
if (nTetrahedra == 0)
return;
}
void TetrahedronSet::ComputeExteriorPoints(const Plane& plane,
const Mesh& mesh,
SArray<Vec3<double> >* const exteriorPts) const
{
}
void TetrahedronSet::ComputeClippedVolumes(const Plane& plane,
double& positiveVolume,
double& negativeVolume) const
{
const size_t nTetrahedra = m_tetrahedra.Size();
if (nTetrahedra == 0)
return;
}
void TetrahedronSet::SelectOnSurface(PrimitiveSet* const onSurfP) const
{
TetrahedronSet* const onSurf = (TetrahedronSet*)onSurfP;
const size_t nTetrahedra = m_tetrahedra.Size();
if (nTetrahedra == 0)
return;
onSurf->m_tetrahedra.Resize(0);
onSurf->m_scale = m_scale;
onSurf->m_numTetrahedraOnSurface = 0;
onSurf->m_numTetrahedraInsideSurface = 0;
onSurf->m_barycenter = m_barycenter;
onSurf->m_minBB = m_minBB;
onSurf->m_maxBB = m_maxBB;
for (int32_t i = 0; i < 3; ++i) {
for (int32_t j = 0; j < 3; ++j) {
onSurf->m_Q[i][j] = m_Q[i][j];
onSurf->m_D[i][j] = m_D[i][j];
}
}
Tetrahedron tetrahedron;
for (size_t v = 0; v < nTetrahedra; ++v) {
tetrahedron = m_tetrahedra[v];
if (tetrahedron.m_data == PRIMITIVE_ON_SURFACE) {
onSurf->m_tetrahedra.PushBack(tetrahedron);
++onSurf->m_numTetrahedraOnSurface;
}
}
}
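// Splits every tetrahedron against 'plane': tetrahedra lying entirely on one
// side are forwarded unchanged, while straddling tetrahedra contribute their
// edge/plane intersection points to both sides and are re-tetrahedralized via
// AddClippedTetrahedra.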
void TetrahedronSet::Clip(const Plane& plane,
PrimitiveSet* const positivePartP,
PrimitiveSet* const negativePartP) const
{
TetrahedronSet* const positivePart = (TetrahedronSet*)positivePartP;
TetrahedronSet* const negativePart = (TetrahedronSet*)negativePartP;
const size_t nTetrahedra = m_tetrahedra.Size();
if (nTetrahedra == 0)
return;
positivePart->m_tetrahedra.Resize(0);
negativePart->m_tetrahedra.Resize(0);
positivePart->m_tetrahedra.Allocate(nTetrahedra);
negativePart->m_tetrahedra.Allocate(nTetrahedra);
negativePart->m_scale = positivePart->m_scale = m_scale;
negativePart->m_numTetrahedraOnSurface = positivePart->m_numTetrahedraOnSurface = 0;
negativePart->m_numTetrahedraInsideSurface = positivePart->m_numTetrahedraInsideSurface = 0;
negativePart->m_barycenter = m_barycenter;
positivePart->m_barycenter = m_barycenter;
negativePart->m_minBB = m_minBB;
positivePart->m_minBB = m_minBB;
negativePart->m_maxBB = m_maxBB;
positivePart->m_maxBB = m_maxBB;
for (int32_t i = 0; i < 3; ++i) {
for (int32_t j = 0; j < 3; ++j) {
negativePart->m_Q[i][j] = positivePart->m_Q[i][j] = m_Q[i][j];
negativePart->m_D[i][j] = positivePart->m_D[i][j] = m_D[i][j];
}
}
Tetrahedron tetrahedron;
double delta, alpha;
int32_t sign[4];
int32_t npos, nneg;
Vec3<double> posPts[10];
Vec3<double> negPts[10];
Vec3<double> P0, P1, M;
const Vec3<double> n(plane.m_a, plane.m_b, plane.m_c);
const int32_t edges[6][2] = { { 0, 1 }, { 0, 2 }, { 0, 3 }, { 1, 2 }, { 1, 3 }, { 2, 3 } };
double dist;
for (size_t v = 0; v < nTetrahedra; ++v) {
tetrahedron = m_tetrahedra[v];
npos = nneg = 0;
for (int32_t i = 0; i < 4; ++i) {
dist = plane.m_a * tetrahedron.m_pts[i][0] + plane.m_b * tetrahedron.m_pts[i][1] + plane.m_c * tetrahedron.m_pts[i][2] + plane.m_d;
if (dist > 0.0) {
sign[i] = 1;
posPts[npos] = tetrahedron.m_pts[i];
++npos;
}
else {
sign[i] = -1;
negPts[nneg] = tetrahedron.m_pts[i];
++nneg;
}
}
if (npos == 4) {
positivePart->Add(tetrahedron);
if (tetrahedron.m_data == PRIMITIVE_ON_SURFACE) {
++positivePart->m_numTetrahedraOnSurface;
}
else {
++positivePart->m_numTetrahedraInsideSurface;
}
}
else if (nneg == 4) {
negativePart->Add(tetrahedron);
if (tetrahedron.m_data == PRIMITIVE_ON_SURFACE) {
++negativePart->m_numTetrahedraOnSurface;
}
else {
++negativePart->m_numTetrahedraInsideSurface;
}
}
else {
int32_t nnew = 0;
for (int32_t j = 0; j < 6; ++j) {
if (sign[edges[j][0]] * sign[edges[j][1]] == -1) {
P0 = tetrahedron.m_pts[edges[j][0]];
P1 = tetrahedron.m_pts[edges[j][1]];
delta = (P0 - P1) * n;
alpha = -(plane.m_d + (n * P1)) / delta;
assert(alpha >= 0.0 && alpha <= 1.0);
M = alpha * P0 + (1 - alpha) * P1;
for (int32_t xx = 0; xx < 3; ++xx) {
assert(M[xx] + EPS >= m_minBB[xx]);
assert(M[xx] <= m_maxBB[xx] + EPS);
}
posPts[npos++] = M;
negPts[nneg++] = M;
++nnew;
}
}
negativePart->AddClippedTetrahedra(negPts, nneg);
positivePart->AddClippedTetrahedra(posPts, npos);
}
}
}
void TetrahedronSet::Convert(Mesh& mesh, const VOXEL_VALUE value) const
{
const size_t nTetrahedra = m_tetrahedra.Size();
if (nTetrahedra == 0)
return;
for (size_t v = 0; v < nTetrahedra; ++v) {
const Tetrahedron& tetrahedron = m_tetrahedra[v];
if (tetrahedron.m_data == value) {
int32_t s = (int32_t)mesh.GetNPoints();
mesh.AddPoint(tetrahedron.m_pts[0]);
mesh.AddPoint(tetrahedron.m_pts[1]);
mesh.AddPoint(tetrahedron.m_pts[2]);
mesh.AddPoint(tetrahedron.m_pts[3]);
mesh.AddTriangle(Vec3<int32_t>(s + 0, s + 1, s + 2));
mesh.AddTriangle(Vec3<int32_t>(s + 2, s + 1, s + 3));
mesh.AddTriangle(Vec3<int32_t>(s + 3, s + 1, s + 0));
mesh.AddTriangle(Vec3<int32_t>(s + 3, s + 0, s + 2));
}
}
}
const double TetrahedronSet::ComputeVolume() const
{
const size_t nTetrahedra = m_tetrahedra.Size();
if (nTetrahedra == 0)
return 0.0;
double volume = 0.0;
for (size_t v = 0; v < nTetrahedra; ++v) {
const Tetrahedron& tetrahedron = m_tetrahedra[v];
volume += fabs(ComputeVolume4(tetrahedron.m_pts[0], tetrahedron.m_pts[1], tetrahedron.m_pts[2], tetrahedron.m_pts[3]));
}
return volume / 6.0;
}
const double TetrahedronSet::ComputeMaxVolumeError() const
{
const size_t nTetrahedra = m_tetrahedra.Size();
if (nTetrahedra == 0)
return 0.0;
double volume = 0.0;
for (size_t v = 0; v < nTetrahedra; ++v) {
const Tetrahedron& tetrahedron = m_tetrahedra[v];
if (tetrahedron.m_data == PRIMITIVE_ON_SURFACE) {
volume += fabs(ComputeVolume4(tetrahedron.m_pts[0], tetrahedron.m_pts[1], tetrahedron.m_pts[2], tetrahedron.m_pts[3]));
}
}
return volume / 6.0;
}
void TetrahedronSet::RevertAlignToPrincipalAxes()
{
const size_t nTetrahedra = m_tetrahedra.Size();
if (nTetrahedra == 0)
return;
double x, y, z;
for (size_t v = 0; v < nTetrahedra; ++v) {
Tetrahedron& tetrahedron = m_tetrahedra[v];
for (int32_t i = 0; i < 4; ++i) {
x = tetrahedron.m_pts[i][0] - m_barycenter[0];
y = tetrahedron.m_pts[i][1] - m_barycenter[1];
z = tetrahedron.m_pts[i][2] - m_barycenter[2];
tetrahedron.m_pts[i][0] = m_Q[0][0] * x + m_Q[0][1] * y + m_Q[0][2] * z + m_barycenter[0];
tetrahedron.m_pts[i][1] = m_Q[1][0] * x + m_Q[1][1] * y + m_Q[1][2] * z + m_barycenter[1];
tetrahedron.m_pts[i][2] = m_Q[2][0] * x + m_Q[2][1] * y + m_Q[2][2] * z + m_barycenter[2];
}
}
ComputeBB();
}
void TetrahedronSet::ComputePrincipalAxes()
{
const size_t nTetrahedra = m_tetrahedra.Size();
if (nTetrahedra == 0)
return;
double covMat[3][3] = { { 0.0, 0.0, 0.0 },
{ 0.0, 0.0, 0.0 },
{ 0.0, 0.0, 0.0 } };
double x, y, z;
for (size_t v = 0; v < nTetrahedra; ++v) {
Tetrahedron& tetrahedron = m_tetrahedra[v];
for (int32_t i = 0; i < 4; ++i) {
x = tetrahedron.m_pts[i][0] - m_barycenter[0];
y = tetrahedron.m_pts[i][1] - m_barycenter[1];
z = tetrahedron.m_pts[i][2] - m_barycenter[2];
covMat[0][0] += x * x;
covMat[1][1] += y * y;
covMat[2][2] += z * z;
covMat[0][1] += x * y;
covMat[0][2] += x * z;
covMat[1][2] += y * z;
}
}
double n = nTetrahedra * 4.0;
covMat[0][0] /= n;
covMat[1][1] /= n;
covMat[2][2] /= n;
covMat[0][1] /= n;
covMat[0][2] /= n;
covMat[1][2] /= n;
covMat[1][0] = covMat[0][1];
covMat[2][0] = covMat[0][2];
covMat[2][1] = covMat[1][2];
Diagonalize(covMat, m_Q, m_D);
}
void TetrahedronSet::AlignToPrincipalAxes()
{
const size_t nTetrahedra = m_tetrahedra.Size();
if (nTetrahedra == 0)
return;
double x, y, z;
for (size_t v = 0; v < nTetrahedra; ++v) {
Tetrahedron& tetrahedron = m_tetrahedra[v];
for (int32_t i = 0; i < 4; ++i) {
x = tetrahedron.m_pts[i][0] - m_barycenter[0];
y = tetrahedron.m_pts[i][1] - m_barycenter[1];
z = tetrahedron.m_pts[i][2] - m_barycenter[2];
tetrahedron.m_pts[i][0] = m_Q[0][0] * x + m_Q[1][0] * y + m_Q[2][0] * z + m_barycenter[0];
tetrahedron.m_pts[i][1] = m_Q[0][1] * x + m_Q[1][1] * y + m_Q[2][1] * z + m_barycenter[1];
tetrahedron.m_pts[i][2] = m_Q[0][2] * x + m_Q[1][2] * y + m_Q[2][2] * z + m_barycenter[2];
}
}
ComputeBB();
}
}
| 63,585 | C++ | 38.178065 | 756 | 0.455957 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/src/VHACD.cpp | /* Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com)
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define _CRT_SECURE_NO_WARNINGS
#include <algorithm>
#include <fstream>
#include <iomanip>
#include <limits>
#include <sstream>
#if _OPENMP
#include <omp.h>
#endif // _OPENMP
#include "../public/VHACD.h"
#include "btConvexHullComputer.h"
#include "vhacdICHull.h"
#include "vhacdMesh.h"
#include "vhacdSArray.h"
#include "vhacdTimer.h"
#include "vhacdVHACD.h"
#include "vhacdVector.h"
#include "vhacdVolume.h"
#include "FloatMath.h"
// Internal debugging feature only
#define DEBUG_VISUALIZE_CONSTRAINTS 0
#if DEBUG_VISUALIZE_CONSTRAINTS
#include "NvRenderDebug.h"
extern RENDER_DEBUG::RenderDebug *gRenderDebug;
#pragma warning(disable:4702)
#endif
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#define ABS(a) (((a) < 0) ? -(a) : (a))
#define ZSGN(a) (((a) < 0) ? -1 : (a) > 0 ? 1 : 0)
#define MAX_DOUBLE (1.79769e+308)
#ifdef _MSC_VER
#pragma warning(disable:4267 4100 4244 4456)
#endif
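// FindMinimumElement returns the index of the smallest value in d[begin, end)
// and writes that value through m; the SSE path below processes 4 floats per
// iteration when USE_SSE is defined, with a plain scalar fallback otherwise.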
#ifdef USE_SSE
#include <immintrin.h>
const int32_t SIMD_WIDTH = 4;
inline int32_t FindMinimumElement(const float* const d, float* const m, const int32_t n)
{
// Min within vectors
__m128 min_i = _mm_set1_ps(-1.0f);
__m128 min_v = _mm_set1_ps(std::numeric_limits<float>::max());
for (int32_t i = 0; i <= n - SIMD_WIDTH; i += SIMD_WIDTH) {
const __m128 data = _mm_load_ps(&d[i]);
const __m128 pred = _mm_cmplt_ps(data, min_v);
min_i = _mm_blendv_ps(min_i, _mm_set1_ps(i), pred);
min_v = _mm_min_ps(data, min_v);
}
/* Min within vector */
const __m128 min1 = _mm_shuffle_ps(min_v, min_v, _MM_SHUFFLE(1, 0, 3, 2));
const __m128 min2 = _mm_min_ps(min_v, min1);
const __m128 min3 = _mm_shuffle_ps(min2, min2, _MM_SHUFFLE(0, 1, 0, 1));
const __m128 min4 = _mm_min_ps(min2, min3);
float min_d = _mm_cvtss_f32(min4);
// Min index
const int32_t min_idx = __builtin_ctz(_mm_movemask_ps(_mm_cmpeq_ps(min_v, min4)));
int32_t ret = min_i[min_idx] + min_idx;
// Trailing elements
for (int32_t i = (n & ~(SIMD_WIDTH - 1)); i < n; ++i) {
if (d[i] < min_d) {
min_d = d[i];
ret = i;
}
}
*m = min_d;
return ret;
}
inline int32_t FindMinimumElement(const float* const d, float* const m, const int32_t begin, const int32_t end)
{
// Leading elements
int32_t min_i = -1;
float min_d = std::numeric_limits<float>::max();
const int32_t aligned = (begin & ~(SIMD_WIDTH - 1)) + ((begin & (SIMD_WIDTH - 1)) ? SIMD_WIDTH : 0);
for (int32_t i = begin; i < std::min(end, aligned); ++i) {
if (d[i] < min_d) {
min_d = d[i];
min_i = i;
}
}
// Middle and trailing elements
float r_m = std::numeric_limits<float>::max();
const int32_t n = end - aligned;
const int32_t r_i = (n > 0) ? FindMinimumElement(&d[aligned], &r_m, n) : 0;
// Pick the lowest
if (r_m < min_d) {
*m = r_m;
return r_i + aligned;
}
else {
*m = min_d;
return min_i;
}
}
#else
inline int32_t FindMinimumElement(const float* const d, float* const m, const int32_t begin, const int32_t end)
{
int32_t idx = -1;
float min = (std::numeric_limits<float>::max)();
for (size_t i = begin; i < size_t(end); ++i) {
if (d[i] < min) {
idx = i;
min = d[i];
}
}
*m = min;
return idx;
}
#endif
//#define OCL_SOURCE_FROM_FILE
#ifndef OCL_SOURCE_FROM_FILE
const char* oclProgramSource = "\
__kernel void ComputePartialVolumes(__global short4 * voxels, \
const int32_t numVoxels, \
const float4 plane, \
const float4 minBB, \
const float4 scale, \
__local uint4 * localPartialVolumes, \
__global uint4 * partialVolumes) \
{ \
int32_t localId = get_local_id(0); \
int32_t groupSize = get_local_size(0); \
int32_t i0 = get_global_id(0) << 2; \
float4 voxel; \
uint4 v; \
voxel = convert_float4(voxels[i0]); \
v.s0 = (dot(plane, mad(scale, voxel, minBB)) >= 0.0f) * (i0 < numVoxels);\
voxel = convert_float4(voxels[i0 + 1]); \
v.s1 = (dot(plane, mad(scale, voxel, minBB)) >= 0.0f) * (i0 + 1 < numVoxels);\
voxel = convert_float4(voxels[i0 + 2]); \
v.s2 = (dot(plane, mad(scale, voxel, minBB)) >= 0.0f) * (i0 + 2 < numVoxels);\
voxel = convert_float4(voxels[i0 + 3]); \
v.s3 = (dot(plane, mad(scale, voxel, minBB)) >= 0.0f) * (i0 + 3 < numVoxels);\
localPartialVolumes[localId] = v; \
barrier(CLK_LOCAL_MEM_FENCE); \
for (int32_t i = groupSize >> 1; i > 0; i >>= 1) \
{ \
if (localId < i) \
{ \
localPartialVolumes[localId] += localPartialVolumes[localId + i]; \
} \
barrier(CLK_LOCAL_MEM_FENCE); \
} \
if (localId == 0) \
{ \
partialVolumes[get_group_id(0)] = localPartialVolumes[0]; \
} \
} \
__kernel void ComputePartialSums(__global uint4 * data, \
const int32_t dataSize, \
__local uint4 * partialSums) \
{ \
int32_t globalId = get_global_id(0); \
int32_t localId = get_local_id(0); \
int32_t groupSize = get_local_size(0); \
int32_t i; \
if (globalId < dataSize) \
{ \
partialSums[localId] = data[globalId]; \
} \
else \
{ \
partialSums[localId] = (0, 0, 0, 0); \
} \
barrier(CLK_LOCAL_MEM_FENCE); \
for (i = groupSize >> 1; i > 0; i >>= 1) \
{ \
if (localId < i) \
{ \
partialSums[localId] += partialSums[localId + i]; \
} \
barrier(CLK_LOCAL_MEM_FENCE); \
} \
if (localId == 0) \
{ \
data[get_group_id(0)] = partialSums[0]; \
} \
}";
#endif //OCL_SOURCE_FROM_FILE
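// The embedded OpenCL program provides two kernels: ComputePartialVolumes
// classifies four voxels per work item against a clipping plane and reduces
// the per-work-group counts in local memory, and ComputePartialSums repeatedly
// reduces those per-group counts until a single uint4 total remains. The host
// multiplies the final count by the unit voxel volume to obtain the clipped
// volume on one side of the plane.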
namespace VHACD {
IVHACD* CreateVHACD(void)
{
return new VHACD();
}
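/*
 Minimal usage sketch of the public interface, assuming the declarations in
 the public VHACD.h header (IVHACD::Parameters, Compute, GetNConvexHulls,
 GetConvexHull, Release); the parameter values and buffer names below are
 illustrative only:

     VHACD::IVHACD* acd = VHACD::CreateVHACD();
     VHACD::IVHACD::Parameters params;          // library defaults
     acd->Compute(points, nPoints, triangles, nTriangles, params);
     for (uint32_t i = 0; i < acd->GetNConvexHulls(); ++i) {
         VHACD::IVHACD::ConvexHull ch;
         acd->GetConvexHull(i, ch);             // per-hull points/triangles
     }
     acd->Release();
*/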
bool VHACD::OCLInit(void* const oclDevice, IUserLogger* const logger)
{
#ifdef CL_VERSION_1_1
m_oclDevice = (cl_device_id*)oclDevice;
cl_int error;
m_oclContext = clCreateContext(NULL, 1, m_oclDevice, NULL, NULL, &error);
if (error != CL_SUCCESS) {
if (logger) {
logger->Log("Couldn't create context\n");
}
return false;
}
#ifdef OCL_SOURCE_FROM_FILE
std::string cl_files = OPENCL_CL_FILES;
// read kernel source from file
#ifdef _WIN32
std::replace(cl_files.begin(), cl_files.end(), '/', '\\');
#endif // _WIN32
FILE* program_handle = fopen(cl_files.c_str(), "rb");
fseek(program_handle, 0, SEEK_END);
size_t program_size = ftell(program_handle);
rewind(program_handle);
char* program_buffer = new char[program_size + 1];
program_buffer[program_size] = '\0';
fread(program_buffer, sizeof(char), program_size, program_handle);
fclose(program_handle);
// create program
m_oclProgram = clCreateProgramWithSource(m_oclContext, 1, (const char**)&program_buffer, &program_size, &error);
delete[] program_buffer;
#else
size_t program_size = strlen(oclProgramSource);
m_oclProgram = clCreateProgramWithSource(m_oclContext, 1, (const char**)&oclProgramSource, &program_size, &error);
#endif
if (error != CL_SUCCESS) {
if (logger) {
logger->Log("Couldn't create program\n");
}
return false;
}
/* Build program */
error = clBuildProgram(m_oclProgram, 1, m_oclDevice, "-cl-denorms-are-zero", NULL, NULL);
if (error != CL_SUCCESS) {
size_t log_size;
/* Find Size of log and print to std output */
clGetProgramBuildInfo(m_oclProgram, *m_oclDevice, CL_PROGRAM_BUILD_LOG, 0, NULL, &log_size);
char* program_log = new char[log_size + 2];
program_log[log_size] = '\n';
program_log[log_size + 1] = '\0';
clGetProgramBuildInfo(m_oclProgram, *m_oclDevice, CL_PROGRAM_BUILD_LOG, log_size + 1, program_log, NULL);
if (logger) {
logger->Log("Couldn't build program\n");
logger->Log(program_log);
}
delete[] program_log;
return false;
}
delete[] m_oclQueue;
delete[] m_oclKernelComputePartialVolumes;
delete[] m_oclKernelComputeSum;
m_oclQueue = new cl_command_queue[m_ompNumProcessors];
m_oclKernelComputePartialVolumes = new cl_kernel[m_ompNumProcessors];
m_oclKernelComputeSum = new cl_kernel[m_ompNumProcessors];
const char nameKernelComputePartialVolumes[] = "ComputePartialVolumes";
const char nameKernelComputeSum[] = "ComputePartialSums";
for (int32_t k = 0; k < m_ompNumProcessors; ++k) {
m_oclKernelComputePartialVolumes[k] = clCreateKernel(m_oclProgram, nameKernelComputePartialVolumes, &error);
if (error != CL_SUCCESS) {
if (logger) {
logger->Log("Couldn't create kernel\n");
}
return false;
}
m_oclKernelComputeSum[k] = clCreateKernel(m_oclProgram, nameKernelComputeSum, &error);
if (error != CL_SUCCESS) {
if (logger) {
logger->Log("Couldn't create kernel\n");
}
return false;
}
}
error = clGetKernelWorkGroupInfo(m_oclKernelComputePartialVolumes[0],
*m_oclDevice,
CL_KERNEL_WORK_GROUP_SIZE,
sizeof(size_t),
&m_oclWorkGroupSize,
NULL);
size_t workGroupSize = 0;
error = clGetKernelWorkGroupInfo(m_oclKernelComputeSum[0],
*m_oclDevice,
CL_KERNEL_WORK_GROUP_SIZE,
sizeof(size_t),
&workGroupSize,
NULL);
if (error != CL_SUCCESS) {
if (logger) {
logger->Log("Couldn't query work group info\n");
}
return false;
}
if (workGroupSize < m_oclWorkGroupSize) {
m_oclWorkGroupSize = workGroupSize;
}
for (int32_t k = 0; k < m_ompNumProcessors; ++k) {
m_oclQueue[k] = clCreateCommandQueue(m_oclContext, *m_oclDevice, 0 /*CL_QUEUE_PROFILING_ENABLE*/, &error);
if (error != CL_SUCCESS) {
if (logger) {
logger->Log("Couldn't create queue\n");
}
return false;
}
}
return true;
#else //CL_VERSION_1_1
return false;
#endif //CL_VERSION_1_1
}
bool VHACD::OCLRelease(IUserLogger* const logger)
{
#ifdef CL_VERSION_1_1
cl_int error;
if (m_oclKernelComputePartialVolumes) {
for (int32_t k = 0; k < m_ompNumProcessors; ++k) {
error = clReleaseKernel(m_oclKernelComputePartialVolumes[k]);
if (error != CL_SUCCESS) {
if (logger) {
logger->Log("Couldn't release kernal\n");
}
return false;
}
}
delete[] m_oclKernelComputePartialVolumes;
}
if (m_oclKernelComputeSum) {
for (int32_t k = 0; k < m_ompNumProcessors; ++k) {
error = clReleaseKernel(m_oclKernelComputeSum[k]);
if (error != CL_SUCCESS) {
if (logger) {
logger->Log("Couldn't release kernal\n");
}
return false;
}
}
delete[] m_oclKernelComputeSum;
}
if (m_oclQueue) {
for (int32_t k = 0; k < m_ompNumProcessors; ++k) {
error = clReleaseCommandQueue(m_oclQueue[k]);
if (error != CL_SUCCESS) {
if (logger) {
logger->Log("Couldn't release queue\n");
}
return false;
}
}
delete[] m_oclQueue;
}
error = clReleaseProgram(m_oclProgram);
if (error != CL_SUCCESS) {
if (logger) {
logger->Log("Couldn't release program\n");
}
return false;
}
error = clReleaseContext(m_oclContext);
if (error != CL_SUCCESS) {
if (logger) {
logger->Log("Couldn't release context\n");
}
return false;
}
return true;
#else //CL_VERSION_1_1
return false;
#endif //CL_VERSION_1_1
}
void VHACD::ComputePrimitiveSet(const Parameters& params)
{
if (GetCancel()) {
return;
}
m_timer.Tic();
m_stage = "Compute primitive set";
m_operation = "Convert volume to pset";
std::ostringstream msg;
if (params.m_logger) {
msg << "+ " << m_stage << std::endl;
params.m_logger->Log(msg.str().c_str());
}
Update(0.0, 0.0, params);
if (params.m_mode == 0) {
VoxelSet* vset = new VoxelSet;
m_volume->Convert(*vset);
m_pset = vset;
}
else {
TetrahedronSet* tset = new TetrahedronSet;
m_volume->Convert(*tset);
m_pset = tset;
}
delete m_volume;
m_volume = 0;
if (params.m_logger) {
msg.str("");
msg << "\t # primitives " << m_pset->GetNPrimitives() << std::endl;
msg << "\t # inside surface " << m_pset->GetNPrimitivesInsideSurf() << std::endl;
msg << "\t # on surface " << m_pset->GetNPrimitivesOnSurf() << std::endl;
params.m_logger->Log(msg.str().c_str());
}
m_overallProgress = 15.0;
Update(100.0, 100.0, params);
m_timer.Toc();
if (params.m_logger) {
msg.str("");
msg << "\t time " << m_timer.GetElapsedTime() / 1000.0 << "s" << std::endl;
params.m_logger->Log(msg.str().c_str());
}
}
bool VHACD::Compute(const double* const points, const uint32_t nPoints,
const uint32_t* const triangles,const uint32_t nTriangles, const Parameters& params)
{
return ComputeACD(points, nPoints, triangles, nTriangles, params);
}
bool VHACD::Compute(const float* const points,const uint32_t nPoints,
const uint32_t* const triangles,const uint32_t nTriangles, const Parameters& params)
{
return ComputeACD(points, nPoints, triangles, nTriangles, params);
}
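// Heuristic choice of a preferred cutting direction: for each axis, measure
// how similar the other two eigenvalues are ((ey - ez)^2 for x, and so on).
// The axis with the smallest such spread is the one the part is most nearly
// symmetric about; 'dir' is set to that axis and the returned weight in
// [0, 1] grows as the symmetry becomes more pronounced (0 means no
// preference). The weight later scales the symmetry term of the plane cost.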
double ComputePreferredCuttingDirection(const PrimitiveSet* const tset, Vec3<double>& dir)
{
double ex = tset->GetEigenValue(AXIS_X);
double ey = tset->GetEigenValue(AXIS_Y);
double ez = tset->GetEigenValue(AXIS_Z);
double vx = (ey - ez) * (ey - ez);
double vy = (ex - ez) * (ex - ez);
double vz = (ex - ey) * (ex - ey);
if (vx < vy && vx < vz) {
double e = ey * ey + ez * ez;
dir[0] = 1.0;
dir[1] = 0.0;
dir[2] = 0.0;
return (e == 0.0) ? 0.0 : 1.0 - vx / e;
}
else if (vy < vx && vy < vz) {
double e = ex * ex + ez * ez;
dir[0] = 0.0;
dir[1] = 1.0;
dir[2] = 0.0;
return (e == 0.0) ? 0.0 : 1.0 - vy / e;
}
else {
double e = ex * ex + ey * ey;
dir[0] = 0.0;
dir[1] = 0.0;
dir[2] = 1.0;
return (e == 0.0) ? 0.0 : 1.0 - vz / e;
}
}
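// Enumerates axis-aligned candidate clipping planes over the voxel bounding
// box, stepping 'downsampling' voxel indices at a time along each axis; each
// plane stores its axis and integer index so it can be refined later.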
void ComputeAxesAlignedClippingPlanes(const VoxelSet& vset, const short downsampling, SArray<Plane>& planes)
{
const Vec3<short> minV = vset.GetMinBBVoxels();
const Vec3<short> maxV = vset.GetMaxBBVoxels();
Vec3<double> pt;
Plane plane;
const short i0 = minV[0];
const short i1 = maxV[0];
plane.m_a = 1.0;
plane.m_b = 0.0;
plane.m_c = 0.0;
plane.m_axis = AXIS_X;
for (short i = i0; i <= i1; i += downsampling) {
pt = vset.GetPoint(Vec3<double>(i + 0.5, 0.0, 0.0));
plane.m_d = -pt[0];
plane.m_index = i;
planes.PushBack(plane);
}
const short j0 = minV[1];
const short j1 = maxV[1];
plane.m_a = 0.0;
plane.m_b = 1.0;
plane.m_c = 0.0;
plane.m_axis = AXIS_Y;
for (short j = j0; j <= j1; j += downsampling) {
pt = vset.GetPoint(Vec3<double>(0.0, j + 0.5, 0.0));
plane.m_d = -pt[1];
plane.m_index = j;
planes.PushBack(plane);
}
const short k0 = minV[2];
const short k1 = maxV[2];
plane.m_a = 0.0;
plane.m_b = 0.0;
plane.m_c = 1.0;
plane.m_axis = AXIS_Z;
for (short k = k0; k <= k1; k += downsampling) {
pt = vset.GetPoint(Vec3<double>(0.0, 0.0, k + 0.5));
plane.m_d = -pt[2];
plane.m_index = k;
planes.PushBack(plane);
}
}
void ComputeAxesAlignedClippingPlanes(const TetrahedronSet& tset, const short downsampling, SArray<Plane>& planes)
{
const Vec3<double> minV = tset.GetMinBB();
const Vec3<double> maxV = tset.GetMaxBB();
const double scale = tset.GetSacle();
const short i0 = 0;
const short j0 = 0;
const short k0 = 0;
const short i1 = static_cast<short>((maxV[0] - minV[0]) / scale + 0.5);
const short j1 = static_cast<short>((maxV[1] - minV[1]) / scale + 0.5);
const short k1 = static_cast<short>((maxV[2] - minV[2]) / scale + 0.5);
Plane plane;
plane.m_a = 1.0;
plane.m_b = 0.0;
plane.m_c = 0.0;
plane.m_axis = AXIS_X;
for (short i = i0; i <= i1; i += downsampling) {
double x = minV[0] + scale * i;
plane.m_d = -x;
plane.m_index = i;
planes.PushBack(plane);
}
plane.m_a = 0.0;
plane.m_b = 1.0;
plane.m_c = 0.0;
plane.m_axis = AXIS_Y;
for (short j = j0; j <= j1; j += downsampling) {
double y = minV[1] + scale * j;
plane.m_d = -y;
plane.m_index = j;
planes.PushBack(plane);
}
plane.m_a = 0.0;
plane.m_b = 0.0;
plane.m_c = 1.0;
plane.m_axis = AXIS_Z;
for (short k = k0; k <= k1; k += downsampling) {
double z = minV[2] + scale * k;
plane.m_d = -z;
plane.m_index = k;
planes.PushBack(plane);
}
}
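// Refinement pass: once a best plane has been found on the downsampled grid,
// re-enumerate candidate planes at unit step within +/- downsampling of its
// index, along the best plane's axis only.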
void RefineAxesAlignedClippingPlanes(const VoxelSet& vset, const Plane& bestPlane, const short downsampling,
SArray<Plane>& planes)
{
const Vec3<short> minV = vset.GetMinBBVoxels();
const Vec3<short> maxV = vset.GetMaxBBVoxels();
Vec3<double> pt;
Plane plane;
if (bestPlane.m_axis == AXIS_X) {
const short i0 = MAX(minV[0], bestPlane.m_index - downsampling);
const short i1 = MIN(maxV[0], bestPlane.m_index + downsampling);
plane.m_a = 1.0;
plane.m_b = 0.0;
plane.m_c = 0.0;
plane.m_axis = AXIS_X;
for (short i = i0; i <= i1; ++i) {
pt = vset.GetPoint(Vec3<double>(i + 0.5, 0.0, 0.0));
plane.m_d = -pt[0];
plane.m_index = i;
planes.PushBack(plane);
}
}
else if (bestPlane.m_axis == AXIS_Y) {
const short j0 = MAX(minV[1], bestPlane.m_index - downsampling);
const short j1 = MIN(maxV[1], bestPlane.m_index + downsampling);
plane.m_a = 0.0;
plane.m_b = 1.0;
plane.m_c = 0.0;
plane.m_axis = AXIS_Y;
for (short j = j0; j <= j1; ++j) {
pt = vset.GetPoint(Vec3<double>(0.0, j + 0.5, 0.0));
plane.m_d = -pt[1];
plane.m_index = j;
planes.PushBack(plane);
}
}
else {
const short k0 = MAX(minV[2], bestPlane.m_index - downsampling);
const short k1 = MIN(maxV[2], bestPlane.m_index + downsampling);
plane.m_a = 0.0;
plane.m_b = 0.0;
plane.m_c = 1.0;
plane.m_axis = AXIS_Z;
for (short k = k0; k <= k1; ++k) {
pt = vset.GetPoint(Vec3<double>(0.0, 0.0, k + 0.5));
plane.m_d = -pt[2];
plane.m_index = k;
planes.PushBack(plane);
}
}
}
void RefineAxesAlignedClippingPlanes(const TetrahedronSet& tset, const Plane& bestPlane, const short downsampling,
SArray<Plane>& planes)
{
const Vec3<double> minV = tset.GetMinBB();
const Vec3<double> maxV = tset.GetMaxBB();
const double scale = tset.GetSacle();
Plane plane;
if (bestPlane.m_axis == AXIS_X) {
const short i0 = MAX(0, bestPlane.m_index - downsampling);
const short i1 = static_cast<short>(MIN((maxV[0] - minV[0]) / scale + 0.5, bestPlane.m_index + downsampling));
plane.m_a = 1.0;
plane.m_b = 0.0;
plane.m_c = 0.0;
plane.m_axis = AXIS_X;
for (short i = i0; i <= i1; ++i) {
double x = minV[0] + scale * i;
plane.m_d = -x;
plane.m_index = i;
planes.PushBack(plane);
}
}
else if (bestPlane.m_axis == AXIS_Y) {
const short j0 = MAX(0, bestPlane.m_index - downsampling);
const short j1 = static_cast<short>(MIN((maxV[1] - minV[1]) / scale + 0.5, bestPlane.m_index + downsampling));
plane.m_a = 0.0;
plane.m_b = 1.0;
plane.m_c = 0.0;
plane.m_axis = AXIS_Y;
for (short j = j0; j <= j1; ++j) {
double y = minV[1] + scale * j;
plane.m_d = -y;
plane.m_index = j;
planes.PushBack(plane);
}
}
else {
const short k0 = MAX(0, bestPlane.m_index - downsampling);
const short k1 = static_cast<short>(MIN((maxV[2] - minV[2]) / scale + 0.5, bestPlane.m_index + downsampling));
plane.m_a = 0.0;
plane.m_b = 0.0;
plane.m_c = 1.0;
plane.m_axis = AXIS_Z;
for (short k = k0; k <= k1; ++k) {
double z = minV[2] + scale * k;
plane.m_d = -z;
plane.m_index = k;
planes.PushBack(plane);
}
}
}
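// Concavity measures: both are |V(convex hull) - V(part)| with different
// normalizations -- the part's own hull volume for the local measure, and
// volume0 (the convex hull volume of the whole input) for the global measure
// used as the subdivision stopping criterion.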
inline double ComputeLocalConcavity(const double volume, const double volumeCH)
{
return fabs(volumeCH - volume) / volumeCH;
}
inline double ComputeConcavity(const double volume, const double volumeCH, const double volume0)
{
return fabs(volumeCH - volume) / volume0;
}
//#define DEBUG_TEMP
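// Evaluates every candidate plane (in parallel when OpenMP is enabled) and
// keeps the one minimizing
//     total = concavity(left) + concavity(right) + balance + symmetry
// where balance = alpha * |V_left - V_right| / V_CH0 penalizes uneven splits
// and symmetry = beta * w * (plane normal . preferredCuttingDirection) biases
// the choice relative to the dominant symmetry axis; ties are broken toward
// the lower plane index for determinism. Clipped volumes can be accumulated
// on the GPU via OpenCL when available, and the per-side convex hulls can be
// approximated to speed up the search.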
void VHACD::ComputeBestClippingPlane(const PrimitiveSet* inputPSet, const double volume, const SArray<Plane>& planes,
const Vec3<double>& preferredCuttingDirection, const double w, const double alpha, const double beta,
const int32_t convexhullDownsampling, const double progress0, const double progress1, Plane& bestPlane,
double& minConcavity, const Parameters& params)
{
if (GetCancel()) {
return;
}
char msg[256];
size_t nPrimitives = inputPSet->GetNPrimitives();
bool oclAcceleration = (nPrimitives > OCL_MIN_NUM_PRIMITIVES && params.m_oclAcceleration && params.m_mode == 0) ? true : false;
int32_t iBest = -1;
int32_t nPlanes = static_cast<int32_t>(planes.Size());
bool cancel = false;
int32_t done = 0;
double minTotal = MAX_DOUBLE;
double minBalance = MAX_DOUBLE;
double minSymmetry = MAX_DOUBLE;
minConcavity = MAX_DOUBLE;
SArray<Vec3<double> >* chPts = new SArray<Vec3<double> >[2 * m_ompNumProcessors];
Mesh* chs = new Mesh[2 * m_ompNumProcessors];
PrimitiveSet* onSurfacePSet = inputPSet->Create();
inputPSet->SelectOnSurface(onSurfacePSet);
PrimitiveSet** psets = 0;
if (!params.m_convexhullApproximation) {
psets = new PrimitiveSet*[2 * m_ompNumProcessors];
for (int32_t i = 0; i < 2 * m_ompNumProcessors; ++i) {
psets[i] = inputPSet->Create();
}
}
#ifdef CL_VERSION_1_1
// allocate OpenCL data structures
cl_mem voxels;
cl_mem* partialVolumes = 0;
size_t globalSize = 0;
size_t nWorkGroups = 0;
double unitVolume = 0.0;
if (oclAcceleration) {
VoxelSet* vset = (VoxelSet*)inputPSet;
const Vec3<double> minBB = vset->GetMinBB();
const float fMinBB[4] = { (float)minBB[0], (float)minBB[1], (float)minBB[2], 1.0f };
const float fScale[4] = { (float)vset->GetScale(), (float)vset->GetScale(), (float)vset->GetScale(), 0.0f };
const int32_t nVoxels = (int32_t)nPrimitives;
unitVolume = vset->GetUnitVolume();
nWorkGroups = (nPrimitives + 4 * m_oclWorkGroupSize - 1) / (4 * m_oclWorkGroupSize);
globalSize = nWorkGroups * m_oclWorkGroupSize;
cl_int error;
voxels = clCreateBuffer(m_oclContext,
CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR,
sizeof(Voxel) * nPrimitives,
vset->GetVoxels(),
&error);
if (error != CL_SUCCESS) {
if (params.m_logger) {
params.m_logger->Log("Couldn't create buffer\n");
}
SetCancel(true);
}
partialVolumes = new cl_mem[m_ompNumProcessors];
for (int32_t i = 0; i < m_ompNumProcessors; ++i) {
partialVolumes[i] = clCreateBuffer(m_oclContext,
CL_MEM_WRITE_ONLY,
sizeof(uint32_t) * 4 * nWorkGroups,
NULL,
&error);
if (error != CL_SUCCESS) {
if (params.m_logger) {
params.m_logger->Log("Couldn't create buffer\n");
}
SetCancel(true);
break;
}
error = clSetKernelArg(m_oclKernelComputePartialVolumes[i], 0, sizeof(cl_mem), &voxels);
error |= clSetKernelArg(m_oclKernelComputePartialVolumes[i], 1, sizeof(uint32_t), &nVoxels);
error |= clSetKernelArg(m_oclKernelComputePartialVolumes[i], 3, sizeof(float) * 4, fMinBB);
error |= clSetKernelArg(m_oclKernelComputePartialVolumes[i], 4, sizeof(float) * 4, &fScale);
error |= clSetKernelArg(m_oclKernelComputePartialVolumes[i], 5, sizeof(uint32_t) * 4 * m_oclWorkGroupSize, NULL);
error |= clSetKernelArg(m_oclKernelComputePartialVolumes[i], 6, sizeof(cl_mem), &(partialVolumes[i]));
error |= clSetKernelArg(m_oclKernelComputeSum[i], 0, sizeof(cl_mem), &(partialVolumes[i]));
error |= clSetKernelArg(m_oclKernelComputeSum[i], 2, sizeof(uint32_t) * 4 * m_oclWorkGroupSize, NULL);
if (error != CL_SUCCESS) {
if (params.m_logger) {
params.m_logger->Log("Couldn't kernel atguments \n");
}
SetCancel(true);
}
}
}
#else // CL_VERSION_1_1
oclAcceleration = false;
#endif // CL_VERSION_1_1
#ifdef DEBUG_TEMP
Timer timerComputeCost;
timerComputeCost.Tic();
#endif // DEBUG_TEMP
#if USE_THREAD == 1 && _OPENMP
#pragma omp parallel for
#endif
for (int32_t x = 0; x < nPlanes; ++x) {
int32_t threadID = 0;
#if USE_THREAD == 1 && _OPENMP
threadID = omp_get_thread_num();
#pragma omp flush(cancel)
#endif
if (!cancel) {
//Update progress
if (GetCancel()) {
cancel = true;
#if USE_THREAD == 1 && _OPENMP
#pragma omp flush(cancel)
#endif
}
Plane plane = planes[x];
if (oclAcceleration) {
#ifdef CL_VERSION_1_1
const float fPlane[4] = { (float)plane.m_a, (float)plane.m_b, (float)plane.m_c, (float)plane.m_d };
cl_int error = clSetKernelArg(m_oclKernelComputePartialVolumes[threadID], 2, sizeof(float) * 4, fPlane);
if (error != CL_SUCCESS) {
if (params.m_logger) {
params.m_logger->Log("Couldn't kernel atguments \n");
}
SetCancel(true);
}
error = clEnqueueNDRangeKernel(m_oclQueue[threadID], m_oclKernelComputePartialVolumes[threadID],
1, NULL, &globalSize, &m_oclWorkGroupSize, 0, NULL, NULL);
if (error != CL_SUCCESS) {
if (params.m_logger) {
params.m_logger->Log("Couldn't run kernel \n");
}
SetCancel(true);
}
int32_t nValues = (int32_t)nWorkGroups;
while (nValues > 1) {
error = clSetKernelArg(m_oclKernelComputeSum[threadID], 1, sizeof(int32_t), &nValues);
if (error != CL_SUCCESS) {
if (params.m_logger) {
params.m_logger->Log("Couldn't kernel atguments \n");
}
SetCancel(true);
}
size_t nWorkGroups = (nValues + m_oclWorkGroupSize - 1) / m_oclWorkGroupSize;
size_t globalSize = nWorkGroups * m_oclWorkGroupSize;
error = clEnqueueNDRangeKernel(m_oclQueue[threadID], m_oclKernelComputeSum[threadID],
1, NULL, &globalSize, &m_oclWorkGroupSize, 0, NULL, NULL);
if (error != CL_SUCCESS) {
if (params.m_logger) {
params.m_logger->Log("Couldn't run kernel \n");
}
SetCancel(true);
}
nValues = (int32_t)nWorkGroups;
}
#endif // CL_VERSION_1_1
}
Mesh& leftCH = chs[threadID];
Mesh& rightCH = chs[threadID + m_ompNumProcessors];
rightCH.ResizePoints(0);
leftCH.ResizePoints(0);
rightCH.ResizeTriangles(0);
leftCH.ResizeTriangles(0);
// compute convex-hulls
#ifdef TEST_APPROX_CH
double volumeLeftCH1;
double volumeRightCH1;
#endif //TEST_APPROX_CH
if (params.m_convexhullApproximation) {
SArray<Vec3<double> >& leftCHPts = chPts[threadID];
SArray<Vec3<double> >& rightCHPts = chPts[threadID + m_ompNumProcessors];
rightCHPts.Resize(0);
leftCHPts.Resize(0);
onSurfacePSet->Intersect(plane, &rightCHPts, &leftCHPts, convexhullDownsampling * 32);
inputPSet->GetConvexHull().Clip(plane, rightCHPts, leftCHPts);
rightCH.ComputeConvexHull((double*)rightCHPts.Data(), rightCHPts.Size());
leftCH.ComputeConvexHull((double*)leftCHPts.Data(), leftCHPts.Size());
#ifdef TEST_APPROX_CH
Mesh leftCH1;
Mesh rightCH1;
VoxelSet right;
VoxelSet left;
onSurfacePSet->Clip(plane, &right, &left);
right.ComputeConvexHull(rightCH1, convexhullDownsampling);
left.ComputeConvexHull(leftCH1, convexhullDownsampling);
volumeLeftCH1 = leftCH1.ComputeVolume();
volumeRightCH1 = rightCH1.ComputeVolume();
#endif //TEST_APPROX_CH
}
else {
PrimitiveSet* const right = psets[threadID];
PrimitiveSet* const left = psets[threadID + m_ompNumProcessors];
onSurfacePSet->Clip(plane, right, left);
right->ComputeConvexHull(rightCH, convexhullDownsampling);
left->ComputeConvexHull(leftCH, convexhullDownsampling);
}
double volumeLeftCH = leftCH.ComputeVolume();
double volumeRightCH = rightCH.ComputeVolume();
// compute clipped volumes
double volumeLeft = 0.0;
double volumeRight = 0.0;
if (oclAcceleration) {
#ifdef CL_VERSION_1_1
uint32_t volumes[4];
cl_int error = clEnqueueReadBuffer(m_oclQueue[threadID], partialVolumes[threadID], CL_TRUE,
0, sizeof(uint32_t) * 4, volumes, 0, NULL, NULL);
size_t nPrimitivesRight = volumes[0] + volumes[1] + volumes[2] + volumes[3];
size_t nPrimitivesLeft = nPrimitives - nPrimitivesRight;
volumeRight = nPrimitivesRight * unitVolume;
volumeLeft = nPrimitivesLeft * unitVolume;
if (error != CL_SUCCESS) {
if (params.m_logger) {
params.m_logger->Log("Couldn't read buffer \n");
}
SetCancel(true);
}
#endif // CL_VERSION_1_1
}
else {
inputPSet->ComputeClippedVolumes(plane, volumeRight, volumeLeft);
}
double concavityLeft = ComputeConcavity(volumeLeft, volumeLeftCH, m_volumeCH0);
double concavityRight = ComputeConcavity(volumeRight, volumeRightCH, m_volumeCH0);
double concavity = (concavityLeft + concavityRight);
// compute cost
double balance = alpha * fabs(volumeLeft - volumeRight) / m_volumeCH0;
double d = w * (preferredCuttingDirection[0] * plane.m_a + preferredCuttingDirection[1] * plane.m_b + preferredCuttingDirection[2] * plane.m_c);
double symmetry = beta * d;
double total = concavity + balance + symmetry;
#if USE_THREAD == 1 && _OPENMP
#pragma omp critical
#endif
{
if (total < minTotal || (total == minTotal && x < iBest)) {
minConcavity = concavity;
minBalance = balance;
minSymmetry = symmetry;
bestPlane = plane;
minTotal = total;
iBest = x;
}
++done;
if (!(done & 127)) // reduce update frequency
{
double progress = done * (progress1 - progress0) / nPlanes + progress0;
Update(m_stageProgress, progress, params);
}
}
}
}
#ifdef DEBUG_TEMP
timerComputeCost.Toc();
printf_s("Cost[%i] = %f\n", nPlanes, timerComputeCost.GetElapsedTime());
#endif // DEBUG_TEMP
#ifdef CL_VERSION_1_1
if (oclAcceleration) {
clReleaseMemObject(voxels);
for (int32_t i = 0; i < m_ompNumProcessors; ++i) {
clReleaseMemObject(partialVolumes[i]);
}
delete[] partialVolumes;
}
#endif // CL_VERSION_1_1
if (psets) {
for (int32_t i = 0; i < 2 * m_ompNumProcessors; ++i) {
delete psets[i];
}
delete[] psets;
}
delete onSurfacePSet;
delete[] chPts;
delete[] chs;
if (params.m_logger) {
sprintf(msg, "\n\t\t\t Best %04i T=%2.6f C=%2.6f B=%2.6f S=%2.6f (%1.1f, %1.1f, %1.1f, %3.3f)\n\n", iBest, minTotal, minConcavity, minBalance, minSymmetry, bestPlane.m_a, bestPlane.m_b, bestPlane.m_c, bestPlane.m_d);
params.m_logger->Log(msg);
}
}
void VHACD::ComputeACD(const Parameters& params)
{
if (GetCancel()) {
return;
}
m_timer.Tic();
m_stage = "Approximate Convex Decomposition";
m_stageProgress = 0.0;
std::ostringstream msg;
if (params.m_logger) {
msg << "+ " << m_stage << std::endl;
params.m_logger->Log(msg.str().c_str());
}
SArray<PrimitiveSet*> parts;
SArray<PrimitiveSet*> inputParts;
SArray<PrimitiveSet*> temp;
inputParts.PushBack(m_pset);
m_pset = 0;
SArray<Plane> planes;
SArray<Plane> planesRef;
uint32_t sub = 0;
bool firstIteration = true;
m_volumeCH0 = 1.0;
// Compute the decomposition depth based on the number of convex hulls being requested.
uint32_t hullCount = 2;
uint32_t depth = 1;
while (params.m_maxConvexHulls > hullCount)
{
depth++;
hullCount *= 2;
}
// We must always increment the decomposition depth to one level higher than what the requested maximum number of hulls implies.
// The reason for this is as follows.
// Say, for example, the user requests 32 convex hulls exactly. This would be a decomposition depth of 5.
// However, when we do that, we do *not* necessarily get 32 hulls as a result. This is because, during
// the recursive descent of the binary tree, one or more of the leaf nodes may have no concavity and
// will not be split. So, in this way, even with a decomposition depth of 5, you can produce fewer than
// 32 hulls. So, in this case, we would set the decomposition depth to 6 (producing up to as high as 64 convex hulls).
// Then the merge step, which combines over-described hulls down to the user-requested amount, leaves us with
// exactly 32 convex hulls as a result.
// We could just allow the artist to control the decomposition depth directly, but this would be a bit
// too complex; the preference is simply to let them specify how many hulls they want and derive the solution
// from that.
depth++;
while (sub++ < depth && inputParts.Size() > 0 && !m_cancel) {
msg.str("");
msg << "Subdivision level " << sub;
m_operation = msg.str();
if (params.m_logger) {
msg.str("");
msg << "\t Subdivision level " << sub << std::endl;
params.m_logger->Log(msg.str().c_str());
}
double maxConcavity = 0.0;
const size_t nInputParts = inputParts.Size();
Update(m_stageProgress, 0.0, params);
for (size_t p = 0; p < nInputParts && !m_cancel; ++p) {
const double progress0 = p * 100.0 / nInputParts;
const double progress1 = (p + 0.75) * 100.0 / nInputParts;
const double progress2 = (p + 1.00) * 100.0 / nInputParts;
Update(m_stageProgress, progress0, params);
PrimitiveSet* pset = inputParts[p];
inputParts[p] = 0;
double volume = pset->ComputeVolume();
pset->ComputeBB();
pset->ComputePrincipalAxes();
if (params.m_pca) {
pset->AlignToPrincipalAxes();
}
pset->ComputeConvexHull(pset->GetConvexHull());
double volumeCH = fabs(pset->GetConvexHull().ComputeVolume());
if (firstIteration) {
m_volumeCH0 = volumeCH;
}
double concavity = ComputeConcavity(volume, volumeCH, m_volumeCH0);
double error = 1.01 * pset->ComputeMaxVolumeError() / m_volumeCH0;
if (firstIteration) {
firstIteration = false;
}
if (params.m_logger) {
msg.str("");
msg << "\t -> Part[" << p
<< "] C = " << concavity
<< ", E = " << error
<< ", VS = " << pset->GetNPrimitivesOnSurf()
<< ", VI = " << pset->GetNPrimitivesInsideSurf()
<< std::endl;
params.m_logger->Log(msg.str().c_str());
}
if (concavity > params.m_concavity && concavity > error) {
Vec3<double> preferredCuttingDirection;
double w = ComputePreferredCuttingDirection(pset, preferredCuttingDirection);
planes.Resize(0);
if (params.m_mode == 0) {
VoxelSet* vset = (VoxelSet*)pset;
ComputeAxesAlignedClippingPlanes(*vset, params.m_planeDownsampling, planes);
}
else {
TetrahedronSet* tset = (TetrahedronSet*)pset;
ComputeAxesAlignedClippingPlanes(*tset, params.m_planeDownsampling, planes);
}
if (params.m_logger) {
msg.str("");
msg << "\t\t [Regular sampling] Number of clipping planes " << planes.Size() << std::endl;
params.m_logger->Log(msg.str().c_str());
}
Plane bestPlane;
double minConcavity = MAX_DOUBLE;
ComputeBestClippingPlane(pset,
volume,
planes,
preferredCuttingDirection,
w,
concavity * params.m_alpha,
concavity * params.m_beta,
params.m_convexhullDownsampling,
progress0,
progress1,
bestPlane,
minConcavity,
params);
if (!m_cancel && (params.m_planeDownsampling > 1 || params.m_convexhullDownsampling > 1)) {
planesRef.Resize(0);
if (params.m_mode == 0) {
VoxelSet* vset = (VoxelSet*)pset;
RefineAxesAlignedClippingPlanes(*vset, bestPlane, params.m_planeDownsampling, planesRef);
}
else {
TetrahedronSet* tset = (TetrahedronSet*)pset;
RefineAxesAlignedClippingPlanes(*tset, bestPlane, params.m_planeDownsampling, planesRef);
}
if (params.m_logger) {
msg.str("");
msg << "\t\t [Refining] Number of clipping planes " << planesRef.Size() << std::endl;
params.m_logger->Log(msg.str().c_str());
}
ComputeBestClippingPlane(pset,
volume,
planesRef,
preferredCuttingDirection,
w,
concavity * params.m_alpha,
concavity * params.m_beta,
1, // convexhullDownsampling = 1
progress1,
progress2,
bestPlane,
minConcavity,
params);
}
if (GetCancel()) {
delete pset; // clean up
break;
}
else {
if (maxConcavity < minConcavity) {
maxConcavity = minConcavity;
}
PrimitiveSet* bestLeft = pset->Create();
PrimitiveSet* bestRight = pset->Create();
temp.PushBack(bestLeft);
temp.PushBack(bestRight);
pset->Clip(bestPlane, bestRight, bestLeft);
if (params.m_pca) {
bestRight->RevertAlignToPrincipalAxes();
bestLeft->RevertAlignToPrincipalAxes();
}
delete pset;
}
}
else {
if (params.m_pca) {
pset->RevertAlignToPrincipalAxes();
}
parts.PushBack(pset);
}
}
Update(95.0 * (1.0 - maxConcavity) / (1.0 - params.m_concavity), 100.0, params);
if (GetCancel()) {
const size_t nTempParts = temp.Size();
for (size_t p = 0; p < nTempParts; ++p) {
delete temp[p];
}
temp.Resize(0);
}
else {
inputParts = temp;
temp.Resize(0);
}
}
const size_t nInputParts = inputParts.Size();
for (size_t p = 0; p < nInputParts; ++p) {
parts.PushBack(inputParts[p]);
}
if (GetCancel()) {
const size_t nParts = parts.Size();
for (size_t p = 0; p < nParts; ++p) {
delete parts[p];
}
return;
}
m_overallProgress = 90.0;
Update(m_stageProgress, 100.0, params);
msg.str("");
msg << "Generate convex-hulls";
m_operation = msg.str();
size_t nConvexHulls = parts.Size();
if (params.m_logger) {
msg.str("");
msg << "+ Generate " << nConvexHulls << " convex-hulls " << std::endl;
params.m_logger->Log(msg.str().c_str());
}
Update(m_stageProgress, 0.0, params);
m_convexHulls.Resize(0);
for (size_t p = 0; p < nConvexHulls && !m_cancel; ++p) {
Update(m_stageProgress, p * 100.0 / nConvexHulls, params);
m_convexHulls.PushBack(new Mesh);
parts[p]->ComputeConvexHull(*m_convexHulls[p]);
size_t nv = m_convexHulls[p]->GetNPoints();
double x, y, z;
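// Note: the hull vertices were computed in the normalized frame used during voxelization; the loop below
// appears to map each point back into the original mesh's coordinate frame by applying the stored
// rotation (m_rot) and translation (m_barycenter).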
for (size_t i = 0; i < nv; ++i) {
Vec3<double>& pt = m_convexHulls[p]->GetPoint(i);
x = pt[0];
y = pt[1];
z = pt[2];
pt[0] = m_rot[0][0] * x + m_rot[0][1] * y + m_rot[0][2] * z + m_barycenter[0];
pt[1] = m_rot[1][0] * x + m_rot[1][1] * y + m_rot[1][2] * z + m_barycenter[1];
pt[2] = m_rot[2][0] * x + m_rot[2][1] * y + m_rot[2][2] * z + m_barycenter[2];
}
}
const size_t nParts = parts.Size();
for (size_t p = 0; p < nParts; ++p) {
delete parts[p];
parts[p] = 0;
}
parts.Resize(0);
if (GetCancel()) {
const size_t nConvexHulls = m_convexHulls.Size();
for (size_t p = 0; p < nConvexHulls; ++p) {
delete m_convexHulls[p];
}
m_convexHulls.Clear();
return;
}
m_overallProgress = 95.0;
Update(100.0, 100.0, params);
m_timer.Toc();
if (params.m_logger) {
msg.str("");
msg << "\t time " << m_timer.GetElapsedTime() / 1000.0 << "s" << std::endl;
params.m_logger->Log(msg.str().c_str());
}
}
void AddPoints(const Mesh* const mesh, SArray<Vec3<double> >& pts)
{
const int32_t n = (int32_t)mesh->GetNPoints();
for (int32_t i = 0; i < n; ++i) {
pts.PushBack(mesh->GetPoint(i));
}
}
void ComputeConvexHull(const Mesh* const ch1, const Mesh* const ch2, SArray<Vec3<double> >& pts, Mesh* const combinedCH)
{
pts.Resize(0);
AddPoints(ch1, pts);
AddPoints(ch2, pts);
btConvexHullComputer ch;
ch.compute((double*)pts.Data(), 3 * sizeof(double), (int32_t)pts.Size(), -1.0, -1.0);
combinedCH->ResizePoints(0);
combinedCH->ResizeTriangles(0);
for (int32_t v = 0; v < ch.vertices.size(); v++) {
combinedCH->AddPoint(Vec3<double>(ch.vertices[v].getX(), ch.vertices[v].getY(), ch.vertices[v].getZ()));
}
const int32_t nt = ch.faces.size();
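// The loop below fan-triangulates each (possibly polygonal) face reported by btConvexHullComputer:
// starting from the face's first edge (a -> b), it walks getNextEdgeOfFace() and emits triangles
// (a, b, c) until the walk returns to vertex a.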
for (int32_t t = 0; t < nt; ++t) {
const btConvexHullComputer::Edge* sourceEdge = &(ch.edges[ch.faces[t]]);
int32_t a = sourceEdge->getSourceVertex();
int32_t b = sourceEdge->getTargetVertex();
const btConvexHullComputer::Edge* edge = sourceEdge->getNextEdgeOfFace();
int32_t c = edge->getTargetVertex();
while (c != a) {
combinedCH->AddTriangle(Vec3<int32_t>(a, b, c));
edge = edge->getNextEdgeOfFace();
b = c;
c = edge->getTargetVertex();
}
}
}
void VHACD::MergeConvexHulls(const Parameters& params)
{
if (GetCancel()) {
return;
}
m_timer.Tic();
m_stage = "Merge Convex Hulls";
std::ostringstream msg;
if (params.m_logger) {
msg << "+ " << m_stage << std::endl;
params.m_logger->Log(msg.str().c_str());
}
// Get the current number of convex hulls
size_t nConvexHulls = m_convexHulls.Size();
// Iteration counter
int32_t iteration = 0;
    // If we have more than one convex hull and the user has not asked us to cancel the operation
if (nConvexHulls > 1 && !m_cancel)
{
        // Scratch buffers reused while evaluating candidate merged hulls
SArray<Vec3<double> > pts;
Mesh combinedCH;
// Populate the cost matrix
size_t idx = 0;
SArray<float> costMatrix;
costMatrix.Resize(((nConvexHulls * nConvexHulls) - nConvexHulls) >> 1);
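        // The packed lower-triangular layout stores the cost of merging hulls (p1, p2), p1 > p2,
        // at index p1 * (p1 - 1) / 2 + p2. For example, with 4 hulls the 6 slots are filled in the
        // order (1,0)->0, (2,0)->1, (2,1)->2, (3,0)->3, (3,1)->4, (3,2)->5.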
for (size_t p1 = 1; p1 < nConvexHulls; ++p1)
{
const float volume1 = m_convexHulls[p1]->ComputeVolume();
for (size_t p2 = 0; p2 < p1; ++p2)
{
ComputeConvexHull(m_convexHulls[p1], m_convexHulls[p2], pts, &combinedCH);
costMatrix[idx++] = ComputeConcavity(volume1 + m_convexHulls[p2]->ComputeVolume(), combinedCH.ComputeVolume(), m_volumeCH0);
}
}
        // Keep merging until we are down to the requested maximum number of convex hulls (or the user cancels)
size_t costSize = m_convexHulls.Size();
while (!m_cancel)
{
msg.str("");
msg << "Iteration " << iteration++;
m_operation = msg.str();
// Search for lowest cost
float bestCost = (std::numeric_limits<float>::max)();
const size_t addr = FindMinimumElement(costMatrix.Data(), &bestCost, 0, costMatrix.Size());
if ( (costSize-1) < params.m_maxConvexHulls)
{
break;
}
const size_t addrI = (static_cast<int32_t>(sqrt(1 + (8 * addr))) - 1) >> 1;
const size_t p1 = addrI + 1;
const size_t p2 = addr - ((addrI * (addrI + 1)) >> 1);
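            // Worked example of recovering the pair from the packed index: for addr = 4,
            // addrI = (int(sqrt(1 + 32)) - 1) >> 1 = 2, so p1 = 3 and p2 = 4 - (2 * 3) / 2 = 1,
            // i.e. slot 4 holds the cost of merging hulls 3 and 1 (consistent with the fill order above).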
assert(p1 >= 0);
assert(p2 >= 0);
assert(p1 < costSize);
assert(p2 < costSize);
if (params.m_logger)
{
msg.str("");
msg << "\t\t Merging (" << p1 << ", " << p2 << ") " << bestCost << std::endl
<< std::endl;
params.m_logger->Log(msg.str().c_str());
}
            // Merge the lowest-cost pair of hulls (p1, p2) into a new hull, stored at slot p2
Mesh* cch = new Mesh;
ComputeConvexHull(m_convexHulls[p1], m_convexHulls[p2], pts, cch);
delete m_convexHulls[p2];
m_convexHulls[p2] = cch;
delete m_convexHulls[p1];
std::swap(m_convexHulls[p1], m_convexHulls[m_convexHulls.Size() - 1]);
m_convexHulls.PopBack();
costSize = costSize - 1;
// Calculate costs versus the new hull
size_t rowIdx = ((p2 - 1) * p2) >> 1;
const float volume1 = m_convexHulls[p2]->ComputeVolume();
for (size_t i = 0; (i < p2) && (!m_cancel); ++i)
{
ComputeConvexHull(m_convexHulls[p2], m_convexHulls[i], pts, &combinedCH);
costMatrix[rowIdx++] = ComputeConcavity(volume1 + m_convexHulls[i]->ComputeVolume(), combinedCH.ComputeVolume(), m_volumeCH0);
}
rowIdx += p2;
for (size_t i = p2 + 1; (i < costSize) && (!m_cancel); ++i)
{
ComputeConvexHull(m_convexHulls[p2], m_convexHulls[i], pts, &combinedCH);
costMatrix[rowIdx] = ComputeConcavity(volume1 + m_convexHulls[i]->ComputeVolume(), combinedCH.ComputeVolume(), m_volumeCH0);
rowIdx += i;
assert(rowIdx >= 0);
}
            // Move the last row/column of the packed matrix into the slot vacated by p1
const size_t erase_idx = ((costSize - 1) * costSize) >> 1;
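            // Note: m_convexHulls[p1] now holds the hull that was swapped in from the back, so its packed
            // row/column entries are copied from the old last row (starting at erase_idx); the entry against
            // p2 is skipped because it was just recomputed above.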
if (p1 < costSize) {
rowIdx = (addrI * p1) >> 1;
size_t top_row = erase_idx;
for (size_t i = 0; i < p1; ++i) {
if (i != p2) {
costMatrix[rowIdx] = costMatrix[top_row];
}
++rowIdx;
++top_row;
}
++top_row;
rowIdx += p1;
for (size_t i = p1 + 1; i < (costSize + 1); ++i) {
costMatrix[rowIdx] = costMatrix[top_row++];
rowIdx += i;
assert(rowIdx >= 0);
}
}
costMatrix.Resize(erase_idx);
}
}
m_overallProgress = 99.0;
Update(100.0, 100.0, params);
m_timer.Toc();
if (params.m_logger) {
msg.str("");
msg << "\t time " << m_timer.GetElapsedTime() / 1000.0 << "s" << std::endl;
params.m_logger->Log(msg.str().c_str());
}
}
void VHACD::SimplifyConvexHull(Mesh* const ch, const size_t nvertices, const double minVolume)
{
if (nvertices <= 4) {
return;
}
ICHull icHull;
if (mRaycastMesh)
{
        // We project these points onto the original source mesh to increase precision.
        // The voxelization process loses floating-point precision, so the returned data points do not lie exactly on the
        // surface of the original source mesh.
        // The first step is to compute the bounding box of the mesh we are trying to build a convex hull for.
        // From this bounding box we compute the length of the diagonal, which gives a relative size and a center for point projection.
uint32_t nPoints = ch->GetNPoints();
Vec3<double> *inputPoints = ch->GetPointsBuffer();
Vec3<double> bmin(inputPoints[0]);
        Vec3<double> bmax(inputPoints[0]); // start from the first point so the loop below sees every vertex, including index 0
for (uint32_t i = 1; i < nPoints; i++)
{
const Vec3<double> &p = inputPoints[i];
p.UpdateMinMax(bmin, bmax);
}
Vec3<double> center;
        double diagonalLength = center.GetCenter(bmin, bmax); // Compute the center of the bounding box; returns the length of its diagonal
// This is the error threshold for determining if we should use the raycast result data point vs. the voxelized result.
double pointDistanceThreshold = diagonalLength * 0.05;
        // If a new point is within 1/100th of the diagonal length of the bounding volume, we do not add it; doing so would create
        // thin slivers in the resulting convex hull.
double snapDistanceThreshold = diagonalLength * 0.01;
double snapDistanceThresholdSquared = snapDistanceThreshold*snapDistanceThreshold;
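        // Illustrative numbers: for a bounding-box diagonal of 2.0, pointDistanceThreshold = 0.1,
        // snapDistanceThreshold = 0.02 and snapDistanceThresholdSquared = 0.0004.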
// Allocate buffer for projected vertices
Vec3<double> *outputPoints = new Vec3<double>[nPoints];
uint32_t outCount = 0;
for (uint32_t i = 0; i < nPoints; i++)
{
Vec3<double> &inputPoint = inputPoints[i];
Vec3<double> &outputPoint = outputPoints[outCount];
// Compute the direction vector from the center of this mesh to the vertex
Vec3<double> dir = inputPoint - center;
// Normalize the direction vector.
dir.Normalize();
// Multiply times the diagonal length of the mesh
dir *= diagonalLength;
// Add the center back in again to get the destination point
dir += center;
// By default the output point is equal to the input point
outputPoint = inputPoint;
double pointDistance;
if (mRaycastMesh->raycast(center.GetData(), dir.GetData(), inputPoint.GetData(), outputPoint.GetData(),&pointDistance) )
{
// If the nearest intersection point is too far away, we keep the original source data point.
// Not all points lie directly on the original mesh surface
if (pointDistance > pointDistanceThreshold)
{
outputPoint = inputPoint;
}
}
            // Before we add this point, make sure we are not creating a point extremely close to an existing one;
            // that would produce tiny sliver triangles, which are really bad for collision detection.
bool foundNearbyPoint = false;
for (uint32_t j = 0; j < outCount; j++)
{
// If this new point is extremely close to an existing point, we do not add it!
double squaredDistance = outputPoints[j].GetDistanceSquared(outputPoint);
if (squaredDistance < snapDistanceThresholdSquared )
{
foundNearbyPoint = true;
break;
}
}
if (!foundNearbyPoint)
{
outCount++;
}
}
icHull.AddPoints(outputPoints, outCount);
delete[]outputPoints;
}
else
{
icHull.AddPoints(ch->GetPointsBuffer(), ch->GetNPoints());
}
icHull.Process((uint32_t)nvertices, minVolume);
TMMesh& mesh = icHull.GetMesh();
const size_t nT = mesh.GetNTriangles();
const size_t nV = mesh.GetNVertices();
ch->ResizePoints(nV);
ch->ResizeTriangles(nT);
mesh.GetIFS(ch->GetPointsBuffer(), ch->GetTrianglesBuffer());
}
void VHACD::SimplifyConvexHulls(const Parameters& params)
{
if (m_cancel || params.m_maxNumVerticesPerCH < 4) {
return;
}
m_timer.Tic();
m_stage = "Simplify convex-hulls";
m_operation = "Simplify convex-hulls";
std::ostringstream msg;
const size_t nConvexHulls = m_convexHulls.Size();
if (params.m_logger) {
msg << "+ Simplify " << nConvexHulls << " convex-hulls " << std::endl;
params.m_logger->Log(msg.str().c_str());
}
Update(0.0, 0.0, params);
for (size_t i = 0; i < nConvexHulls && !m_cancel; ++i) {
if (params.m_logger) {
msg.str("");
msg << "\t\t Simplify CH[" << std::setfill('0') << std::setw(5) << i << "] " << m_convexHulls[i]->GetNPoints() << " V, " << m_convexHulls[i]->GetNTriangles() << " T" << std::endl;
params.m_logger->Log(msg.str().c_str());
}
SimplifyConvexHull(m_convexHulls[i], params.m_maxNumVerticesPerCH, m_volumeCH0 * params.m_minVolumePerCH);
}
m_overallProgress = 100.0;
Update(100.0, 100.0, params);
m_timer.Toc();
if (params.m_logger) {
msg.str("");
msg << "\t time " << m_timer.GetElapsedTime() / 1000.0 << "s" << std::endl;
params.m_logger->Log(msg.str().c_str());
}
}
bool VHACD::ComputeCenterOfMass(double centerOfMass[3]) const
{
bool ret = false;
centerOfMass[0] = 0;
centerOfMass[1] = 0;
centerOfMass[2] = 0;
// Get number of convex hulls in the result
uint32_t hullCount = GetNConvexHulls();
if (hullCount) // if we have results
{
ret = true;
double totalVolume = 0;
// Initialize the center of mass to zero
centerOfMass[0] = 0;
centerOfMass[1] = 0;
centerOfMass[2] = 0;
// Compute the total volume of all convex hulls
for (uint32_t i = 0; i < hullCount; i++)
{
ConvexHull ch;
GetConvexHull(i, ch);
totalVolume += ch.m_volume;
}
// compute the reciprocal of the total volume
double recipVolume = 1.0 / totalVolume;
        // Accumulate the volume-weighted average of each convex hull's center point
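        // i.e. centerOfMass = sum_i (V_i / V_total) * center_i, which matches the true center of mass
        // provided the hulls do not significantly overlap.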
for (uint32_t i = 0; i < hullCount; i++)
{
ConvexHull ch;
GetConvexHull(i, ch);
double ratio = ch.m_volume*recipVolume;
centerOfMass[0] += ch.m_center[0] * ratio;
centerOfMass[1] += ch.m_center[1] * ratio;
centerOfMass[2] += ch.m_center[2] * ratio;
}
}
return ret;
}
#ifdef _MSC_VER
#pragma warning(disable:4189 4101)
#endif
// Analyzes the V-HACD results and computes the constraint solutions.
// It examines the points at which any two convex hulls touch each other and
// returns the total number of constraint pairs found.
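// Outline of the implementation below: each hull's surface is tessellated (HullData), every hull's
// bounding box is slightly inflated, and for each pair of hulls whose inflated AABBs overlap,
// findMatchingPoints() searches for tessellated vertices of one hull that lie within
// mNearestPointDistance of the other hull's vertices.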
uint32_t VHACD::ComputeConstraints(void)
{
mConstraints.clear(); // erase any previous constraint results
uint32_t hullCount = GetNConvexHulls(); // get the number of convex hulls in the results
if (hullCount == 0)
return 0;
#if DEBUG_VISUALIZE_CONSTRAINTS
gRenderDebug->pushRenderState();
gRenderDebug->setCurrentDisplayTime(10);
#endif
    // Per-hull helper that tessellates the convex hull surface for nearest-point queries
class HullData
{
public:
HullData(void)
{
FLOAT_MATH::fm_initMinMax(mBmin, mBmax);
}
~HullData(void)
{
FLOAT_MATH::fm_releaseVertexIndex(mVertexIndex);
FLOAT_MATH::fm_releaseTesselate(mTesselate);
delete[]mIndices;
}
void computeResolution(void)
{
mDiagonalDistance = FLOAT_MATH::fm_distance(mBmin, mBmax);
mTessellateDistance = mDiagonalDistance / 20;
mNearestPointDistance = mDiagonalDistance / 20.0f;
mPointResolution = mDiagonalDistance / 100;
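            // Illustrative numbers: with an AABB diagonal of 1.0 this gives mTessellateDistance = 0.05,
            // mNearestPointDistance = 0.05 and mPointResolution = 0.01 (the granularity handed to fm_createVertexIndex).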
mVertexIndex = FLOAT_MATH::fm_createVertexIndex(mPointResolution, false);
mTesselate = FLOAT_MATH::fm_createTesselate();
}
void computeTesselation(void)
{
mTesselationIndices = mTesselate->tesselate(mVertexIndex, mSourceTriangleCount, mIndices, mTessellateDistance, 6, mTessellateTriangleCount);
uint32_t vcount = mVertexIndex->getVcount();
}
bool getNearestVert(const double sourcePoint[3],
double nearest[3],
const HullData &other,
double nearestThreshold)
{
bool ret = false;
double nt2 = nearestThreshold*nearestThreshold;
uint32_t vcount = other.mVertexIndex->getVcount();
for (uint32_t i = 0; i < vcount; i++)
{
const double *p = other.mVertexIndex->getVertexDouble(i);
double d2 = FLOAT_MATH::fm_distanceSquared(sourcePoint, p);
if (d2 < nt2)
{
nearest[0] = p[0];
nearest[1] = p[1];
nearest[2] = p[2];
nt2 = d2;
ret = true;
}
}
return ret;
}
void findMatchingPoints(const HullData &other)
{
uint32_t vcount = mVertexIndex->getVcount();
for (uint32_t i = 0; i < vcount; i++)
{
const double *sourcePoint = mVertexIndex->getVertexDouble(i);
double nearestPoint[3];
if (getNearestVert(sourcePoint, nearestPoint, other, mNearestPointDistance))
{
#if DEBUG_VISUALIZE_CONSTRAINTS
float fp1[3];
float fp2[3];
FLOAT_MATH::fm_doubleToFloat3(sourcePoint, fp1);
FLOAT_MATH::fm_doubleToFloat3(nearestPoint, fp2);
gRenderDebug->debugRay(fp1, fp2);
#endif
}
}
}
double mBmin[3];
double mBmax[3];
double mDiagonalDistance;
double mTessellateDistance;
double mPointResolution;
double mNearestPointDistance;
uint32_t mSourceTriangleCount{ 0 };
uint32_t mTessellateTriangleCount{ 0 };
uint32_t *mIndices{ nullptr };
FLOAT_MATH::fm_VertexIndex *mVertexIndex{ nullptr };
FLOAT_MATH::fm_Tesselate *mTesselate{ nullptr };
const uint32_t *mTesselationIndices{ nullptr };
};
HullData *hullData = new HullData[hullCount];
for (uint32_t i = 0; i < hullCount; i++)
{
HullData &hd = hullData[i];
ConvexHull ch;
GetConvexHull(i, ch);
// Compute the bounding volume of this convex hull
for (uint32_t j = 0; j < ch.m_nPoints; j++)
{
const double *p = &ch.m_points[j * 3];
FLOAT_MATH::fm_minmax(p, hd.mBmin, hd.mBmax);
}
hd.computeResolution(); // Compute the tessellation resolution
uint32_t tcount = ch.m_nTriangles;
hd.mSourceTriangleCount = tcount;
hd.mIndices = new uint32_t[tcount * 3];
for (uint32_t j = 0; j < tcount; j++)
{
uint32_t i1 = ch.m_triangles[j * 3 + 0];
uint32_t i2 = ch.m_triangles[j * 3 + 1];
uint32_t i3 = ch.m_triangles[j * 3 + 2];
const double *p1 = &ch.m_points[i1 * 3];
const double *p2 = &ch.m_points[i2 * 3];
const double *p3 = &ch.m_points[i3 * 3];
bool newPos;
hd.mIndices[j * 3 + 0] = hd.mVertexIndex->getIndex(p1, newPos);
hd.mIndices[j * 3 + 1] = hd.mVertexIndex->getIndex(p2, newPos);
hd.mIndices[j * 3 + 2] = hd.mVertexIndex->getIndex(p3, newPos);
}
hd.computeTesselation();
}
for (uint32_t i = 0; i < hullCount; i++)
{
HullData &hd = hullData[i];
// Slightly inflate the bounding box around each convex hull for intersection tests
// during the constraint building phase
FLOAT_MATH::fm_inflateMinMax(hd.mBmin, hd.mBmax, 0.05f);
}
// Look for every possible pair of convex hulls as possible constraints
for (uint32_t i = 0; i < hullCount; i++)
{
HullData &hd1 = hullData[i];
for (uint32_t j = i + 1; j < hullCount; j++)
{
HullData &hd2 = hullData[j];
if (FLOAT_MATH::fm_intersectAABB(hd1.mBmin, hd1.mBmax, hd2.mBmin, hd2.mBmax))
{
                // OK: if the two convex hulls' bounds intersect, find the nearest
                // matching points between them.
hd1.findMatchingPoints(hd2);
}
}
}
#if DEBUG_VISUALIZE_CONSTRAINTS
gRenderDebug->popRenderState();
#endif
return uint32_t(mConstraints.size());
}
// Returns a pointer to the constraint at the given index; null if the index is not valid or
// the user did not previously call 'ComputeConstraints'.
const VHACD::IVHACD::Constraint *VHACD::GetConstraint(uint32_t index) const
{
const Constraint *ret = nullptr;
if (index < mConstraints.size())
{
ret = &mConstraints[index];
}
return ret;
}
} // end of VHACD namespace | 69,088 | C++ | 37.727018 | 756 | 0.521046 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/src/btConvexHullComputer.cpp | /*
Copyright (c) 2011 Ole Kniemeyer, MAXON, www.maxon.net
This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it freely,
subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
#include <string.h>
#include "btAlignedObjectArray.h"
#include "btConvexHullComputer.h"
#include "btMinMax.h"
#include "btVector3.h"
#ifdef __GNUC__
#include <stdint.h>
#elif defined(_MSC_VER)
typedef __int32 int32_t;
typedef __int64 int64_t;
typedef unsigned __int32 uint32_t;
typedef unsigned __int64 uint64_t;
#else
typedef int int32_t;
typedef long long int int64_t;
typedef unsigned int uint32_t;
typedef unsigned long long int uint64_t;
#endif
#ifdef _MSC_VER
#pragma warning(disable:4458)
#endif
//The definition of USE_X86_64_ASM is moved into the build system. You can enable it manually by uncommenting the following lines
//#if (defined(__GNUC__) && defined(__x86_64__) && !defined(__ICL)) // || (defined(__ICL) && defined(_M_X64)) bug in Intel compiler, disable inline assembly
// #define USE_X86_64_ASM
//#endif
//#define DEBUG_CONVEX_HULL
//#define SHOW_ITERATIONS
#if defined(DEBUG_CONVEX_HULL) || defined(SHOW_ITERATIONS)
#include <stdio.h>
#endif
// Convex hull implementation based on Preparata and Hong
// Ole Kniemeyer, MAXON Computer GmbH
class btConvexHullInternal {
public:
class Point64 {
public:
int64_t x;
int64_t y;
int64_t z;
Point64(int64_t x, int64_t y, int64_t z)
: x(x)
, y(y)
, z(z)
{
}
bool isZero()
{
return (x == 0) && (y == 0) && (z == 0);
}
int64_t dot(const Point64& b) const
{
return x * b.x + y * b.y + z * b.z;
}
};
class Point32 {
public:
int32_t x;
int32_t y;
int32_t z;
int32_t index;
Point32()
{
}
Point32(int32_t x, int32_t y, int32_t z)
: x(x)
, y(y)
, z(z)
, index(-1)
{
}
bool operator==(const Point32& b) const
{
return (x == b.x) && (y == b.y) && (z == b.z);
}
bool operator!=(const Point32& b) const
{
return (x != b.x) || (y != b.y) || (z != b.z);
}
bool isZero()
{
return (x == 0) && (y == 0) && (z == 0);
}
Point64 cross(const Point32& b) const
{
return Point64(y * b.z - z * b.y, z * b.x - x * b.z, x * b.y - y * b.x);
}
Point64 cross(const Point64& b) const
{
return Point64(y * b.z - z * b.y, z * b.x - x * b.z, x * b.y - y * b.x);
}
int64_t dot(const Point32& b) const
{
return x * b.x + y * b.y + z * b.z;
}
int64_t dot(const Point64& b) const
{
return x * b.x + y * b.y + z * b.z;
}
Point32 operator+(const Point32& b) const
{
return Point32(x + b.x, y + b.y, z + b.z);
}
Point32 operator-(const Point32& b) const
{
return Point32(x - b.x, y - b.y, z - b.z);
}
};
class Int128 {
public:
uint64_t low;
uint64_t high;
Int128()
{
}
Int128(uint64_t low, uint64_t high)
: low(low)
, high(high)
{
}
Int128(uint64_t low)
: low(low)
, high(0)
{
}
Int128(int64_t value)
: low(value)
, high((value >= 0) ? 0 : (uint64_t)-1LL)
{
}
static Int128 mul(int64_t a, int64_t b);
static Int128 mul(uint64_t a, uint64_t b);
Int128 operator-() const
{
return Int128((uint64_t) - (int64_t)low, ~high + (low == 0));
}
Int128 operator+(const Int128& b) const
{
#ifdef USE_X86_64_ASM
Int128 result;
__asm__("addq %[bl], %[rl]\n\t"
"adcq %[bh], %[rh]\n\t"
: [rl] "=r"(result.low), [rh] "=r"(result.high)
: "0"(low), "1"(high), [bl] "g"(b.low), [bh] "g"(b.high)
: "cc");
return result;
#else
uint64_t lo = low + b.low;
return Int128(lo, high + b.high + (lo < low));
#endif
}
Int128 operator-(const Int128& b) const
{
#ifdef USE_X86_64_ASM
Int128 result;
__asm__("subq %[bl], %[rl]\n\t"
"sbbq %[bh], %[rh]\n\t"
: [rl] "=r"(result.low), [rh] "=r"(result.high)
: "0"(low), "1"(high), [bl] "g"(b.low), [bh] "g"(b.high)
: "cc");
return result;
#else
return *this + -b;
#endif
}
Int128& operator+=(const Int128& b)
{
#ifdef USE_X86_64_ASM
__asm__("addq %[bl], %[rl]\n\t"
"adcq %[bh], %[rh]\n\t"
: [rl] "=r"(low), [rh] "=r"(high)
: "0"(low), "1"(high), [bl] "g"(b.low), [bh] "g"(b.high)
: "cc");
#else
uint64_t lo = low + b.low;
if (lo < low) {
++high;
}
low = lo;
high += b.high;
#endif
return *this;
}
Int128& operator++()
{
if (++low == 0) {
++high;
}
return *this;
}
Int128 operator*(int64_t b) const;
btScalar toScalar() const
{
return ((int64_t)high >= 0) ? btScalar(high) * (btScalar(0x100000000LL) * btScalar(0x100000000LL)) + btScalar(low)
: -(-*this).toScalar();
}
int32_t getSign() const
{
return ((int64_t)high < 0) ? -1 : (high || low) ? 1 : 0;
}
bool operator<(const Int128& b) const
{
return (high < b.high) || ((high == b.high) && (low < b.low));
}
int32_t ucmp(const Int128& b) const
{
if (high < b.high) {
return -1;
}
if (high > b.high) {
return 1;
}
if (low < b.low) {
return -1;
}
if (low > b.low) {
return 1;
}
return 0;
}
};
class Rational64 {
private:
uint64_t m_numerator;
uint64_t m_denominator;
int32_t sign;
public:
Rational64(int64_t numerator, int64_t denominator)
{
if (numerator > 0) {
sign = 1;
m_numerator = (uint64_t)numerator;
}
else if (numerator < 0) {
sign = -1;
m_numerator = (uint64_t)-numerator;
}
else {
sign = 0;
m_numerator = 0;
}
if (denominator > 0) {
m_denominator = (uint64_t)denominator;
}
else if (denominator < 0) {
sign = -sign;
m_denominator = (uint64_t)-denominator;
}
else {
m_denominator = 0;
}
}
bool isNegativeInfinity() const
{
return (sign < 0) && (m_denominator == 0);
}
bool isNaN() const
{
return (sign == 0) && (m_denominator == 0);
}
int32_t compare(const Rational64& b) const;
btScalar toScalar() const
{
return sign * ((m_denominator == 0) ? SIMD_INFINITY : (btScalar)m_numerator / m_denominator);
}
};
class Rational128 {
private:
Int128 numerator;
Int128 denominator;
int32_t sign;
bool isInt64;
public:
Rational128(int64_t value)
{
if (value > 0) {
sign = 1;
this->numerator = value;
}
else if (value < 0) {
sign = -1;
this->numerator = -value;
}
else {
sign = 0;
this->numerator = (uint64_t)0;
}
this->denominator = (uint64_t)1;
isInt64 = true;
}
Rational128(const Int128& numerator, const Int128& denominator)
{
sign = numerator.getSign();
if (sign >= 0) {
this->numerator = numerator;
}
else {
this->numerator = -numerator;
}
int32_t dsign = denominator.getSign();
if (dsign >= 0) {
this->denominator = denominator;
}
else {
sign = -sign;
this->denominator = -denominator;
}
isInt64 = false;
}
int32_t compare(const Rational128& b) const;
int32_t compare(int64_t b) const;
btScalar toScalar() const
{
return sign * ((denominator.getSign() == 0) ? SIMD_INFINITY : numerator.toScalar() / denominator.toScalar());
}
};
class PointR128 {
public:
Int128 x;
Int128 y;
Int128 z;
Int128 denominator;
PointR128()
{
}
PointR128(Int128 x, Int128 y, Int128 z, Int128 denominator)
: x(x)
, y(y)
, z(z)
, denominator(denominator)
{
}
btScalar xvalue() const
{
return x.toScalar() / denominator.toScalar();
}
btScalar yvalue() const
{
return y.toScalar() / denominator.toScalar();
}
btScalar zvalue() const
{
return z.toScalar() / denominator.toScalar();
}
};
class Edge;
class Face;
class Vertex {
public:
Vertex* next;
Vertex* prev;
Edge* edges;
Face* firstNearbyFace;
Face* lastNearbyFace;
PointR128 point128;
Point32 point;
int32_t copy;
Vertex()
: next(NULL)
, prev(NULL)
, edges(NULL)
, firstNearbyFace(NULL)
, lastNearbyFace(NULL)
, copy(-1)
{
}
#ifdef DEBUG_CONVEX_HULL
void print()
{
printf("V%d (%d, %d, %d)", point.index, point.x, point.y, point.z);
}
void printGraph();
#endif
Point32 operator-(const Vertex& b) const
{
return point - b.point;
}
Rational128 dot(const Point64& b) const
{
return (point.index >= 0) ? Rational128(point.dot(b))
: Rational128(point128.x * b.x + point128.y * b.y + point128.z * b.z, point128.denominator);
}
btScalar xvalue() const
{
return (point.index >= 0) ? btScalar(point.x) : point128.xvalue();
}
btScalar yvalue() const
{
return (point.index >= 0) ? btScalar(point.y) : point128.yvalue();
}
btScalar zvalue() const
{
return (point.index >= 0) ? btScalar(point.z) : point128.zvalue();
}
void receiveNearbyFaces(Vertex* src)
{
if (lastNearbyFace) {
lastNearbyFace->nextWithSameNearbyVertex = src->firstNearbyFace;
}
else {
firstNearbyFace = src->firstNearbyFace;
}
if (src->lastNearbyFace) {
lastNearbyFace = src->lastNearbyFace;
}
for (Face* f = src->firstNearbyFace; f; f = f->nextWithSameNearbyVertex) {
btAssert(f->nearbyVertex == src);
f->nearbyVertex = this;
}
src->firstNearbyFace = NULL;
src->lastNearbyFace = NULL;
}
};
class Edge {
public:
Edge* next;
Edge* prev;
Edge* reverse;
Vertex* target;
Face* face;
int32_t copy;
~Edge()
{
next = NULL;
prev = NULL;
reverse = NULL;
target = NULL;
face = NULL;
}
void link(Edge* n)
{
btAssert(reverse->target == n->reverse->target);
next = n;
n->prev = this;
}
#ifdef DEBUG_CONVEX_HULL
void print()
{
printf("E%p : %d -> %d, n=%p p=%p (0 %d\t%d\t%d) -> (%d %d %d)", this, reverse->target->point.index, target->point.index, next, prev,
reverse->target->point.x, reverse->target->point.y, reverse->target->point.z, target->point.x, target->point.y, target->point.z);
}
#endif
};
class Face {
public:
Face* next;
Vertex* nearbyVertex;
Face* nextWithSameNearbyVertex;
Point32 origin;
Point32 dir0;
Point32 dir1;
Face()
: next(NULL)
, nearbyVertex(NULL)
, nextWithSameNearbyVertex(NULL)
{
}
void init(Vertex* a, Vertex* b, Vertex* c)
{
nearbyVertex = a;
origin = a->point;
dir0 = *b - *a;
dir1 = *c - *a;
if (a->lastNearbyFace) {
a->lastNearbyFace->nextWithSameNearbyVertex = this;
}
else {
a->firstNearbyFace = this;
}
a->lastNearbyFace = this;
}
Point64 getNormal()
{
return dir0.cross(dir1);
}
};
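    // DMul implements a double-width "schoolbook" multiply: each operand is split into high/low halves,
    // the four partial products are formed and recombined with carry propagation. It is instantiated as
    // DMul<uint64_t, uint32_t> for 64x64 -> 128-bit products and as DMul<Int128, uint64_t> for
    // 128x128 -> 256-bit products (high and low halves returned separately).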
template <typename UWord, typename UHWord>
class DMul {
private:
static uint32_t high(uint64_t value)
{
return (uint32_t)(value >> 32);
}
static uint32_t low(uint64_t value)
{
return (uint32_t)value;
}
static uint64_t mul(uint32_t a, uint32_t b)
{
return (uint64_t)a * (uint64_t)b;
}
static void shlHalf(uint64_t& value)
{
value <<= 32;
}
static uint64_t high(Int128 value)
{
return value.high;
}
static uint64_t low(Int128 value)
{
return value.low;
}
static Int128 mul(uint64_t a, uint64_t b)
{
return Int128::mul(a, b);
}
static void shlHalf(Int128& value)
{
value.high = value.low;
value.low = 0;
}
public:
static void mul(UWord a, UWord b, UWord& resLow, UWord& resHigh)
{
UWord p00 = mul(low(a), low(b));
UWord p01 = mul(low(a), high(b));
UWord p10 = mul(high(a), low(b));
UWord p11 = mul(high(a), high(b));
UWord p0110 = UWord(low(p01)) + UWord(low(p10));
p11 += high(p01);
p11 += high(p10);
p11 += high(p0110);
shlHalf(p0110);
p00 += p0110;
if (p00 < p0110) {
++p11;
}
resLow = p00;
resHigh = p11;
}
};
private:
class IntermediateHull {
public:
Vertex* minXy;
Vertex* maxXy;
Vertex* minYx;
Vertex* maxYx;
IntermediateHull()
: minXy(NULL)
, maxXy(NULL)
, minYx(NULL)
, maxYx(NULL)
{
}
void print();
};
enum Orientation { NONE,
CLOCKWISE,
COUNTER_CLOCKWISE };
template <typename T>
class PoolArray {
private:
T* array;
int32_t size;
public:
PoolArray<T>* next;
PoolArray(int32_t size)
: size(size)
, next(NULL)
{
array = (T*)btAlignedAlloc(sizeof(T) * size, 16);
}
~PoolArray()
{
btAlignedFree(array);
}
T* init()
{
T* o = array;
for (int32_t i = 0; i < size; i++, o++) {
o->next = (i + 1 < size) ? o + 1 : NULL;
}
return array;
}
};
template <typename T>
class Pool {
private:
PoolArray<T>* arrays;
PoolArray<T>* nextArray;
T* freeObjects;
int32_t arraySize;
public:
Pool()
: arrays(NULL)
, nextArray(NULL)
, freeObjects(NULL)
, arraySize(256)
{
}
~Pool()
{
while (arrays) {
PoolArray<T>* p = arrays;
arrays = p->next;
p->~PoolArray<T>();
btAlignedFree(p);
}
}
void reset()
{
nextArray = arrays;
freeObjects = NULL;
}
void setArraySize(int32_t arraySize)
{
this->arraySize = arraySize;
}
T* newObject()
{
T* o = freeObjects;
if (!o) {
PoolArray<T>* p = nextArray;
if (p) {
nextArray = p->next;
}
else {
p = new (btAlignedAlloc(sizeof(PoolArray<T>), 16)) PoolArray<T>(arraySize);
p->next = arrays;
arrays = p;
}
o = p->init();
}
freeObjects = o->next;
return new (o) T();
};
void freeObject(T* object)
{
object->~T();
object->next = freeObjects;
freeObjects = object;
}
};
btVector3 scaling;
btVector3 center;
Pool<Vertex> vertexPool;
Pool<Edge> edgePool;
Pool<Face> facePool;
btAlignedObjectArray<Vertex*> originalVertices;
int32_t mergeStamp;
int32_t minAxis;
int32_t medAxis;
int32_t maxAxis;
int32_t usedEdgePairs;
int32_t maxUsedEdgePairs;
static Orientation getOrientation(const Edge* prev, const Edge* next, const Point32& s, const Point32& t);
Edge* findMaxAngle(bool ccw, const Vertex* start, const Point32& s, const Point64& rxs, const Point64& sxrxs, Rational64& minCot);
void findEdgeForCoplanarFaces(Vertex* c0, Vertex* c1, Edge*& e0, Edge*& e1, Vertex* stop0, Vertex* stop1);
Edge* newEdgePair(Vertex* from, Vertex* to);
void removeEdgePair(Edge* edge)
{
Edge* n = edge->next;
Edge* r = edge->reverse;
btAssert(edge->target && r->target);
if (n != edge) {
n->prev = edge->prev;
edge->prev->next = n;
r->target->edges = n;
}
else {
r->target->edges = NULL;
}
n = r->next;
if (n != r) {
n->prev = r->prev;
r->prev->next = n;
edge->target->edges = n;
}
else {
edge->target->edges = NULL;
}
edgePool.freeObject(edge);
edgePool.freeObject(r);
usedEdgePairs--;
}
void computeInternal(int32_t start, int32_t end, IntermediateHull& result);
bool mergeProjection(IntermediateHull& h0, IntermediateHull& h1, Vertex*& c0, Vertex*& c1);
void merge(IntermediateHull& h0, IntermediateHull& h1);
btVector3 toBtVector(const Point32& v);
btVector3 getBtNormal(Face* face);
bool shiftFace(Face* face, btScalar amount, btAlignedObjectArray<Vertex*> stack);
public:
Vertex* vertexList;
void compute(const void* coords, bool doubleCoords, int32_t stride, int32_t count);
btVector3 getCoordinates(const Vertex* v);
btScalar shrink(btScalar amount, btScalar clampAmount);
};
btConvexHullInternal::Int128 btConvexHullInternal::Int128::operator*(int64_t b) const
{
bool negative = (int64_t)high < 0;
Int128 a = negative ? -*this : *this;
if (b < 0) {
negative = !negative;
b = -b;
}
Int128 result = mul(a.low, (uint64_t)b);
result.high += a.high * (uint64_t)b;
return negative ? -result : result;
}
btConvexHullInternal::Int128 btConvexHullInternal::Int128::mul(int64_t a, int64_t b)
{
Int128 result;
#ifdef USE_X86_64_ASM
__asm__("imulq %[b]"
: "=a"(result.low), "=d"(result.high)
: "0"(a), [b] "r"(b)
: "cc");
return result;
#else
bool negative = a < 0;
if (negative) {
a = -a;
}
if (b < 0) {
negative = !negative;
b = -b;
}
DMul<uint64_t, uint32_t>::mul((uint64_t)a, (uint64_t)b, result.low, result.high);
return negative ? -result : result;
#endif
}
btConvexHullInternal::Int128 btConvexHullInternal::Int128::mul(uint64_t a, uint64_t b)
{
Int128 result;
#ifdef USE_X86_64_ASM
__asm__("mulq %[b]"
: "=a"(result.low), "=d"(result.high)
: "0"(a), [b] "r"(b)
: "cc");
#else
DMul<uint64_t, uint32_t>::mul(a, b, result.low, result.high);
#endif
return result;
}
int32_t btConvexHullInternal::Rational64::compare(const Rational64& b) const
{
if (sign != b.sign) {
return sign - b.sign;
}
else if (sign == 0) {
return 0;
}
// return (numerator * b.denominator > b.numerator * denominator) ? sign : (numerator * b.denominator < b.numerator * denominator) ? -sign : 0;
#ifdef USE_X86_64_ASM
int32_t result;
int64_t tmp;
int64_t dummy;
__asm__("mulq %[bn]\n\t"
"movq %%rax, %[tmp]\n\t"
"movq %%rdx, %%rbx\n\t"
"movq %[tn], %%rax\n\t"
"mulq %[bd]\n\t"
"subq %[tmp], %%rax\n\t"
"sbbq %%rbx, %%rdx\n\t" // rdx:rax contains 128-bit-difference "numerator*b.denominator - b.numerator*denominator"
"setnsb %%bh\n\t" // bh=1 if difference is non-negative, bh=0 otherwise
"orq %%rdx, %%rax\n\t"
"setnzb %%bl\n\t" // bl=1 if difference if non-zero, bl=0 if it is zero
"decb %%bh\n\t" // now bx=0x0000 if difference is zero, 0xff01 if it is negative, 0x0001 if it is positive (i.e., same sign as difference)
"shll $16, %%ebx\n\t" // ebx has same sign as difference
: "=&b"(result), [tmp] "=&r"(tmp), "=a"(dummy)
: "a"(denominator), [bn] "g"(b.numerator), [tn] "g"(numerator), [bd] "g"(b.denominator)
: "%rdx", "cc");
return result ? result ^ sign // if sign is +1, only bit 0 of result is inverted, which does not change the sign of result (and cannot result in zero)
// if sign is -1, all bits of result are inverted, which changes the sign of result (and again cannot result in zero)
: 0;
#else
return sign * Int128::mul(m_numerator, b.m_denominator).ucmp(Int128::mul(m_denominator, b.m_numerator));
#endif
}
int32_t btConvexHullInternal::Rational128::compare(const Rational128& b) const
{
if (sign != b.sign) {
return sign - b.sign;
}
else if (sign == 0) {
return 0;
}
if (isInt64) {
return -b.compare(sign * (int64_t)numerator.low);
}
Int128 nbdLow, nbdHigh, dbnLow, dbnHigh;
DMul<Int128, uint64_t>::mul(numerator, b.denominator, nbdLow, nbdHigh);
DMul<Int128, uint64_t>::mul(denominator, b.numerator, dbnLow, dbnHigh);
int32_t cmp = nbdHigh.ucmp(dbnHigh);
if (cmp) {
return cmp * sign;
}
return nbdLow.ucmp(dbnLow) * sign;
}
int32_t btConvexHullInternal::Rational128::compare(int64_t b) const
{
if (isInt64) {
int64_t a = sign * (int64_t)numerator.low;
return (a > b) ? 1 : (a < b) ? -1 : 0;
}
if (b > 0) {
if (sign <= 0) {
return -1;
}
}
else if (b < 0) {
if (sign >= 0) {
return 1;
}
b = -b;
}
else {
return sign;
}
return numerator.ucmp(denominator * b) * sign;
}
btConvexHullInternal::Edge* btConvexHullInternal::newEdgePair(Vertex* from, Vertex* to)
{
btAssert(from && to);
Edge* e = edgePool.newObject();
Edge* r = edgePool.newObject();
e->reverse = r;
r->reverse = e;
e->copy = mergeStamp;
r->copy = mergeStamp;
e->target = to;
r->target = from;
e->face = NULL;
r->face = NULL;
usedEdgePairs++;
if (usedEdgePairs > maxUsedEdgePairs) {
maxUsedEdgePairs = usedEdgePairs;
}
return e;
}
bool btConvexHullInternal::mergeProjection(IntermediateHull& h0, IntermediateHull& h1, Vertex*& c0, Vertex*& c1)
{
Vertex* v0 = h0.maxYx;
Vertex* v1 = h1.minYx;
if ((v0->point.x == v1->point.x) && (v0->point.y == v1->point.y)) {
btAssert(v0->point.z < v1->point.z);
Vertex* v1p = v1->prev;
if (v1p == v1) {
c0 = v0;
if (v1->edges) {
btAssert(v1->edges->next == v1->edges);
v1 = v1->edges->target;
btAssert(v1->edges->next == v1->edges);
}
c1 = v1;
return false;
}
Vertex* v1n = v1->next;
v1p->next = v1n;
v1n->prev = v1p;
if (v1 == h1.minXy) {
if ((v1n->point.x < v1p->point.x) || ((v1n->point.x == v1p->point.x) && (v1n->point.y < v1p->point.y))) {
h1.minXy = v1n;
}
else {
h1.minXy = v1p;
}
}
if (v1 == h1.maxXy) {
if ((v1n->point.x > v1p->point.x) || ((v1n->point.x == v1p->point.x) && (v1n->point.y > v1p->point.y))) {
h1.maxXy = v1n;
}
else {
h1.maxXy = v1p;
}
}
}
v0 = h0.maxXy;
v1 = h1.maxXy;
Vertex* v00 = NULL;
Vertex* v10 = NULL;
int32_t sign = 1;
for (int32_t side = 0; side <= 1; side++) {
int32_t dx = (v1->point.x - v0->point.x) * sign;
if (dx > 0) {
while (true) {
int32_t dy = v1->point.y - v0->point.y;
Vertex* w0 = side ? v0->next : v0->prev;
if (w0 != v0) {
int32_t dx0 = (w0->point.x - v0->point.x) * sign;
int32_t dy0 = w0->point.y - v0->point.y;
if ((dy0 <= 0) && ((dx0 == 0) || ((dx0 < 0) && (dy0 * dx <= dy * dx0)))) {
v0 = w0;
dx = (v1->point.x - v0->point.x) * sign;
continue;
}
}
Vertex* w1 = side ? v1->next : v1->prev;
if (w1 != v1) {
int32_t dx1 = (w1->point.x - v1->point.x) * sign;
int32_t dy1 = w1->point.y - v1->point.y;
int32_t dxn = (w1->point.x - v0->point.x) * sign;
if ((dxn > 0) && (dy1 < 0) && ((dx1 == 0) || ((dx1 < 0) && (dy1 * dx < dy * dx1)))) {
v1 = w1;
dx = dxn;
continue;
}
}
break;
}
}
else if (dx < 0) {
while (true) {
int32_t dy = v1->point.y - v0->point.y;
Vertex* w1 = side ? v1->prev : v1->next;
if (w1 != v1) {
int32_t dx1 = (w1->point.x - v1->point.x) * sign;
int32_t dy1 = w1->point.y - v1->point.y;
if ((dy1 >= 0) && ((dx1 == 0) || ((dx1 < 0) && (dy1 * dx <= dy * dx1)))) {
v1 = w1;
dx = (v1->point.x - v0->point.x) * sign;
continue;
}
}
Vertex* w0 = side ? v0->prev : v0->next;
if (w0 != v0) {
int32_t dx0 = (w0->point.x - v0->point.x) * sign;
int32_t dy0 = w0->point.y - v0->point.y;
int32_t dxn = (v1->point.x - w0->point.x) * sign;
if ((dxn < 0) && (dy0 > 0) && ((dx0 == 0) || ((dx0 < 0) && (dy0 * dx < dy * dx0)))) {
v0 = w0;
dx = dxn;
continue;
}
}
break;
}
}
else {
int32_t x = v0->point.x;
int32_t y0 = v0->point.y;
Vertex* w0 = v0;
Vertex* t;
while (((t = side ? w0->next : w0->prev) != v0) && (t->point.x == x) && (t->point.y <= y0)) {
w0 = t;
y0 = t->point.y;
}
v0 = w0;
int32_t y1 = v1->point.y;
Vertex* w1 = v1;
while (((t = side ? w1->prev : w1->next) != v1) && (t->point.x == x) && (t->point.y >= y1)) {
w1 = t;
y1 = t->point.y;
}
v1 = w1;
}
if (side == 0) {
v00 = v0;
v10 = v1;
v0 = h0.minXy;
v1 = h1.minXy;
sign = -1;
}
}
v0->prev = v1;
v1->next = v0;
v00->next = v10;
v10->prev = v00;
if (h1.minXy->point.x < h0.minXy->point.x) {
h0.minXy = h1.minXy;
}
if (h1.maxXy->point.x >= h0.maxXy->point.x) {
h0.maxXy = h1.maxXy;
}
h0.maxYx = h1.maxYx;
c0 = v00;
c1 = v10;
return true;
}
void btConvexHullInternal::computeInternal(int32_t start, int32_t end, IntermediateHull& result)
{
int32_t n = end - start;
switch (n) {
case 0:
result.minXy = NULL;
result.maxXy = NULL;
result.minYx = NULL;
result.maxYx = NULL;
return;
case 2: {
Vertex* v = originalVertices[start];
Vertex* w = v + 1;
if (v->point != w->point) {
int32_t dx = v->point.x - w->point.x;
int32_t dy = v->point.y - w->point.y;
if ((dx == 0) && (dy == 0)) {
if (v->point.z > w->point.z) {
Vertex* t = w;
w = v;
v = t;
}
btAssert(v->point.z < w->point.z);
v->next = v;
v->prev = v;
result.minXy = v;
result.maxXy = v;
result.minYx = v;
result.maxYx = v;
}
else {
v->next = w;
v->prev = w;
w->next = v;
w->prev = v;
if ((dx < 0) || ((dx == 0) && (dy < 0))) {
result.minXy = v;
result.maxXy = w;
}
else {
result.minXy = w;
result.maxXy = v;
}
if ((dy < 0) || ((dy == 0) && (dx < 0))) {
result.minYx = v;
result.maxYx = w;
}
else {
result.minYx = w;
result.maxYx = v;
}
}
Edge* e = newEdgePair(v, w);
e->link(e);
v->edges = e;
e = e->reverse;
e->link(e);
w->edges = e;
return;
}
#if defined(__GNUC__)
goto fallthrough; // Needed to silence gcc
#endif
}
#if defined(__GNUC__)
fallthrough: // Needed to silence gcc
#endif
// lint -fallthrough
case 1: {
Vertex* v = originalVertices[start];
v->edges = NULL;
v->next = v;
v->prev = v;
result.minXy = v;
result.maxXy = v;
result.minYx = v;
result.maxYx = v;
return;
}
}
int32_t split0 = start + n / 2;
Point32 p = originalVertices[split0 - 1]->point;
int32_t split1 = split0;
while ((split1 < end) && (originalVertices[split1]->point == p)) {
split1++;
}
computeInternal(start, split0, result);
IntermediateHull hull1;
computeInternal(split1, end, hull1);
#ifdef DEBUG_CONVEX_HULL
printf("\n\nMerge\n");
result.print();
hull1.print();
#endif
merge(result, hull1);
#ifdef DEBUG_CONVEX_HULL
printf("\n Result\n");
result.print();
#endif
}
#ifdef DEBUG_CONVEX_HULL
void btConvexHullInternal::IntermediateHull::print()
{
printf(" Hull\n");
for (Vertex* v = minXy; v;) {
printf(" ");
v->print();
if (v == maxXy) {
printf(" maxXy");
}
if (v == minYx) {
printf(" minYx");
}
if (v == maxYx) {
printf(" maxYx");
}
if (v->next->prev != v) {
printf(" Inconsistency");
}
printf("\n");
v = v->next;
if (v == minXy) {
break;
}
}
if (minXy) {
minXy->copy = (minXy->copy == -1) ? -2 : -1;
minXy->printGraph();
}
}
void btConvexHullInternal::Vertex::printGraph()
{
print();
printf("\nEdges\n");
Edge* e = edges;
if (e) {
do {
e->print();
printf("\n");
e = e->next;
} while (e != edges);
do {
Vertex* v = e->target;
if (v->copy != copy) {
v->copy = copy;
v->printGraph();
}
e = e->next;
} while (e != edges);
}
}
#endif
btConvexHullInternal::Orientation btConvexHullInternal::getOrientation(const Edge* prev, const Edge* next, const Point32& s, const Point32& t)
{
btAssert(prev->reverse->target == next->reverse->target);
if (prev->next == next) {
if (prev->prev == next) {
Point64 n = t.cross(s);
Point64 m = (*prev->target - *next->reverse->target).cross(*next->target - *next->reverse->target);
btAssert(!m.isZero());
int64_t dot = n.dot(m);
btAssert(dot != 0);
return (dot > 0) ? COUNTER_CLOCKWISE : CLOCKWISE;
}
return COUNTER_CLOCKWISE;
}
else if (prev->prev == next) {
return CLOCKWISE;
}
else {
return NONE;
}
}
btConvexHullInternal::Edge* btConvexHullInternal::findMaxAngle(bool ccw, const Vertex* start, const Point32& s, const Point64& rxs, const Point64& sxrxs, Rational64& minCot)
{
Edge* minEdge = NULL;
#ifdef DEBUG_CONVEX_HULL
printf("find max edge for %d\n", start->point.index);
#endif
Edge* e = start->edges;
if (e) {
do {
if (e->copy > mergeStamp) {
Point32 t = *e->target - *start;
Rational64 cot(t.dot(sxrxs), t.dot(rxs));
#ifdef DEBUG_CONVEX_HULL
printf(" Angle is %f (%d) for ", (float)btAtan(cot.toScalar()), (int32_t)cot.isNaN());
e->print();
#endif
if (cot.isNaN()) {
btAssert(ccw ? (t.dot(s) < 0) : (t.dot(s) > 0));
}
else {
int32_t cmp;
if (minEdge == NULL) {
minCot = cot;
minEdge = e;
}
else if ((cmp = cot.compare(minCot)) < 0) {
minCot = cot;
minEdge = e;
}
else if ((cmp == 0) && (ccw == (getOrientation(minEdge, e, s, t) == COUNTER_CLOCKWISE))) {
minEdge = e;
}
}
#ifdef DEBUG_CONVEX_HULL
printf("\n");
#endif
}
e = e->next;
} while (e != start->edges);
}
return minEdge;
}
void btConvexHullInternal::findEdgeForCoplanarFaces(Vertex* c0, Vertex* c1, Edge*& e0, Edge*& e1, Vertex* stop0, Vertex* stop1)
{
Edge* start0 = e0;
Edge* start1 = e1;
Point32 et0 = start0 ? start0->target->point : c0->point;
Point32 et1 = start1 ? start1->target->point : c1->point;
Point32 s = c1->point - c0->point;
Point64 normal = ((start0 ? start0 : start1)->target->point - c0->point).cross(s);
int64_t dist = c0->point.dot(normal);
btAssert(!start1 || (start1->target->point.dot(normal) == dist));
Point64 perp = s.cross(normal);
btAssert(!perp.isZero());
#ifdef DEBUG_CONVEX_HULL
printf(" Advancing %d %d (%p %p, %d %d)\n", c0->point.index, c1->point.index, start0, start1, start0 ? start0->target->point.index : -1, start1 ? start1->target->point.index : -1);
#endif
int64_t maxDot0 = et0.dot(perp);
if (e0) {
while (e0->target != stop0) {
Edge* e = e0->reverse->prev;
if (e->target->point.dot(normal) < dist) {
break;
}
btAssert(e->target->point.dot(normal) == dist);
if (e->copy == mergeStamp) {
break;
}
int64_t dot = e->target->point.dot(perp);
if (dot <= maxDot0) {
break;
}
maxDot0 = dot;
e0 = e;
et0 = e->target->point;
}
}
int64_t maxDot1 = et1.dot(perp);
if (e1) {
while (e1->target != stop1) {
Edge* e = e1->reverse->next;
if (e->target->point.dot(normal) < dist) {
break;
}
btAssert(e->target->point.dot(normal) == dist);
if (e->copy == mergeStamp) {
break;
}
int64_t dot = e->target->point.dot(perp);
if (dot <= maxDot1) {
break;
}
maxDot1 = dot;
e1 = e;
et1 = e->target->point;
}
}
#ifdef DEBUG_CONVEX_HULL
printf(" Starting at %d %d\n", et0.index, et1.index);
#endif
int64_t dx = maxDot1 - maxDot0;
if (dx > 0) {
while (true) {
int64_t dy = (et1 - et0).dot(s);
if (e0 && (e0->target != stop0)) {
Edge* f0 = e0->next->reverse;
if (f0->copy > mergeStamp) {
int64_t dx0 = (f0->target->point - et0).dot(perp);
int64_t dy0 = (f0->target->point - et0).dot(s);
if ((dx0 == 0) ? (dy0 < 0) : ((dx0 < 0) && (Rational64(dy0, dx0).compare(Rational64(dy, dx)) >= 0))) {
et0 = f0->target->point;
dx = (et1 - et0).dot(perp);
e0 = (e0 == start0) ? NULL : f0;
continue;
}
}
}
if (e1 && (e1->target != stop1)) {
Edge* f1 = e1->reverse->next;
if (f1->copy > mergeStamp) {
Point32 d1 = f1->target->point - et1;
if (d1.dot(normal) == 0) {
int64_t dx1 = d1.dot(perp);
int64_t dy1 = d1.dot(s);
int64_t dxn = (f1->target->point - et0).dot(perp);
if ((dxn > 0) && ((dx1 == 0) ? (dy1 < 0) : ((dx1 < 0) && (Rational64(dy1, dx1).compare(Rational64(dy, dx)) > 0)))) {
e1 = f1;
et1 = e1->target->point;
dx = dxn;
continue;
}
}
else {
btAssert((e1 == start1) && (d1.dot(normal) < 0));
}
}
}
break;
}
}
else if (dx < 0) {
while (true) {
int64_t dy = (et1 - et0).dot(s);
if (e1 && (e1->target != stop1)) {
Edge* f1 = e1->prev->reverse;
if (f1->copy > mergeStamp) {
int64_t dx1 = (f1->target->point - et1).dot(perp);
int64_t dy1 = (f1->target->point - et1).dot(s);
if ((dx1 == 0) ? (dy1 > 0) : ((dx1 < 0) && (Rational64(dy1, dx1).compare(Rational64(dy, dx)) <= 0))) {
et1 = f1->target->point;
dx = (et1 - et0).dot(perp);
e1 = (e1 == start1) ? NULL : f1;
continue;
}
}
}
if (e0 && (e0->target != stop0)) {
Edge* f0 = e0->reverse->prev;
if (f0->copy > mergeStamp) {
Point32 d0 = f0->target->point - et0;
if (d0.dot(normal) == 0) {
int64_t dx0 = d0.dot(perp);
int64_t dy0 = d0.dot(s);
int64_t dxn = (et1 - f0->target->point).dot(perp);
if ((dxn < 0) && ((dx0 == 0) ? (dy0 > 0) : ((dx0 < 0) && (Rational64(dy0, dx0).compare(Rational64(dy, dx)) < 0)))) {
e0 = f0;
et0 = e0->target->point;
dx = dxn;
continue;
}
}
else {
btAssert((e0 == start0) && (d0.dot(normal) < 0));
}
}
}
break;
}
}
#ifdef DEBUG_CONVEX_HULL
printf(" Advanced edges to %d %d\n", et0.index, et1.index);
#endif
}
void btConvexHullInternal::merge(IntermediateHull& h0, IntermediateHull& h1)
{
if (!h1.maxXy) {
return;
}
if (!h0.maxXy) {
h0 = h1;
return;
}
mergeStamp--;
Vertex* c0 = NULL;
Edge* toPrev0 = NULL;
Edge* firstNew0 = NULL;
Edge* pendingHead0 = NULL;
Edge* pendingTail0 = NULL;
Vertex* c1 = NULL;
Edge* toPrev1 = NULL;
Edge* firstNew1 = NULL;
Edge* pendingHead1 = NULL;
Edge* pendingTail1 = NULL;
Point32 prevPoint;
if (mergeProjection(h0, h1, c0, c1)) {
Point32 s = *c1 - *c0;
Point64 normal = Point32(0, 0, -1).cross(s);
Point64 t = s.cross(normal);
btAssert(!t.isZero());
Edge* e = c0->edges;
Edge* start0 = NULL;
if (e) {
do {
int64_t dot = (*e->target - *c0).dot(normal);
btAssert(dot <= 0);
if ((dot == 0) && ((*e->target - *c0).dot(t) > 0)) {
if (!start0 || (getOrientation(start0, e, s, Point32(0, 0, -1)) == CLOCKWISE)) {
start0 = e;
}
}
e = e->next;
} while (e != c0->edges);
}
e = c1->edges;
Edge* start1 = NULL;
if (e) {
do {
int64_t dot = (*e->target - *c1).dot(normal);
btAssert(dot <= 0);
if ((dot == 0) && ((*e->target - *c1).dot(t) > 0)) {
if (!start1 || (getOrientation(start1, e, s, Point32(0, 0, -1)) == COUNTER_CLOCKWISE)) {
start1 = e;
}
}
e = e->next;
} while (e != c1->edges);
}
if (start0 || start1) {
findEdgeForCoplanarFaces(c0, c1, start0, start1, NULL, NULL);
if (start0) {
c0 = start0->target;
}
if (start1) {
c1 = start1->target;
}
}
prevPoint = c1->point;
prevPoint.z++;
}
else {
prevPoint = c1->point;
prevPoint.x++;
}
Vertex* first0 = c0;
Vertex* first1 = c1;
bool firstRun = true;
while (true) {
Point32 s = *c1 - *c0;
Point32 r = prevPoint - c0->point;
Point64 rxs = r.cross(s);
Point64 sxrxs = s.cross(rxs);
#ifdef DEBUG_CONVEX_HULL
printf("\n Checking %d %d\n", c0->point.index, c1->point.index);
#endif
Rational64 minCot0(0, 0);
Edge* min0 = findMaxAngle(false, c0, s, rxs, sxrxs, minCot0);
Rational64 minCot1(0, 0);
Edge* min1 = findMaxAngle(true, c1, s, rxs, sxrxs, minCot1);
if (!min0 && !min1) {
Edge* e = newEdgePair(c0, c1);
e->link(e);
c0->edges = e;
e = e->reverse;
e->link(e);
c1->edges = e;
return;
}
else {
int32_t cmp = !min0 ? 1 : !min1 ? -1 : minCot0.compare(minCot1);
#ifdef DEBUG_CONVEX_HULL
printf(" -> Result %d\n", cmp);
#endif
if (firstRun || ((cmp >= 0) ? !minCot1.isNegativeInfinity() : !minCot0.isNegativeInfinity())) {
Edge* e = newEdgePair(c0, c1);
if (pendingTail0) {
pendingTail0->prev = e;
}
else {
pendingHead0 = e;
}
e->next = pendingTail0;
pendingTail0 = e;
e = e->reverse;
if (pendingTail1) {
pendingTail1->next = e;
}
else {
pendingHead1 = e;
}
e->prev = pendingTail1;
pendingTail1 = e;
}
Edge* e0 = min0;
Edge* e1 = min1;
#ifdef DEBUG_CONVEX_HULL
printf(" Found min edges to %d %d\n", e0 ? e0->target->point.index : -1, e1 ? e1->target->point.index : -1);
#endif
if (cmp == 0) {
findEdgeForCoplanarFaces(c0, c1, e0, e1, NULL, NULL);
}
if ((cmp >= 0) && e1) {
if (toPrev1) {
for (Edge *e = toPrev1->next, *n = NULL; e != min1; e = n) {
n = e->next;
removeEdgePair(e);
}
}
if (pendingTail1) {
if (toPrev1) {
toPrev1->link(pendingHead1);
}
else {
min1->prev->link(pendingHead1);
firstNew1 = pendingHead1;
}
pendingTail1->link(min1);
pendingHead1 = NULL;
pendingTail1 = NULL;
}
else if (!toPrev1) {
firstNew1 = min1;
}
prevPoint = c1->point;
c1 = e1->target;
toPrev1 = e1->reverse;
}
if ((cmp <= 0) && e0) {
if (toPrev0) {
for (Edge *e = toPrev0->prev, *n = NULL; e != min0; e = n) {
n = e->prev;
removeEdgePair(e);
}
}
if (pendingTail0) {
if (toPrev0) {
pendingHead0->link(toPrev0);
}
else {
pendingHead0->link(min0->next);
firstNew0 = pendingHead0;
}
min0->link(pendingTail0);
pendingHead0 = NULL;
pendingTail0 = NULL;
}
else if (!toPrev0) {
firstNew0 = min0;
}
prevPoint = c0->point;
c0 = e0->target;
toPrev0 = e0->reverse;
}
}
if ((c0 == first0) && (c1 == first1)) {
if (toPrev0 == NULL) {
pendingHead0->link(pendingTail0);
c0->edges = pendingTail0;
}
else {
for (Edge *e = toPrev0->prev, *n = NULL; e != firstNew0; e = n) {
n = e->prev;
removeEdgePair(e);
}
if (pendingTail0) {
pendingHead0->link(toPrev0);
firstNew0->link(pendingTail0);
}
}
if (toPrev1 == NULL) {
pendingTail1->link(pendingHead1);
c1->edges = pendingTail1;
}
else {
for (Edge *e = toPrev1->next, *n = NULL; e != firstNew1; e = n) {
n = e->next;
removeEdgePair(e);
}
if (pendingTail1) {
toPrev1->link(pendingHead1);
pendingTail1->link(firstNew1);
}
}
return;
}
firstRun = false;
}
}
static bool pointCmp(const btConvexHullInternal::Point32& p, const btConvexHullInternal::Point32& q)
{
return (p.y < q.y) || ((p.y == q.y) && ((p.x < q.x) || ((p.x == q.x) && (p.z < q.z))));
}
void btConvexHullInternal::compute(const void* coords, bool doubleCoords, int32_t stride, int32_t count)
{
btVector3 min(btScalar(1e30), btScalar(1e30), btScalar(1e30)), max(btScalar(-1e30), btScalar(-1e30), btScalar(-1e30));
const char* ptr = (const char*)coords;
if (doubleCoords) {
for (int32_t i = 0; i < count; i++) {
const double* v = (const double*)ptr;
btVector3 p((btScalar)v[0], (btScalar)v[1], (btScalar)v[2]);
ptr += stride;
min.setMin(p);
max.setMax(p);
}
}
else {
for (int32_t i = 0; i < count; i++) {
const float* v = (const float*)ptr;
btVector3 p(v[0], v[1], v[2]);
ptr += stride;
min.setMin(p);
max.setMax(p);
}
}
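    // The points are recentred, rescaled and quantized to signed 32-bit integer coordinates below
    // (the longest extent is mapped to y, the median to x, the shortest to z), so that the
    // divide-and-conquer construction can evaluate its orientation predicates exactly using
    // Point64/Int128 integer arithmetic rather than floating point.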
btVector3 s = max - min;
maxAxis = s.maxAxis();
minAxis = s.minAxis();
if (minAxis == maxAxis) {
minAxis = (maxAxis + 1) % 3;
}
medAxis = 3 - maxAxis - minAxis;
s /= btScalar(10216);
if (((medAxis + 1) % 3) != maxAxis) {
s *= -1;
}
scaling = s;
if (s[0] != 0) {
s[0] = btScalar(1) / s[0];
}
if (s[1] != 0) {
s[1] = btScalar(1) / s[1];
}
if (s[2] != 0) {
s[2] = btScalar(1) / s[2];
}
center = (min + max) * btScalar(0.5);
btAlignedObjectArray<Point32> points;
points.resize(count);
ptr = (const char*)coords;
if (doubleCoords) {
for (int32_t i = 0; i < count; i++) {
const double* v = (const double*)ptr;
btVector3 p((btScalar)v[0], (btScalar)v[1], (btScalar)v[2]);
ptr += stride;
p = (p - center) * s;
points[i].x = (int32_t)p[medAxis];
points[i].y = (int32_t)p[maxAxis];
points[i].z = (int32_t)p[minAxis];
points[i].index = i;
}
}
else {
for (int32_t i = 0; i < count; i++) {
const float* v = (const float*)ptr;
btVector3 p(v[0], v[1], v[2]);
ptr += stride;
p = (p - center) * s;
points[i].x = (int32_t)p[medAxis];
points[i].y = (int32_t)p[maxAxis];
points[i].z = (int32_t)p[minAxis];
points[i].index = i;
}
}
points.quickSort(pointCmp);
vertexPool.reset();
vertexPool.setArraySize(count);
originalVertices.resize(count);
for (int32_t i = 0; i < count; i++) {
Vertex* v = vertexPool.newObject();
v->edges = NULL;
v->point = points[i];
v->copy = -1;
originalVertices[i] = v;
}
points.clear();
edgePool.reset();
edgePool.setArraySize(6 * count);
usedEdgePairs = 0;
maxUsedEdgePairs = 0;
mergeStamp = -3;
IntermediateHull hull;
computeInternal(0, count, hull);
vertexList = hull.minXy;
#ifdef DEBUG_CONVEX_HULL
printf("max. edges %d (3v = %d)", maxUsedEdgePairs, 3 * count);
#endif
}
btVector3 btConvexHullInternal::toBtVector(const Point32& v)
{
btVector3 p;
p[medAxis] = btScalar(v.x);
p[maxAxis] = btScalar(v.y);
p[minAxis] = btScalar(v.z);
return p * scaling;
}
btVector3 btConvexHullInternal::getBtNormal(Face* face)
{
return toBtVector(face->dir0).cross(toBtVector(face->dir1)).normalized();
}
btVector3 btConvexHullInternal::getCoordinates(const Vertex* v)
{
btVector3 p;
p[medAxis] = v->xvalue();
p[maxAxis] = v->yvalue();
p[minAxis] = v->zvalue();
return p * scaling + center;
}
btScalar btConvexHullInternal::shrink(btScalar amount, btScalar clampAmount)
{
if (!vertexList) {
return 0;
}
int32_t stamp = --mergeStamp;
btAlignedObjectArray<Vertex*> stack;
vertexList->copy = stamp;
stack.push_back(vertexList);
btAlignedObjectArray<Face*> faces;
Point32 ref = vertexList->point;
Int128 hullCenterX(0, 0);
Int128 hullCenterY(0, 0);
Int128 hullCenterZ(0, 0);
Int128 volume(0, 0);
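    // The traversal below accumulates the hull volume and centroid by decomposing the hull into
    // tetrahedra (ref, v, a, b): each triple product (v-ref).((a-ref)x(b-ref)) is 6x the signed
    // tetrahedron volume and c = v + a + b + ref is 4x its centroid, so the common factors drop out
    // when hullCenter is divided by 4 * volume further down.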
while (stack.size() > 0) {
Vertex* v = stack[stack.size() - 1];
stack.pop_back();
Edge* e = v->edges;
if (e) {
do {
if (e->target->copy != stamp) {
e->target->copy = stamp;
stack.push_back(e->target);
}
if (e->copy != stamp) {
Face* face = facePool.newObject();
face->init(e->target, e->reverse->prev->target, v);
faces.push_back(face);
Edge* f = e;
Vertex* a = NULL;
Vertex* b = NULL;
do {
if (a && b) {
int64_t vol = (v->point - ref).dot((a->point - ref).cross(b->point - ref));
btAssert(vol >= 0);
Point32 c = v->point + a->point + b->point + ref;
hullCenterX += vol * c.x;
hullCenterY += vol * c.y;
hullCenterZ += vol * c.z;
volume += vol;
}
btAssert(f->copy != stamp);
f->copy = stamp;
f->face = face;
a = b;
b = f->target;
f = f->reverse->prev;
} while (f != e);
}
e = e->next;
} while (e != v->edges);
}
}
if (volume.getSign() <= 0) {
return 0;
}
btVector3 hullCenter;
hullCenter[medAxis] = hullCenterX.toScalar();
hullCenter[maxAxis] = hullCenterY.toScalar();
hullCenter[minAxis] = hullCenterZ.toScalar();
hullCenter /= 4 * volume.toScalar();
hullCenter *= scaling;
int32_t faceCount = faces.size();
if (clampAmount > 0) {
btScalar minDist = SIMD_INFINITY;
for (int32_t i = 0; i < faceCount; i++) {
btVector3 normal = getBtNormal(faces[i]);
btScalar dist = normal.dot(toBtVector(faces[i]->origin) - hullCenter);
if (dist < minDist) {
minDist = dist;
}
}
if (minDist <= 0) {
return 0;
}
amount = btMin(amount, minDist * clampAmount);
}
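    // The faces are then visited in a pseudo-random order produced by a simple linear congruential
    // generator (the 1664525 / 1013904223 constants) before each one is shifted inwards by 'amount'.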
uint32_t seed = 243703;
for (int32_t i = 0; i < faceCount; i++, seed = 1664525 * seed + 1013904223) {
btSwap(faces[i], faces[seed % faceCount]);
}
for (int32_t i = 0; i < faceCount; i++) {
if (!shiftFace(faces[i], amount, stack)) {
return -amount;
}
}
return amount;
}
bool btConvexHullInternal::shiftFace(Face* face, btScalar amount, btAlignedObjectArray<Vertex*> stack)
{
btVector3 origShift = getBtNormal(face) * -amount;
if (scaling[0] != 0) {
origShift[0] /= scaling[0];
}
if (scaling[1] != 0) {
origShift[1] /= scaling[1];
}
if (scaling[2] != 0) {
origShift[2] /= scaling[2];
}
Point32 shift((int32_t)origShift[medAxis], (int32_t)origShift[maxAxis], (int32_t)origShift[minAxis]);
if (shift.isZero()) {
return true;
}
Point64 normal = face->getNormal();
#ifdef DEBUG_CONVEX_HULL
printf("\nShrinking face (%d %d %d) (%d %d %d) (%d %d %d) by (%d %d %d)\n",
face->origin.x, face->origin.y, face->origin.z, face->dir0.x, face->dir0.y, face->dir0.z, face->dir1.x, face->dir1.y, face->dir1.z, shift.x, shift.y, shift.z);
#endif
int64_t origDot = face->origin.dot(normal);
Point32 shiftedOrigin = face->origin + shift;
int64_t shiftedDot = shiftedOrigin.dot(normal);
btAssert(shiftedDot <= origDot);
if (shiftedDot >= origDot) {
return false;
}
Edge* intersection = NULL;
Edge* startEdge = face->nearbyVertex->edges;
#ifdef DEBUG_CONVEX_HULL
printf("Start edge is ");
startEdge->print();
printf(", normal is (%lld %lld %lld), shifted dot is %lld\n", normal.x, normal.y, normal.z, shiftedDot);
#endif
Rational128 optDot = face->nearbyVertex->dot(normal);
int32_t cmp = optDot.compare(shiftedDot);
#ifdef SHOW_ITERATIONS
int32_t n = 0;
#endif
if (cmp >= 0) {
Edge* e = startEdge;
do {
#ifdef SHOW_ITERATIONS
n++;
#endif
Rational128 dot = e->target->dot(normal);
btAssert(dot.compare(origDot) <= 0);
#ifdef DEBUG_CONVEX_HULL
printf("Moving downwards, edge is ");
e->print();
printf(", dot is %f (%f %lld)\n", (float)dot.toScalar(), (float)optDot.toScalar(), shiftedDot);
#endif
if (dot.compare(optDot) < 0) {
int32_t c = dot.compare(shiftedDot);
optDot = dot;
e = e->reverse;
startEdge = e;
if (c < 0) {
intersection = e;
break;
}
cmp = c;
}
e = e->prev;
} while (e != startEdge);
if (!intersection) {
return false;
}
}
else {
Edge* e = startEdge;
do {
#ifdef SHOW_ITERATIONS
n++;
#endif
Rational128 dot = e->target->dot(normal);
btAssert(dot.compare(origDot) <= 0);
#ifdef DEBUG_CONVEX_HULL
printf("Moving upwards, edge is ");
e->print();
printf(", dot is %f (%f %lld)\n", (float)dot.toScalar(), (float)optDot.toScalar(), shiftedDot);
#endif
if (dot.compare(optDot) > 0) {
cmp = dot.compare(shiftedDot);
if (cmp >= 0) {
intersection = e;
break;
}
optDot = dot;
e = e->reverse;
startEdge = e;
}
e = e->prev;
} while (e != startEdge);
if (!intersection) {
return true;
}
}
#ifdef SHOW_ITERATIONS
printf("Needed %d iterations to find initial intersection\n", n);
#endif
if (cmp == 0) {
Edge* e = intersection->reverse->next;
#ifdef SHOW_ITERATIONS
n = 0;
#endif
while (e->target->dot(normal).compare(shiftedDot) <= 0) {
#ifdef SHOW_ITERATIONS
n++;
#endif
e = e->next;
if (e == intersection->reverse) {
return true;
}
#ifdef DEBUG_CONVEX_HULL
printf("Checking for outwards edge, current edge is ");
e->print();
printf("\n");
#endif
}
#ifdef SHOW_ITERATIONS
printf("Needed %d iterations to check for complete containment\n", n);
#endif
}
Edge* firstIntersection = NULL;
Edge* faceEdge = NULL;
Edge* firstFaceEdge = NULL;
#ifdef SHOW_ITERATIONS
int32_t m = 0;
#endif
while (true) {
#ifdef SHOW_ITERATIONS
m++;
#endif
#ifdef DEBUG_CONVEX_HULL
printf("Intersecting edge is ");
intersection->print();
printf("\n");
#endif
if (cmp == 0) {
Edge* e = intersection->reverse->next;
startEdge = e;
#ifdef SHOW_ITERATIONS
n = 0;
#endif
while (true) {
#ifdef SHOW_ITERATIONS
n++;
#endif
if (e->target->dot(normal).compare(shiftedDot) >= 0) {
break;
}
intersection = e->reverse;
e = e->next;
if (e == startEdge) {
return true;
}
}
#ifdef SHOW_ITERATIONS
printf("Needed %d iterations to advance intersection\n", n);
#endif
}
#ifdef DEBUG_CONVEX_HULL
printf("Advanced intersecting edge to ");
intersection->print();
printf(", cmp = %d\n", cmp);
#endif
if (!firstIntersection) {
firstIntersection = intersection;
}
else if (intersection == firstIntersection) {
break;
}
int32_t prevCmp = cmp;
Edge* prevIntersection = intersection;
Edge* prevFaceEdge = faceEdge;
Edge* e = intersection->reverse;
#ifdef SHOW_ITERATIONS
n = 0;
#endif
while (true) {
#ifdef SHOW_ITERATIONS
n++;
#endif
e = e->reverse->prev;
btAssert(e != intersection->reverse);
cmp = e->target->dot(normal).compare(shiftedDot);
#ifdef DEBUG_CONVEX_HULL
printf("Testing edge ");
e->print();
printf(" -> cmp = %d\n", cmp);
#endif
if (cmp >= 0) {
intersection = e;
break;
}
}
#ifdef SHOW_ITERATIONS
printf("Needed %d iterations to find other intersection of face\n", n);
#endif
if (cmp > 0) {
Vertex* removed = intersection->target;
e = intersection->reverse;
if (e->prev == e) {
removed->edges = NULL;
}
else {
removed->edges = e->prev;
e->prev->link(e->next);
e->link(e);
}
#ifdef DEBUG_CONVEX_HULL
printf("1: Removed part contains (%d %d %d)\n", removed->point.x, removed->point.y, removed->point.z);
#endif
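// The clipped-off vertex is replaced by the exact intersection of three planes:
// the shifted face plane and the planes of the two faces adjacent to the
// intersected edge. Writing the point as shiftedOrigin + s*dir0 + t*dir1 and
// requiring it to lie on both adjacent planes gives a 2x2 linear system that is
// solved below with Cramer's rule in 128-bit integer arithmetic; the result is
// kept as a rational point with denominator 'det'.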
Point64 n0 = intersection->face->getNormal();
Point64 n1 = intersection->reverse->face->getNormal();
int64_t m00 = face->dir0.dot(n0);
int64_t m01 = face->dir1.dot(n0);
int64_t m10 = face->dir0.dot(n1);
int64_t m11 = face->dir1.dot(n1);
int64_t r0 = (intersection->face->origin - shiftedOrigin).dot(n0);
int64_t r1 = (intersection->reverse->face->origin - shiftedOrigin).dot(n1);
Int128 det = Int128::mul(m00, m11) - Int128::mul(m01, m10);
btAssert(det.getSign() != 0);
Vertex* v = vertexPool.newObject();
v->point.index = -1;
v->copy = -1;
v->point128 = PointR128(Int128::mul(face->dir0.x * r0, m11) - Int128::mul(face->dir0.x * r1, m01)
+ Int128::mul(face->dir1.x * r1, m00) - Int128::mul(face->dir1.x * r0, m10) + det * shiftedOrigin.x,
Int128::mul(face->dir0.y * r0, m11) - Int128::mul(face->dir0.y * r1, m01)
+ Int128::mul(face->dir1.y * r1, m00) - Int128::mul(face->dir1.y * r0, m10) + det * shiftedOrigin.y,
Int128::mul(face->dir0.z * r0, m11) - Int128::mul(face->dir0.z * r1, m01)
+ Int128::mul(face->dir1.z * r1, m00) - Int128::mul(face->dir1.z * r0, m10) + det * shiftedOrigin.z,
det);
v->point.x = (int32_t)v->point128.xvalue();
v->point.y = (int32_t)v->point128.yvalue();
v->point.z = (int32_t)v->point128.zvalue();
intersection->target = v;
v->edges = e;
stack.push_back(v);
stack.push_back(removed);
stack.push_back(NULL);
}
if (cmp || prevCmp || (prevIntersection->reverse->next->target != intersection->target)) {
faceEdge = newEdgePair(prevIntersection->target, intersection->target);
if (prevCmp == 0) {
faceEdge->link(prevIntersection->reverse->next);
}
if ((prevCmp == 0) || prevFaceEdge) {
prevIntersection->reverse->link(faceEdge);
}
if (cmp == 0) {
intersection->reverse->prev->link(faceEdge->reverse);
}
faceEdge->reverse->link(intersection->reverse);
}
else {
faceEdge = prevIntersection->reverse->next;
}
if (prevFaceEdge) {
if (prevCmp > 0) {
faceEdge->link(prevFaceEdge->reverse);
}
else if (faceEdge != prevFaceEdge->reverse) {
stack.push_back(prevFaceEdge->target);
while (faceEdge->next != prevFaceEdge->reverse) {
Vertex* removed = faceEdge->next->target;
removeEdgePair(faceEdge->next);
stack.push_back(removed);
#ifdef DEBUG_CONVEX_HULL
printf("2: Removed part contains (%d %d %d)\n", removed->point.x, removed->point.y, removed->point.z);
#endif
}
stack.push_back(NULL);
}
}
faceEdge->face = face;
faceEdge->reverse->face = intersection->face;
if (!firstFaceEdge) {
firstFaceEdge = faceEdge;
}
}
#ifdef SHOW_ITERATIONS
printf("Needed %d iterations to process all intersections\n", m);
#endif
if (cmp > 0) {
firstFaceEdge->reverse->target = faceEdge->target;
firstIntersection->reverse->link(firstFaceEdge);
firstFaceEdge->link(faceEdge->reverse);
}
else if (firstFaceEdge != faceEdge->reverse) {
stack.push_back(faceEdge->target);
while (firstFaceEdge->next != faceEdge->reverse) {
Vertex* removed = firstFaceEdge->next->target;
removeEdgePair(firstFaceEdge->next);
stack.push_back(removed);
#ifdef DEBUG_CONVEX_HULL
printf("3: Removed part contains (%d %d %d)\n", removed->point.x, removed->point.y, removed->point.z);
#endif
}
stack.push_back(NULL);
}
btAssert(stack.size() > 0);
vertexList = stack[0];
#ifdef DEBUG_CONVEX_HULL
printf("Removing part\n");
#endif
#ifdef SHOW_ITERATIONS
n = 0;
#endif
int32_t pos = 0;
while (pos < stack.size()) {
int32_t end = stack.size();
while (pos < end) {
Vertex* kept = stack[pos++];
#ifdef DEBUG_CONVEX_HULL
kept->print();
#endif
bool deeper = false;
Vertex* removed;
while ((removed = stack[pos++]) != NULL) {
#ifdef SHOW_ITERATIONS
n++;
#endif
kept->receiveNearbyFaces(removed);
while (removed->edges) {
if (!deeper) {
deeper = true;
stack.push_back(kept);
}
stack.push_back(removed->edges->target);
removeEdgePair(removed->edges);
}
}
if (deeper) {
stack.push_back(NULL);
}
}
}
#ifdef SHOW_ITERATIONS
printf("Needed %d iterations to remove part\n", n);
#endif
stack.resize(0);
face->origin = shiftedOrigin;
return true;
}
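// Hands out a dense output index for an internal hull vertex the first time it
// is seen (cached in vertex->copy) and appends the vertex to the output list.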
static int32_t getVertexCopy(btConvexHullInternal::Vertex* vertex, btAlignedObjectArray<btConvexHullInternal::Vertex*>& vertices)
{
int32_t index = vertex->copy;
if (index < 0) {
index = vertices.size();
vertex->copy = index;
vertices.push_back(vertex);
#ifdef DEBUG_CONVEX_HULL
printf("Vertex %d gets index *%d\n", vertex->point.index, index);
#endif
}
return index;
}
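// Flattens the exact internal hull into the public 'vertices', 'edges' and
// 'faces' arrays after an optional shrink pass. Edges are emitted in reverse
// pairs and linked through relative indices ('reverse' and 'next' store
// offsets, not absolute positions). A shrink value <= 0 disables shrinking; a
// negative return value means the requested shrink could not be applied. The
// float/double convenience overloads declared in the header (not shown here)
// presumably forward to this function; an in-tree caller is
// Mesh::ComputeConvexHull in vhacdMesh.cpp, which passes tightly packed doubles
// with a stride of 3 * sizeof(double) and negative shrink parameters to disable
// shrinking.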
btScalar btConvexHullComputer::compute(const void* coords, bool doubleCoords, int32_t stride, int32_t count, btScalar shrink, btScalar shrinkClamp)
{
if (count <= 0) {
vertices.clear();
edges.clear();
faces.clear();
return 0;
}
btConvexHullInternal hull;
hull.compute(coords, doubleCoords, stride, count);
btScalar shift = 0;
if ((shrink > 0) && ((shift = hull.shrink(shrink, shrinkClamp)) < 0)) {
vertices.clear();
edges.clear();
faces.clear();
return shift;
}
vertices.resize(0);
edges.resize(0);
faces.resize(0);
btAlignedObjectArray<btConvexHullInternal::Vertex*> oldVertices;
getVertexCopy(hull.vertexList, oldVertices);
int32_t copied = 0;
while (copied < oldVertices.size()) {
btConvexHullInternal::Vertex* v = oldVertices[copied];
vertices.push_back(hull.getCoordinates(v));
btConvexHullInternal::Edge* firstEdge = v->edges;
if (firstEdge) {
int32_t firstCopy = -1;
int32_t prevCopy = -1;
btConvexHullInternal::Edge* e = firstEdge;
do {
if (e->copy < 0) {
int32_t s = edges.size();
edges.push_back(Edge());
edges.push_back(Edge());
Edge* c = &edges[s];
Edge* r = &edges[s + 1];
e->copy = s;
e->reverse->copy = s + 1;
c->reverse = 1;
r->reverse = -1;
c->targetVertex = getVertexCopy(e->target, oldVertices);
r->targetVertex = copied;
#ifdef DEBUG_CONVEX_HULL
printf(" CREATE: Vertex *%d has edge to *%d\n", copied, c->getTargetVertex());
#endif
}
if (prevCopy >= 0) {
edges[e->copy].next = prevCopy - e->copy;
}
else {
firstCopy = e->copy;
}
prevCopy = e->copy;
e = e->next;
} while (e != firstEdge);
edges[firstCopy].next = prevCopy - firstCopy;
}
copied++;
}
for (int32_t i = 0; i < copied; i++) {
btConvexHullInternal::Vertex* v = oldVertices[i];
btConvexHullInternal::Edge* firstEdge = v->edges;
if (firstEdge) {
btConvexHullInternal::Edge* e = firstEdge;
do {
if (e->copy >= 0) {
#ifdef DEBUG_CONVEX_HULL
printf("Vertex *%d has edge to *%d\n", i, edges[e->copy].getTargetVertex());
#endif
faces.push_back(e->copy);
btConvexHullInternal::Edge* f = e;
do {
#ifdef DEBUG_CONVEX_HULL
printf(" Face *%d\n", edges[f->copy].getTargetVertex());
#endif
f->copy = -1;
f = f->reverse->prev;
} while (f != e);
}
e = e->next;
} while (e != firstEdge);
}
}
return shift;
}
| 71,274 | C++ | 27.670555 | 243 | 0.455187 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/src/vhacdManifoldMesh.cpp | /* Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com)
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "vhacdManifoldMesh.h"
namespace VHACD {
TMMVertex::TMMVertex(void)
{
Initialize();
}
void TMMVertex::Initialize()
{
m_name = 0;
m_id = 0;
m_duplicate = 0;
m_onHull = false;
m_tag = false;
}
TMMVertex::~TMMVertex(void)
{
}
TMMEdge::TMMEdge(void)
{
Initialize();
}
void TMMEdge::Initialize()
{
m_id = 0;
m_triangles[0] = m_triangles[1] = m_newFace = 0;
m_vertices[0] = m_vertices[1] = 0;
}
TMMEdge::~TMMEdge(void)
{
}
void TMMTriangle::Initialize()
{
m_id = 0;
for (int32_t i = 0; i < 3; i++) {
m_edges[i] = 0;
m_vertices[i] = 0;
}
m_visible = false;
}
TMMTriangle::TMMTriangle(void)
{
Initialize();
}
TMMTriangle::~TMMTriangle(void)
{
}
TMMesh::TMMesh()
{
}
TMMesh::~TMMesh(void)
{
}
void TMMesh::GetIFS(Vec3<double>* const points, Vec3<int32_t>* const triangles)
{
size_t nV = m_vertices.GetSize();
size_t nT = m_triangles.GetSize();
for (size_t v = 0; v < nV; v++) {
points[v] = m_vertices.GetData().m_pos;
m_vertices.GetData().m_id = v;
m_vertices.Next();
}
for (size_t f = 0; f < nT; f++) {
TMMTriangle& currentTriangle = m_triangles.GetData();
triangles[f].X() = static_cast<int32_t>(currentTriangle.m_vertices[0]->GetData().m_id);
triangles[f].Y() = static_cast<int32_t>(currentTriangle.m_vertices[1]->GetData().m_id);
triangles[f].Z() = static_cast<int32_t>(currentTriangle.m_vertices[2]->GetData().m_id);
m_triangles.Next();
}
}
void TMMesh::Clear()
{
m_vertices.Clear();
m_edges.Clear();
m_triangles.Clear();
}
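// Deep copy: the source mesh's m_id fields are temporarily overwritten with
// array indices, the three circular lists are copied element by element, and
// the recorded ids are then used to remap every stored pointer (vertex
// duplicates, edge endpoints and incident triangles, triangle edges and
// vertices) onto the corresponding elements of the new lists.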
void TMMesh::Copy(TMMesh& mesh)
{
Clear();
// updating the id's
size_t nV = mesh.m_vertices.GetSize();
size_t nE = mesh.m_edges.GetSize();
size_t nT = mesh.m_triangles.GetSize();
for (size_t v = 0; v < nV; v++) {
mesh.m_vertices.GetData().m_id = v;
mesh.m_vertices.Next();
}
for (size_t e = 0; e < nE; e++) {
mesh.m_edges.GetData().m_id = e;
mesh.m_edges.Next();
}
for (size_t f = 0; f < nT; f++) {
mesh.m_triangles.GetData().m_id = f;
mesh.m_triangles.Next();
}
// copying data
m_vertices = mesh.m_vertices;
m_edges = mesh.m_edges;
m_triangles = mesh.m_triangles;
// generate mapping
CircularListElement<TMMVertex>** vertexMap = new CircularListElement<TMMVertex>*[nV];
CircularListElement<TMMEdge>** edgeMap = new CircularListElement<TMMEdge>*[nE];
CircularListElement<TMMTriangle>** triangleMap = new CircularListElement<TMMTriangle>*[nT];
for (size_t v = 0; v < nV; v++) {
vertexMap[v] = m_vertices.GetHead();
m_vertices.Next();
}
for (size_t e = 0; e < nE; e++) {
edgeMap[e] = m_edges.GetHead();
m_edges.Next();
}
for (size_t f = 0; f < nT; f++) {
triangleMap[f] = m_triangles.GetHead();
m_triangles.Next();
}
// updating pointers
for (size_t v = 0; v < nV; v++) {
if (vertexMap[v]->GetData().m_duplicate) {
vertexMap[v]->GetData().m_duplicate = edgeMap[vertexMap[v]->GetData().m_duplicate->GetData().m_id];
}
}
for (size_t e = 0; e < nE; e++) {
if (edgeMap[e]->GetData().m_newFace) {
edgeMap[e]->GetData().m_newFace = triangleMap[edgeMap[e]->GetData().m_newFace->GetData().m_id];
}
if (nT > 0) {
for (int32_t f = 0; f < 2; f++) {
if (edgeMap[e]->GetData().m_triangles[f]) {
edgeMap[e]->GetData().m_triangles[f] = triangleMap[edgeMap[e]->GetData().m_triangles[f]->GetData().m_id];
}
}
}
for (int32_t v = 0; v < 2; v++) {
if (edgeMap[e]->GetData().m_vertices[v]) {
edgeMap[e]->GetData().m_vertices[v] = vertexMap[edgeMap[e]->GetData().m_vertices[v]->GetData().m_id];
}
}
}
for (size_t f = 0; f < nT; f++) {
if (nE > 0) {
for (int32_t e = 0; e < 3; e++) {
if (triangleMap[f]->GetData().m_edges[e]) {
triangleMap[f]->GetData().m_edges[e] = edgeMap[triangleMap[f]->GetData().m_edges[e]->GetData().m_id];
}
}
}
for (int32_t v = 0; v < 3; v++) {
if (triangleMap[f]->GetData().m_vertices[v]) {
triangleMap[f]->GetData().m_vertices[v] = vertexMap[triangleMap[f]->GetData().m_vertices[v]->GetData().m_id];
}
}
}
delete[] vertexMap;
delete[] edgeMap;
delete[] triangleMap;
}
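// Manifoldness check: every edge must reference two incident triangles, and
// each triangle must be referenced exactly once by each of its three edges.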
bool TMMesh::CheckConsistancy()
{
size_t nE = m_edges.GetSize();
size_t nT = m_triangles.GetSize();
for (size_t e = 0; e < nE; e++) {
for (int32_t f = 0; f < 2; f++) {
if (!m_edges.GetHead()->GetData().m_triangles[f]) {
return false;
}
}
m_edges.Next();
}
for (size_t f = 0; f < nT; f++) {
for (int32_t e = 0; e < 3; e++) {
int32_t found = 0;
for (int32_t k = 0; k < 2; k++) {
if (m_triangles.GetHead()->GetData().m_edges[e]->GetData().m_triangles[k] == m_triangles.GetHead()) {
found++;
}
}
if (found != 1) {
return false;
}
}
m_triangles.Next();
}
return true;
}
} | 6,863 | C++ | 32.980198 | 756 | 0.576716 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/src/vhacdICHull.cpp | /* Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com)
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "vhacdICHull.h"
#include <limits>
#ifdef _MSC_VER
#pragma warning(disable:4456 4706)
#endif
namespace VHACD {
const double ICHull::sc_eps = 1.0e-15;
const int32_t ICHull::sc_dummyIndex = std::numeric_limits<int32_t>::max();
ICHull::ICHull()
{
m_isFlat = false;
}
bool ICHull::AddPoints(const Vec3<double>* points, size_t nPoints)
{
if (!points) {
return false;
}
CircularListElement<TMMVertex>* vertex = NULL;
for (size_t i = 0; i < nPoints; i++) {
vertex = m_mesh.AddVertex();
vertex->GetData().m_pos.X() = points[i].X();
vertex->GetData().m_pos.Y() = points[i].Y();
vertex->GetData().m_pos.Z() = points[i].Z();
vertex->GetData().m_name = static_cast<int32_t>(i);
}
return true;
}
bool ICHull::AddPoint(const Vec3<double>& point, int32_t id)
{
if (AddPoints(&point, 1)) {
m_mesh.m_vertices.GetData().m_name = id;
return true;
}
return false;
}
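// Incremental convex hull construction. With exactly three input points the
// hull degenerates to a two-sided triangle and only the plane normal is kept.
// Otherwise DoubleTriangle() creates the first polyhedron and every remaining
// vertex is folded in by ProcessPoint() followed by CleanUp(). If the input
// turned out to be coplanar, the dummy apex added by DoubleTriangle() is
// stripped again at the end and the surviving triangles are duplicated with
// reversed winding so the flat hull stays closed.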
ICHullError ICHull::Process()
{
uint32_t addedPoints = 0;
if (m_mesh.GetNVertices() < 3) {
return ICHullErrorNotEnoughPoints;
}
if (m_mesh.GetNVertices() == 3) {
m_isFlat = true;
CircularListElement<TMMTriangle>* t1 = m_mesh.AddTriangle();
CircularListElement<TMMTriangle>* t2 = m_mesh.AddTriangle();
CircularListElement<TMMVertex>* v0 = m_mesh.m_vertices.GetHead();
CircularListElement<TMMVertex>* v1 = v0->GetNext();
CircularListElement<TMMVertex>* v2 = v1->GetNext();
// Compute the normal to the plane
Vec3<double> p0 = v0->GetData().m_pos;
Vec3<double> p1 = v1->GetData().m_pos;
Vec3<double> p2 = v2->GetData().m_pos;
m_normal = (p1 - p0) ^ (p2 - p0);
m_normal.Normalize();
t1->GetData().m_vertices[0] = v0;
t1->GetData().m_vertices[1] = v1;
t1->GetData().m_vertices[2] = v2;
t2->GetData().m_vertices[0] = v1;
t2->GetData().m_vertices[1] = v0;
t2->GetData().m_vertices[2] = v2;
return ICHullErrorOK;
}
if (m_isFlat) {
m_mesh.m_edges.Clear();
m_mesh.m_triangles.Clear();
m_isFlat = false;
}
if (m_mesh.GetNTriangles() == 0) // we have to create the first polyhedron
{
ICHullError res = DoubleTriangle();
if (res != ICHullErrorOK) {
return res;
}
else {
addedPoints += 3;
}
}
CircularList<TMMVertex>& vertices = m_mesh.GetVertices();
// go to the first added and not processed vertex
while (!(vertices.GetHead()->GetPrev()->GetData().m_tag)) {
vertices.Prev();
}
while (!vertices.GetData().m_tag) // not processed
{
vertices.GetData().m_tag = true;
if (ProcessPoint()) {
addedPoints++;
CleanUp(addedPoints);
vertices.Next();
if (!GetMesh().CheckConsistancy()) {
size_t nV = m_mesh.GetNVertices();
CircularList<TMMVertex>& vertices = m_mesh.GetVertices();
for (size_t v = 0; v < nV; ++v) {
if (vertices.GetData().m_name == sc_dummyIndex) {
vertices.Delete();
break;
}
vertices.Next();
}
return ICHullErrorInconsistent;
}
}
}
if (m_isFlat) {
SArray<CircularListElement<TMMTriangle>*> trianglesToDuplicate;
size_t nT = m_mesh.GetNTriangles();
for (size_t f = 0; f < nT; f++) {
TMMTriangle& currentTriangle = m_mesh.m_triangles.GetHead()->GetData();
if (currentTriangle.m_vertices[0]->GetData().m_name == sc_dummyIndex || currentTriangle.m_vertices[1]->GetData().m_name == sc_dummyIndex || currentTriangle.m_vertices[2]->GetData().m_name == sc_dummyIndex) {
m_trianglesToDelete.PushBack(m_mesh.m_triangles.GetHead());
for (int32_t k = 0; k < 3; k++) {
for (int32_t h = 0; h < 2; h++) {
if (currentTriangle.m_edges[k]->GetData().m_triangles[h] == m_mesh.m_triangles.GetHead()) {
currentTriangle.m_edges[k]->GetData().m_triangles[h] = 0;
break;
}
}
}
}
else {
trianglesToDuplicate.PushBack(m_mesh.m_triangles.GetHead());
}
m_mesh.m_triangles.Next();
}
size_t nE = m_mesh.GetNEdges();
for (size_t e = 0; e < nE; e++) {
TMMEdge& currentEdge = m_mesh.m_edges.GetHead()->GetData();
if (currentEdge.m_triangles[0] == 0 && currentEdge.m_triangles[1] == 0) {
m_edgesToDelete.PushBack(m_mesh.m_edges.GetHead());
}
m_mesh.m_edges.Next();
}
size_t nV = m_mesh.GetNVertices();
CircularList<TMMVertex>& vertices = m_mesh.GetVertices();
for (size_t v = 0; v < nV; ++v) {
if (vertices.GetData().m_name == sc_dummyIndex) {
vertices.Delete();
}
else {
vertices.GetData().m_tag = false;
vertices.Next();
}
}
CleanEdges();
CleanTriangles();
CircularListElement<TMMTriangle>* newTriangle;
for (size_t t = 0; t < trianglesToDuplicate.Size(); t++) {
newTriangle = m_mesh.AddTriangle();
newTriangle->GetData().m_vertices[0] = trianglesToDuplicate[t]->GetData().m_vertices[1];
newTriangle->GetData().m_vertices[1] = trianglesToDuplicate[t]->GetData().m_vertices[0];
newTriangle->GetData().m_vertices[2] = trianglesToDuplicate[t]->GetData().m_vertices[2];
}
}
return ICHullErrorOK;
}
ICHullError ICHull::Process(const uint32_t nPointsCH,
const double minVolume)
{
uint32_t addedPoints = 0;
if (nPointsCH < 3 || m_mesh.GetNVertices() < 3) {
return ICHullErrorNotEnoughPoints;
}
if (m_mesh.GetNVertices() == 3) {
m_isFlat = true;
CircularListElement<TMMTriangle>* t1 = m_mesh.AddTriangle();
CircularListElement<TMMTriangle>* t2 = m_mesh.AddTriangle();
CircularListElement<TMMVertex>* v0 = m_mesh.m_vertices.GetHead();
CircularListElement<TMMVertex>* v1 = v0->GetNext();
CircularListElement<TMMVertex>* v2 = v1->GetNext();
// Compute the normal to the plane
Vec3<double> p0 = v0->GetData().m_pos;
Vec3<double> p1 = v1->GetData().m_pos;
Vec3<double> p2 = v2->GetData().m_pos;
m_normal = (p1 - p0) ^ (p2 - p0);
m_normal.Normalize();
t1->GetData().m_vertices[0] = v0;
t1->GetData().m_vertices[1] = v1;
t1->GetData().m_vertices[2] = v2;
t2->GetData().m_vertices[0] = v1;
t2->GetData().m_vertices[1] = v0;
t2->GetData().m_vertices[2] = v2;
return ICHullErrorOK;
}
if (m_isFlat) {
m_mesh.m_triangles.Clear();
m_mesh.m_edges.Clear();
m_isFlat = false;
}
if (m_mesh.GetNTriangles() == 0) // we have to create the first polyhedron
{
ICHullError res = DoubleTriangle();
if (res != ICHullErrorOK) {
return res;
}
else {
addedPoints += 3;
}
}
CircularList<TMMVertex>& vertices = m_mesh.GetVertices();
while (!vertices.GetData().m_tag && addedPoints < nPointsCH) // not processed
{
if (!FindMaxVolumePoint((addedPoints > 4) ? minVolume : 0.0)) {
break;
}
vertices.GetData().m_tag = true;
if (ProcessPoint()) {
addedPoints++;
CleanUp(addedPoints);
if (!GetMesh().CheckConsistancy()) {
size_t nV = m_mesh.GetNVertices();
CircularList<TMMVertex>& vertices = m_mesh.GetVertices();
for (size_t v = 0; v < nV; ++v) {
if (vertices.GetData().m_name == sc_dummyIndex) {
vertices.Delete();
break;
}
vertices.Next();
}
return ICHullErrorInconsistent;
}
vertices.Next();
}
}
// delete remaining points
while (!vertices.GetData().m_tag) {
vertices.Delete();
}
if (m_isFlat) {
SArray<CircularListElement<TMMTriangle>*> trianglesToDuplicate;
size_t nT = m_mesh.GetNTriangles();
for (size_t f = 0; f < nT; f++) {
TMMTriangle& currentTriangle = m_mesh.m_triangles.GetHead()->GetData();
if (currentTriangle.m_vertices[0]->GetData().m_name == sc_dummyIndex || currentTriangle.m_vertices[1]->GetData().m_name == sc_dummyIndex || currentTriangle.m_vertices[2]->GetData().m_name == sc_dummyIndex) {
m_trianglesToDelete.PushBack(m_mesh.m_triangles.GetHead());
for (int32_t k = 0; k < 3; k++) {
for (int32_t h = 0; h < 2; h++) {
if (currentTriangle.m_edges[k]->GetData().m_triangles[h] == m_mesh.m_triangles.GetHead()) {
currentTriangle.m_edges[k]->GetData().m_triangles[h] = 0;
break;
}
}
}
}
else {
trianglesToDuplicate.PushBack(m_mesh.m_triangles.GetHead());
}
m_mesh.m_triangles.Next();
}
size_t nE = m_mesh.GetNEdges();
for (size_t e = 0; e < nE; e++) {
TMMEdge& currentEdge = m_mesh.m_edges.GetHead()->GetData();
if (currentEdge.m_triangles[0] == 0 && currentEdge.m_triangles[1] == 0) {
m_edgesToDelete.PushBack(m_mesh.m_edges.GetHead());
}
m_mesh.m_edges.Next();
}
size_t nV = m_mesh.GetNVertices();
CircularList<TMMVertex>& vertices = m_mesh.GetVertices();
for (size_t v = 0; v < nV; ++v) {
if (vertices.GetData().m_name == sc_dummyIndex) {
vertices.Delete();
}
else {
vertices.GetData().m_tag = false;
vertices.Next();
}
}
CleanEdges();
CleanTriangles();
CircularListElement<TMMTriangle>* newTriangle;
for (size_t t = 0; t < trianglesToDuplicate.Size(); t++) {
newTriangle = m_mesh.AddTriangle();
newTriangle->GetData().m_vertices[0] = trianglesToDuplicate[t]->GetData().m_vertices[1];
newTriangle->GetData().m_vertices[1] = trianglesToDuplicate[t]->GetData().m_vertices[0];
newTriangle->GetData().m_vertices[2] = trianglesToDuplicate[t]->GetData().m_vertices[2];
}
}
return ICHullErrorOK;
}
bool ICHull::FindMaxVolumePoint(const double minVolume)
{
CircularList<TMMVertex>& vertices = m_mesh.GetVertices();
CircularListElement<TMMVertex>* vMaxVolume = 0;
CircularListElement<TMMVertex>* vHeadPrev = vertices.GetHead()->GetPrev();
double maxVolume = minVolume;
double volume = 0.0;
while (!vertices.GetData().m_tag) // not processed
{
if (ComputePointVolume(volume, false)) {
if (maxVolume < volume) {
maxVolume = volume;
vMaxVolume = vertices.GetHead();
}
vertices.Next();
}
}
CircularListElement<TMMVertex>* vHead = vHeadPrev->GetNext();
vertices.GetHead() = vHead;
if (!vMaxVolume) {
return false;
}
if (vMaxVolume != vHead) {
Vec3<double> pos = vHead->GetData().m_pos;
int32_t id = vHead->GetData().m_name;
vHead->GetData().m_pos = vMaxVolume->GetData().m_pos;
vHead->GetData().m_name = vMaxVolume->GetData().m_name;
vMaxVolume->GetData().m_pos = pos;
vMaxVolume->GetData().m_name = id;
}
return true;
}
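// Builds the initial "double triangle": the first three non-colinear points
// form two coincident triangles with opposite orientation (sharing the same
// edges). A fourth, non-coplanar point is then swapped to the head of the
// vertex list; if every remaining point is coplanar, a dummy vertex at
// (barycenter + unit normal) is appended instead and the hull is flagged as
// flat so the dummy can be removed later.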
ICHullError ICHull::DoubleTriangle()
{
// find three non-colinear points
m_isFlat = false;
CircularList<TMMVertex>& vertices = m_mesh.GetVertices();
CircularListElement<TMMVertex>* v0 = vertices.GetHead();
while (Colinear(v0->GetData().m_pos,
v0->GetNext()->GetData().m_pos,
v0->GetNext()->GetNext()->GetData().m_pos)) {
if ((v0 = v0->GetNext()) == vertices.GetHead()) {
return ICHullErrorCoplanarPoints;
}
}
CircularListElement<TMMVertex>* v1 = v0->GetNext();
CircularListElement<TMMVertex>* v2 = v1->GetNext();
// mark points as processed
v0->GetData().m_tag = v1->GetData().m_tag = v2->GetData().m_tag = true;
// create two triangles
CircularListElement<TMMTriangle>* f0 = MakeFace(v0, v1, v2, 0);
MakeFace(v2, v1, v0, f0);
// find a fourth non-coplanar point to form tetrahedron
CircularListElement<TMMVertex>* v3 = v2->GetNext();
vertices.GetHead() = v3;
double vol = ComputeVolume4(v0->GetData().m_pos, v1->GetData().m_pos, v2->GetData().m_pos, v3->GetData().m_pos);
while (fabs(vol) < sc_eps && !v3->GetNext()->GetData().m_tag) {
v3 = v3->GetNext();
vol = ComputeVolume4(v0->GetData().m_pos, v1->GetData().m_pos, v2->GetData().m_pos, v3->GetData().m_pos);
}
if (fabs(vol) < sc_eps) {
// compute the barycenter
Vec3<double> bary(0.0, 0.0, 0.0);
CircularListElement<TMMVertex>* vBary = v0;
do {
bary += vBary->GetData().m_pos;
} while ((vBary = vBary->GetNext()) != v0);
bary /= static_cast<double>(vertices.GetSize());
// Compute the normal to the plane
Vec3<double> p0 = v0->GetData().m_pos;
Vec3<double> p1 = v1->GetData().m_pos;
Vec3<double> p2 = v2->GetData().m_pos;
m_normal = (p1 - p0) ^ (p2 - p0);
m_normal.Normalize();
// add dummy vertex placed at (bary + normal)
vertices.GetHead() = v2;
Vec3<double> newPt = bary + m_normal;
AddPoint(newPt, sc_dummyIndex);
m_isFlat = true;
return ICHullErrorOK;
}
else if (v3 != vertices.GetHead()) {
TMMVertex temp;
temp.m_name = v3->GetData().m_name;
temp.m_pos = v3->GetData().m_pos;
v3->GetData().m_name = vertices.GetHead()->GetData().m_name;
v3->GetData().m_pos = vertices.GetHead()->GetData().m_pos;
vertices.GetHead()->GetData().m_name = temp.m_name;
vertices.GetHead()->GetData().m_pos = temp.m_pos;
}
return ICHullErrorOK;
}
CircularListElement<TMMTriangle>* ICHull::MakeFace(CircularListElement<TMMVertex>* v0,
CircularListElement<TMMVertex>* v1,
CircularListElement<TMMVertex>* v2,
CircularListElement<TMMTriangle>* fold)
{
CircularListElement<TMMEdge>* e0;
CircularListElement<TMMEdge>* e1;
CircularListElement<TMMEdge>* e2;
int32_t index = 0;
if (!fold) // if first face to be created
{
e0 = m_mesh.AddEdge(); // create the three edges
e1 = m_mesh.AddEdge();
e2 = m_mesh.AddEdge();
}
else // otherwise re-use existing edges (in reverse order)
{
e0 = fold->GetData().m_edges[2];
e1 = fold->GetData().m_edges[1];
e2 = fold->GetData().m_edges[0];
index = 1;
}
e0->GetData().m_vertices[0] = v0;
e0->GetData().m_vertices[1] = v1;
e1->GetData().m_vertices[0] = v1;
e1->GetData().m_vertices[1] = v2;
e2->GetData().m_vertices[0] = v2;
e2->GetData().m_vertices[1] = v0;
// create the new face
CircularListElement<TMMTriangle>* f = m_mesh.AddTriangle();
f->GetData().m_edges[0] = e0;
f->GetData().m_edges[1] = e1;
f->GetData().m_edges[2] = e2;
f->GetData().m_vertices[0] = v0;
f->GetData().m_vertices[1] = v1;
f->GetData().m_vertices[2] = v2;
// link edges to face f
e0->GetData().m_triangles[index] = e1->GetData().m_triangles[index] = e2->GetData().m_triangles[index] = f;
return f;
}
CircularListElement<TMMTriangle>* ICHull::MakeConeFace(CircularListElement<TMMEdge>* e, CircularListElement<TMMVertex>* p)
{
// create two new edges if they don't already exist
CircularListElement<TMMEdge>* newEdges[2];
for (int32_t i = 0; i < 2; ++i) {
if (!(newEdges[i] = e->GetData().m_vertices[i]->GetData().m_duplicate)) { // if the edge doesn't exist yet, add it and mark the vertex as duplicated
newEdges[i] = m_mesh.AddEdge();
newEdges[i]->GetData().m_vertices[0] = e->GetData().m_vertices[i];
newEdges[i]->GetData().m_vertices[1] = p;
e->GetData().m_vertices[i]->GetData().m_duplicate = newEdges[i];
}
}
// make the new face
CircularListElement<TMMTriangle>* newFace = m_mesh.AddTriangle();
newFace->GetData().m_edges[0] = e;
newFace->GetData().m_edges[1] = newEdges[0];
newFace->GetData().m_edges[2] = newEdges[1];
MakeCCW(newFace, e, p);
for (int32_t i = 0; i < 2; ++i) {
for (int32_t j = 0; j < 2; ++j) {
if (!newEdges[i]->GetData().m_triangles[j]) {
newEdges[i]->GetData().m_triangles[j] = newFace;
break;
}
}
}
return newFace;
}
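// For the vertex currently at the head of the vertex list, sums the volumes of
// the tetrahedra formed by that vertex and every face that can "see" it
// (negative signed volume). With markVisibleFaces set those faces are flagged
// and queued for deletion; if the point sees every face the flags are reverted
// (degenerate case), and if it sees no face at all it lies inside the hull and
// is deleted.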
bool ICHull::ComputePointVolume(double& totalVolume, bool markVisibleFaces)
{
// mark visible faces
CircularListElement<TMMTriangle>* fHead = m_mesh.GetTriangles().GetHead();
CircularListElement<TMMTriangle>* f = fHead;
CircularList<TMMVertex>& vertices = m_mesh.GetVertices();
CircularListElement<TMMVertex>* vertex0 = vertices.GetHead();
bool visible = false;
Vec3<double> pos0 = Vec3<double>(vertex0->GetData().m_pos.X(),
vertex0->GetData().m_pos.Y(),
vertex0->GetData().m_pos.Z());
double vol = 0.0;
totalVolume = 0.0;
Vec3<double> ver0, ver1, ver2;
do {
ver0.X() = f->GetData().m_vertices[0]->GetData().m_pos.X();
ver0.Y() = f->GetData().m_vertices[0]->GetData().m_pos.Y();
ver0.Z() = f->GetData().m_vertices[0]->GetData().m_pos.Z();
ver1.X() = f->GetData().m_vertices[1]->GetData().m_pos.X();
ver1.Y() = f->GetData().m_vertices[1]->GetData().m_pos.Y();
ver1.Z() = f->GetData().m_vertices[1]->GetData().m_pos.Z();
ver2.X() = f->GetData().m_vertices[2]->GetData().m_pos.X();
ver2.Y() = f->GetData().m_vertices[2]->GetData().m_pos.Y();
ver2.Z() = f->GetData().m_vertices[2]->GetData().m_pos.Z();
vol = ComputeVolume4(ver0, ver1, ver2, pos0);
if (vol < -sc_eps) {
vol = fabs(vol);
totalVolume += vol;
if (markVisibleFaces) {
f->GetData().m_visible = true;
m_trianglesToDelete.PushBack(f);
}
visible = true;
}
f = f->GetNext();
} while (f != fHead);
if (m_trianglesToDelete.Size() == m_mesh.m_triangles.GetSize()) {
for (size_t i = 0; i < m_trianglesToDelete.Size(); i++) {
m_trianglesToDelete[i]->GetData().m_visible = false;
}
visible = false;
}
// if no faces visible from p then p is inside the hull
if (!visible && markVisibleFaces) {
vertices.Delete();
m_trianglesToDelete.Resize(0);
return false;
}
return true;
}
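// Folds the head vertex into the hull: after ComputePointVolume() has flagged
// the faces visible from the point, edges whose two incident faces are both
// visible lie strictly inside the visible region and are marked for deletion,
// while edges with exactly one visible face form the horizon and receive a new
// cone face connecting them to the point.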
bool ICHull::ProcessPoint()
{
double totalVolume = 0.0;
if (!ComputePointVolume(totalVolume, true)) {
return false;
}
// Mark edges in interior of visible region for deletion.
// Create a new face based on each border edge
CircularListElement<TMMVertex>* v0 = m_mesh.GetVertices().GetHead();
CircularListElement<TMMEdge>* eHead = m_mesh.GetEdges().GetHead();
CircularListElement<TMMEdge>* e = eHead;
CircularListElement<TMMEdge>* tmp = 0;
int32_t nvisible = 0;
m_edgesToDelete.Resize(0);
m_edgesToUpdate.Resize(0);
do {
tmp = e->GetNext();
nvisible = 0;
for (int32_t k = 0; k < 2; k++) {
if (e->GetData().m_triangles[k]->GetData().m_visible) {
nvisible++;
}
}
if (nvisible == 2) {
m_edgesToDelete.PushBack(e);
}
else if (nvisible == 1) {
e->GetData().m_newFace = MakeConeFace(e, v0);
m_edgesToUpdate.PushBack(e);
}
e = tmp;
} while (e != eHead);
return true;
}
bool ICHull::MakeCCW(CircularListElement<TMMTriangle>* f,
CircularListElement<TMMEdge>* e,
CircularListElement<TMMVertex>* v)
{
// the visible face adjacent to e
CircularListElement<TMMTriangle>* fv;
if (e->GetData().m_triangles[0]->GetData().m_visible) {
fv = e->GetData().m_triangles[0];
}
else {
fv = e->GetData().m_triangles[1];
}
// set vertex[0] and vertex[1] to have the same orientation as the corresponding vertices of fv.
int32_t i; // index of e->m_vertices[0] in fv
CircularListElement<TMMVertex>* v0 = e->GetData().m_vertices[0];
CircularListElement<TMMVertex>* v1 = e->GetData().m_vertices[1];
for (i = 0; fv->GetData().m_vertices[i] != v0; i++)
;
if (fv->GetData().m_vertices[(i + 1) % 3] != e->GetData().m_vertices[1]) {
f->GetData().m_vertices[0] = v1;
f->GetData().m_vertices[1] = v0;
}
else {
f->GetData().m_vertices[0] = v0;
f->GetData().m_vertices[1] = v1;
// swap edges
CircularListElement<TMMEdge>* tmp = f->GetData().m_edges[0];
f->GetData().m_edges[0] = f->GetData().m_edges[1];
f->GetData().m_edges[1] = tmp;
}
f->GetData().m_vertices[2] = v;
return true;
}
bool ICHull::CleanUp(uint32_t& addedPoints)
{
bool r0 = CleanEdges();
bool r1 = CleanTriangles();
bool r2 = CleanVertices(addedPoints);
return r0 && r1 && r2;
}
bool ICHull::CleanEdges()
{
// integrate the new faces into the data structure
CircularListElement<TMMEdge>* e;
const size_t ne_update = m_edgesToUpdate.Size();
for (size_t i = 0; i < ne_update; ++i) {
e = m_edgesToUpdate[i];
if (e->GetData().m_newFace) {
if (e->GetData().m_triangles[0]->GetData().m_visible) {
e->GetData().m_triangles[0] = e->GetData().m_newFace;
}
else {
e->GetData().m_triangles[1] = e->GetData().m_newFace;
}
e->GetData().m_newFace = 0;
}
}
// delete edges marked for deletion
CircularList<TMMEdge>& edges = m_mesh.GetEdges();
const size_t ne_delete = m_edgesToDelete.Size();
for (size_t i = 0; i < ne_delete; ++i) {
edges.Delete(m_edgesToDelete[i]);
}
m_edgesToDelete.Resize(0);
m_edgesToUpdate.Resize(0);
return true;
}
bool ICHull::CleanTriangles()
{
CircularList<TMMTriangle>& triangles = m_mesh.GetTriangles();
const size_t nt_delete = m_trianglesToDelete.Size();
for (size_t i = 0; i < nt_delete; ++i) {
triangles.Delete(m_trianglesToDelete[i]);
}
m_trianglesToDelete.Resize(0);
return true;
}
bool ICHull::CleanVertices(uint32_t& addedPoints)
{
// mark all vertices incident to some undeleted edge as on the hull
CircularList<TMMEdge>& edges = m_mesh.GetEdges();
CircularListElement<TMMEdge>* e = edges.GetHead();
size_t nE = edges.GetSize();
for (size_t i = 0; i < nE; i++) {
e->GetData().m_vertices[0]->GetData().m_onHull = true;
e->GetData().m_vertices[1]->GetData().m_onHull = true;
e = e->GetNext();
}
// delete all the vertices that have been processed but are not on the hull
CircularList<TMMVertex>& vertices = m_mesh.GetVertices();
CircularListElement<TMMVertex>* vHead = vertices.GetHead();
CircularListElement<TMMVertex>* v = vHead;
v = v->GetPrev();
do {
if (v->GetData().m_tag && !v->GetData().m_onHull) {
CircularListElement<TMMVertex>* tmp = v->GetPrev();
vertices.Delete(v);
v = tmp;
addedPoints--;
}
else {
v->GetData().m_duplicate = 0;
v->GetData().m_onHull = false;
v = v->GetPrev();
}
} while (v->GetData().m_tag && v != vHead);
return true;
}
void ICHull::Clear()
{
m_mesh.Clear();
m_edgesToDelete.Resize(0);
m_edgesToUpdate.Resize(0);
m_trianglesToDelete.Resize(0);
m_isFlat = false;
}
const ICHull& ICHull::operator=(ICHull& rhs)
{
if (&rhs != this) {
m_mesh.Copy(rhs.m_mesh);
m_edgesToDelete = rhs.m_edgesToDelete;
m_edgesToUpdate = rhs.m_edgesToUpdate;
m_trianglesToDelete = rhs.m_trianglesToDelete;
m_isFlat = rhs.m_isFlat;
}
return (*this);
}
bool ICHull::IsInside(const Vec3<double>& pt0, const double eps)
{
const Vec3<double> pt(pt0.X(), pt0.Y(), pt0.Z());
if (m_isFlat) {
size_t nT = m_mesh.m_triangles.GetSize();
Vec3<double> ver0, ver1, ver2, a, b, c;
double u, v;
for (size_t t = 0; t < nT; t++) {
ver0.X() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[0]->GetData().m_pos.X();
ver0.Y() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[0]->GetData().m_pos.Y();
ver0.Z() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[0]->GetData().m_pos.Z();
ver1.X() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[1]->GetData().m_pos.X();
ver1.Y() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[1]->GetData().m_pos.Y();
ver1.Z() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[1]->GetData().m_pos.Z();
ver2.X() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[2]->GetData().m_pos.X();
ver2.Y() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[2]->GetData().m_pos.Y();
ver2.Z() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[2]->GetData().m_pos.Z();
a = ver1 - ver0;
b = ver2 - ver0;
c = pt - ver0;
u = c * a;
v = c * b;
if (u >= 0.0 && u <= 1.0 && v >= 0.0 && u + v <= 1.0) {
return true;
}
m_mesh.m_triangles.Next();
}
return false;
}
else {
size_t nT = m_mesh.m_triangles.GetSize();
Vec3<double> ver0, ver1, ver2;
double vol;
for (size_t t = 0; t < nT; t++) {
ver0.X() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[0]->GetData().m_pos.X();
ver0.Y() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[0]->GetData().m_pos.Y();
ver0.Z() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[0]->GetData().m_pos.Z();
ver1.X() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[1]->GetData().m_pos.X();
ver1.Y() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[1]->GetData().m_pos.Y();
ver1.Z() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[1]->GetData().m_pos.Z();
ver2.X() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[2]->GetData().m_pos.X();
ver2.Y() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[2]->GetData().m_pos.Y();
ver2.Z() = m_mesh.m_triangles.GetHead()->GetData().m_vertices[2]->GetData().m_pos.Z();
vol = ComputeVolume4(ver0, ver1, ver2, pt);
if (vol < eps) {
return false;
}
m_mesh.m_triangles.Next();
}
return true;
}
}
}
| 28,913 | C++ | 38.5 | 756 | 0.562896 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/src/vhacdRaycastMesh.cpp | #include "vhacdRaycastMesh.h"
#include <math.h>
#include <assert.h>
namespace RAYCAST_MESH
{
/* a = b - c */
#define vector(a,b,c) \
(a)[0] = (b)[0] - (c)[0]; \
(a)[1] = (b)[1] - (c)[1]; \
(a)[2] = (b)[2] - (c)[2];
#define innerProduct(v,q) \
((v)[0] * (q)[0] + \
(v)[1] * (q)[1] + \
(v)[2] * (q)[2])
#define crossProduct(a,b,c) \
(a)[0] = (b)[1] * (c)[2] - (c)[1] * (b)[2]; \
(a)[1] = (b)[2] * (c)[0] - (c)[2] * (b)[0]; \
(a)[2] = (b)[0] * (c)[1] - (c)[0] * (b)[1];
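/* Moller-Trumbore ray/triangle intersection: computes the ray parameter t of
the hit of ray p + t*d against triangle (v0, v1, v2); intersections behind the
ray origin (t <= 0) are rejected. */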
static inline bool rayIntersectsTriangle(const double *p,const double *d,const double *v0,const double *v1,const double *v2,double &t)
{
double e1[3],e2[3],h[3],s[3],q[3];
double a,f,u,v;
vector(e1,v1,v0);
vector(e2,v2,v0);
crossProduct(h,d,e2);
a = innerProduct(e1,h);
if (a > -0.00001 && a < 0.00001)
return(false);
f = 1/a;
vector(s,p,v0);
u = f * (innerProduct(s,h));
if (u < 0.0 || u > 1.0)
return(false);
crossProduct(q,s,e1);
v = f * innerProduct(d,q);
if (v < 0.0 || u + v > 1.0)
return(false);
// at this stage we can compute t to find out where
// the intersection point is on the line
t = f * innerProduct(e2,q);
if (t > 0) // ray intersection
return(true);
else // this means that there is a line intersection
// but not a ray intersection
return (false);
}
static double getPointDistance(const double *p1, const double *p2)
{
double dx = p1[0] - p2[0];
double dy = p1[1] - p2[1];
double dz = p1[2] - p2[2];
return sqrt(dx*dx + dy*dy + dz*dz);
}
class MyRaycastMesh : public VHACD::RaycastMesh
{
public:
template <class T>
MyRaycastMesh(uint32_t vcount,
const T *vertices,
uint32_t tcount,
const uint32_t *indices)
{
mVcount = vcount;
mVertices = new double[mVcount * 3];
for (uint32_t i = 0; i < mVcount; i++)
{
mVertices[i * 3 + 0] = vertices[0];
mVertices[i * 3 + 1] = vertices[1];
mVertices[i * 3 + 2] = vertices[2];
vertices += 3;
}
mTcount = tcount;
mIndices = new uint32_t[mTcount * 3];
for (uint32_t i = 0; i < mTcount; i++)
{
mIndices[i * 3 + 0] = indices[0];
mIndices[i * 3 + 1] = indices[1];
mIndices[i * 3 + 2] = indices[2];
indices += 3;
}
}
~MyRaycastMesh(void)
{
delete[]mVertices;
delete[]mIndices;
}
virtual void release(void)
{
delete this;
}
virtual bool raycast(const double *from, // The starting point of the raycast
const double *to, // The ending point of the raycast
const double *closestToPoint, // The point to match the nearest hit location (can just be the 'from' location if no specific point is needed)
double *hitLocation, // The point where the ray hit nearest to the 'closestToPoint' location
double *hitDistance) final // The distance the ray traveled to the hit location
{
bool ret = false;
double dir[3];
dir[0] = to[0] - from[0];
dir[1] = to[1] - from[1];
dir[2] = to[2] - from[2];
double distance = sqrt( dir[0]*dir[0] + dir[1]*dir[1]+dir[2]*dir[2] );
if ( distance < 0.0000000001f ) return false;
double recipDistance = 1.0f / distance;
dir[0]*=recipDistance;
dir[1]*=recipDistance;
dir[2]*=recipDistance;
const uint32_t *indices = mIndices;
const double *vertices = mVertices;
double nearestDistance = distance;
for (uint32_t tri=0; tri<mTcount; tri++)
{
uint32_t i1 = indices[tri*3+0];
uint32_t i2 = indices[tri*3+1];
uint32_t i3 = indices[tri*3+2];
const double *p1 = &vertices[i1*3];
const double *p2 = &vertices[i2*3];
const double *p3 = &vertices[i3*3];
double t;
if ( rayIntersectsTriangle(from,dir,p1,p2,p3,t))
{
double hitPos[3];
hitPos[0] = from[0] + dir[0] * t;
hitPos[1] = from[1] + dir[1] * t;
hitPos[2] = from[2] + dir[2] * t;
double pointDistance = getPointDistance(hitPos, closestToPoint);
if (pointDistance < nearestDistance )
{
nearestDistance = pointDistance;
if ( hitLocation )
{
hitLocation[0] = hitPos[0];
hitLocation[1] = hitPos[1];
hitLocation[2] = hitPos[2];
}
if ( hitDistance )
{
*hitDistance = pointDistance;
}
ret = true;
}
}
}
return ret;
}
uint32_t mVcount;
double *mVertices;
uint32_t mTcount;
uint32_t *mIndices;
};
};
using namespace RAYCAST_MESH;
namespace VHACD
{
RaycastMesh * RaycastMesh::createRaycastMesh(uint32_t vcount, // The number of vertices in the source triangle mesh
const double *vertices, // The array of vertex positions in the format x1,y1,z1..x2,y2,z2.. etc.
uint32_t tcount, // The number of triangles in the source triangle mesh
const uint32_t *indices) // The triangle indices in the format of i1,i2,i3 ... i4,i5,i6, ...
{
MyRaycastMesh *m = new MyRaycastMesh(vcount, vertices, tcount, indices);
return static_cast<RaycastMesh *>(m);
}
RaycastMesh * RaycastMesh::createRaycastMesh(uint32_t vcount, // The number of vertices in the source triangle mesh
const float *vertices, // The array of vertex positions in the format x1,y1,z1..x2,y2,z2.. etc.
uint32_t tcount, // The number of triangles in the source triangle mesh
const uint32_t *indices) // The triangle indices in the format of i1,i2,i3 ... i4,i5,i6, ...
{
MyRaycastMesh *m = new MyRaycastMesh(vcount, vertices, tcount, indices);
return static_cast<RaycastMesh *>(m);
}
} // end of VHACD namespace | 6,352 | C++ | 29.543269 | 141 | 0.515113 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/src/vhacdMesh.cpp | /* Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com)
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define _CRT_SECURE_NO_WARNINGS
#include "btConvexHullComputer.h"
#include "vhacdMesh.h"
#include <fstream>
#include <iosfwd>
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <string>
namespace VHACD {
Mesh::Mesh()
{
m_diag = 1.0;
}
Mesh::~Mesh()
{
}
Vec3<double>& Mesh::ComputeCenter(void)
{
const size_t nV = GetNPoints();
if (nV)
{
m_minBB = GetPoint(0);
m_maxBB = GetPoint(0);
for (size_t v = 1; v < nV; v++)
{
Vec3<double> p = GetPoint(v);
if (p.X() < m_minBB.X())
{
m_minBB.X() = p.X();
}
if (p.Y() < m_minBB.Y())
{
m_minBB.Y() = p.Y();
}
if (p.Z() < m_minBB.Z())
{
m_minBB.Z() = p.Z();
}
if (p.X() > m_maxBB.X())
{
m_maxBB.X() = p.X();
}
if (p.Y() > m_maxBB.Y())
{
m_maxBB.Y() = p.Y();
}
if (p.Z() > m_maxBB.Z())
{
m_maxBB.Z() = p.Z();
}
}
m_center.X() = (m_maxBB.X() - m_minBB.X())*0.5 + m_minBB.X();
m_center.Y() = (m_maxBB.Y() - m_minBB.Y())*0.5 + m_minBB.Y();
m_center.Z() = (m_maxBB.Z() - m_minBB.Z())*0.5 + m_minBB.Z();
}
return m_center;
}
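// Signed volume of the closed, consistently wound mesh: sums the signed volumes
// of the tetrahedra spanned by each triangle and the vertex barycenter.
// ComputeVolume4() returns six times a tetrahedron volume, hence the final
// division by 6.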
double Mesh::ComputeVolume() const
{
const size_t nV = GetNPoints();
const size_t nT = GetNTriangles();
if (nV == 0 || nT == 0) {
return 0.0;
}
Vec3<double> bary(0.0, 0.0, 0.0);
for (size_t v = 0; v < nV; v++) {
bary += GetPoint(v);
}
bary /= static_cast<double>(nV);
Vec3<double> ver0, ver1, ver2;
double totalVolume = 0.0;
for (int32_t t = 0; t < int32_t(nT); t++) {
const Vec3<int32_t>& tri = GetTriangle(t);
ver0 = GetPoint(tri[0]);
ver1 = GetPoint(tri[1]);
ver2 = GetPoint(tri[2]);
totalVolume += ComputeVolume4(ver0, ver1, ver2, bary);
}
return totalVolume / 6.0;
}
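// Rebuilds this mesh as the convex hull of a raw point array using
// btConvexHullComputer (negative shrink parameters, i.e. no shrinking) and
// triangulates every output face as a fan around its first vertex.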
void Mesh::ComputeConvexHull(const double* const pts,
const size_t nPts)
{
ResizePoints(0);
ResizeTriangles(0);
btConvexHullComputer ch;
ch.compute(pts, 3 * sizeof(double), (int32_t)nPts, -1.0, -1.0);
for (int32_t v = 0; v < ch.vertices.size(); v++) {
AddPoint(Vec3<double>(ch.vertices[v].getX(), ch.vertices[v].getY(), ch.vertices[v].getZ()));
}
const int32_t nt = ch.faces.size();
for (int32_t t = 0; t < nt; ++t) {
const btConvexHullComputer::Edge* sourceEdge = &(ch.edges[ch.faces[t]]);
int32_t a = sourceEdge->getSourceVertex();
int32_t b = sourceEdge->getTargetVertex();
const btConvexHullComputer::Edge* edge = sourceEdge->getNextEdgeOfFace();
int32_t c = edge->getTargetVertex();
while (c != a) {
AddTriangle(Vec3<int32_t>(a, b, c));
edge = edge->getNextEdgeOfFace();
b = c;
c = edge->getTargetVertex();
}
}
}
void Mesh::Clip(const Plane& plane,
SArray<Vec3<double> >& positivePart,
SArray<Vec3<double> >& negativePart) const
{
const size_t nV = GetNPoints();
if (nV == 0) {
return;
}
double d;
for (size_t v = 0; v < nV; v++) {
const Vec3<double>& pt = GetPoint(v);
d = plane.m_a * pt[0] + plane.m_b * pt[1] + plane.m_c * pt[2] + plane.m_d;
if (d > 0.0) {
positivePart.PushBack(pt);
}
else if (d < 0.0) {
negativePart.PushBack(pt);
}
else {
positivePart.PushBack(pt);
negativePart.PushBack(pt);
}
}
}
bool Mesh::IsInside(const Vec3<double>& pt) const
{
const size_t nV = GetNPoints();
const size_t nT = GetNTriangles();
if (nV == 0 || nT == 0) {
return false;
}
Vec3<double> ver0, ver1, ver2;
double volume;
for (int32_t t = 0; t < int32_t(nT); t++) {
const Vec3<int32_t>& tri = GetTriangle(t);
ver0 = GetPoint(tri[0]);
ver1 = GetPoint(tri[1]);
ver2 = GetPoint(tri[2]);
volume = ComputeVolume4(ver0, ver1, ver2, pt);
if (volume < 0.0) {
return false;
}
}
return true;
}
double Mesh::ComputeDiagBB()
{
const size_t nPoints = GetNPoints();
if (nPoints == 0)
return 0.0;
Vec3<double> minBB = m_points[0];
Vec3<double> maxBB = m_points[0];
double x, y, z;
for (size_t v = 1; v < nPoints; v++) {
x = m_points[v][0];
y = m_points[v][1];
z = m_points[v][2];
if (x < minBB[0])
minBB[0] = x;
else if (x > maxBB[0])
maxBB[0] = x;
if (y < minBB[1])
minBB[1] = y;
else if (y > maxBB[1])
maxBB[1] = y;
if (z < minBB[2])
minBB[2] = z;
else if (z > maxBB[2])
maxBB[2] = z;
}
return (m_diag = (maxBB - minBB).GetNorm());
}
#ifdef VHACD_DEBUG_MESH
bool Mesh::SaveVRML2(const std::string& fileName) const
{
std::ofstream fout(fileName.c_str());
if (fout.is_open()) {
const Material material;
if (SaveVRML2(fout, material)) {
fout.close();
return true;
}
return false;
}
return false;
}
bool Mesh::SaveVRML2(std::ofstream& fout, const Material& material) const
{
if (fout.is_open()) {
fout.setf(std::ios::fixed, std::ios::floatfield);
fout.setf(std::ios::showpoint);
fout.precision(6);
size_t nV = m_points.Size();
size_t nT = m_triangles.Size();
fout << "#VRML V2.0 utf8" << std::endl;
fout << "" << std::endl;
fout << "# Vertices: " << nV << std::endl;
fout << "# Triangles: " << nT << std::endl;
fout << "" << std::endl;
fout << "Group {" << std::endl;
fout << " children [" << std::endl;
fout << " Shape {" << std::endl;
fout << " appearance Appearance {" << std::endl;
fout << " material Material {" << std::endl;
fout << " diffuseColor " << material.m_diffuseColor[0] << " "
<< material.m_diffuseColor[1] << " "
<< material.m_diffuseColor[2] << std::endl;
fout << " ambientIntensity " << material.m_ambientIntensity << std::endl;
fout << " specularColor " << material.m_specularColor[0] << " "
<< material.m_specularColor[1] << " "
<< material.m_specularColor[2] << std::endl;
fout << " emissiveColor " << material.m_emissiveColor[0] << " "
<< material.m_emissiveColor[1] << " "
<< material.m_emissiveColor[2] << std::endl;
fout << " shininess " << material.m_shininess << std::endl;
fout << " transparency " << material.m_transparency << std::endl;
fout << " }" << std::endl;
fout << " }" << std::endl;
fout << " geometry IndexedFaceSet {" << std::endl;
fout << " ccw TRUE" << std::endl;
fout << " solid TRUE" << std::endl;
fout << " convex TRUE" << std::endl;
if (nV > 0) {
fout << " coord DEF co Coordinate {" << std::endl;
fout << " point [" << std::endl;
for (size_t v = 0; v < nV; v++) {
fout << " " << m_points[v][0] << " "
<< m_points[v][1] << " "
<< m_points[v][2] << "," << std::endl;
}
fout << " ]" << std::endl;
fout << " }" << std::endl;
}
if (nT > 0) {
fout << " coordIndex [ " << std::endl;
for (size_t f = 0; f < nT; f++) {
fout << " " << m_triangles[f][0] << ", "
<< m_triangles[f][1] << ", "
<< m_triangles[f][2] << ", -1," << std::endl;
}
fout << " ]" << std::endl;
}
fout << " }" << std::endl;
fout << " }" << std::endl;
fout << " ]" << std::endl;
fout << "}" << std::endl;
return true;
}
return false;
}
bool Mesh::SaveOFF(const std::string& fileName) const
{
std::ofstream fout(fileName.c_str());
if (fout.is_open()) {
size_t nV = m_points.Size();
size_t nT = m_triangles.Size();
fout << "OFF" << std::endl;
fout << nV << " " << nT << " " << 0 << std::endl;
for (size_t v = 0; v < nV; v++) {
fout << m_points[v][0] << " "
<< m_points[v][1] << " "
<< m_points[v][2] << std::endl;
}
for (size_t f = 0; f < nT; f++) {
fout << "3 " << m_triangles[f][0] << " "
<< m_triangles[f][1] << " "
<< m_triangles[f][2] << std::endl;
}
fout.close();
return true;
}
return false;
}
bool Mesh::LoadOFF(const std::string& fileName, bool invert)
{
FILE* fid = fopen(fileName.c_str(), "r");
if (fid) {
const std::string strOFF("OFF");
char temp[1024];
fscanf(fid, "%s", temp);
if (std::string(temp) != strOFF) {
fclose(fid);
return false;
}
else {
int32_t nv = 0;
int32_t nf = 0;
int32_t ne = 0;
fscanf(fid, "%i", &nv);
fscanf(fid, "%i", &nf);
fscanf(fid, "%i", &ne);
m_points.Resize(nv);
m_triangles.Resize(nf);
Vec3<double> coord;
float x, y, z;
for (int32_t p = 0; p < nv; p++) {
fscanf(fid, "%f", &x);
fscanf(fid, "%f", &y);
fscanf(fid, "%f", &z);
m_points[p][0] = x;
m_points[p][1] = y;
m_points[p][2] = z;
}
int32_t i, j, k, s;
for (int32_t t = 0; t < nf; ++t) {
fscanf(fid, "%i", &s);
if (s == 3) {
fscanf(fid, "%i", &i);
fscanf(fid, "%i", &j);
fscanf(fid, "%i", &k);
m_triangles[t][0] = i;
if (invert) {
m_triangles[t][1] = k;
m_triangles[t][2] = j;
}
else {
m_triangles[t][1] = j;
m_triangles[t][2] = k;
}
}
else // FIXME: only triangular faces are supported; skip the indices of this face
{
int32_t dummy;
for (int32_t h = 0; h < s; ++h)
fscanf(fid, "%i", &dummy);
}
}
fclose(fid);
}
}
else {
return false;
}
return true;
}
#endif // VHACD_DEBUG_MESH
}
| 12,636 | C++ | 33.433242 | 756 | 0.4651 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/src/btAlignedAllocator.cpp | /*
Bullet Continuous Collision Detection and Physics Library
Copyright (c) 2003-2006 Erwin Coumans http://continuousphysics.com/Bullet/
This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it freely,
subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
#include "btAlignedAllocator.h"
#ifdef _MSC_VER
#pragma warning(disable:4311 4302)
#endif
int32_t gNumAlignedAllocs = 0;
int32_t gNumAlignedFree = 0;
int32_t gTotalBytesAlignedAllocs = 0; //detect memory leaks
static void* btAllocDefault(size_t size)
{
return malloc(size);
}
static void btFreeDefault(void* ptr)
{
free(ptr);
}
static btAllocFunc* sAllocFunc = btAllocDefault;
static btFreeFunc* sFreeFunc = btFreeDefault;
#if defined(BT_HAS_ALIGNED_ALLOCATOR)
#include <malloc.h>
static void* btAlignedAllocDefault(size_t size, int32_t alignment)
{
return _aligned_malloc(size, (size_t)alignment);
}
static void btAlignedFreeDefault(void* ptr)
{
_aligned_free(ptr);
}
#elif defined(__CELLOS_LV2__)
#include <stdlib.h>
static inline void* btAlignedAllocDefault(size_t size, int32_t alignment)
{
return memalign(alignment, size);
}
static inline void btAlignedFreeDefault(void* ptr)
{
free(ptr);
}
#else
static inline void* btAlignedAllocDefault(size_t size, int32_t alignment)
{
void* ret;
char* real;
unsigned long offset;
real = (char*)sAllocFunc(size + sizeof(void*) + (alignment - 1));
if (real) {
offset = (alignment - (unsigned long)(real + sizeof(void*))) & (alignment - 1);
ret = (void*)((real + sizeof(void*)) + offset);
*((void**)(ret)-1) = (void*)(real);
}
else {
ret = (void*)(real);
}
return (ret);
}
static inline void btAlignedFreeDefault(void* ptr)
{
void* real;
if (ptr) {
real = *((void**)(ptr)-1);
sFreeFunc(real);
}
}
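// Editor's note (not part of the original file): worked example of the fallback path above, assuming
// 8-byte pointers and alignment = 16. If sAllocFunc returns real = 0x1004, then real + sizeof(void*)
// = 0x100C and offset = (16 - 0x100C) & 15 = 4, so the caller receives ret = 0x1010 (16-byte aligned).
// The original pointer 0x1004 is stashed in the word just below ret (at 0x1008), which is exactly what
// btAlignedFreeDefault reads back before handing the block to sFreeFunc.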
#endif
static btAlignedAllocFunc* sAlignedAllocFunc = btAlignedAllocDefault;
static btAlignedFreeFunc* sAlignedFreeFunc = btAlignedFreeDefault;
void btAlignedAllocSetCustomAligned(btAlignedAllocFunc* allocFunc, btAlignedFreeFunc* freeFunc)
{
sAlignedAllocFunc = allocFunc ? allocFunc : btAlignedAllocDefault;
sAlignedFreeFunc = freeFunc ? freeFunc : btAlignedFreeDefault;
}
void btAlignedAllocSetCustom(btAllocFunc* allocFunc, btFreeFunc* freeFunc)
{
sAllocFunc = allocFunc ? allocFunc : btAllocDefault;
sFreeFunc = freeFunc ? freeFunc : btFreeDefault;
}
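// Editor's sketch (not part of the original file): how an application might install its own heap
// functions through the hooks above. MyTrackedMalloc/MyTrackedFree are hypothetical placeholders
// for the caller's instrumented allocator.
static void* MyTrackedMalloc(size_t size) { return malloc(size); } // e.g. record the allocation here
static void MyTrackedFree(void* ptr) { free(ptr); }
static void ExampleInstallCustomAllocators()
{
    btAlignedAllocSetCustom(MyTrackedMalloc, MyTrackedFree); // route unaligned requests through the custom heap
    btAlignedAllocSetCustomAligned(0, 0);                    // passing null keeps the built-in aligned handlers
}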
#ifdef BT_DEBUG_MEMORY_ALLOCATIONS
//this generic allocator provides the total allocated number of bytes
#include <stdio.h>
void* btAlignedAllocInternal(size_t size, int32_t alignment, int32_t line, char* filename)
{
void* ret;
char* real;
unsigned long offset;
gTotalBytesAlignedAllocs += size;
gNumAlignedAllocs++;
real = (char*)sAllocFunc(size + 2 * sizeof(void*) + (alignment - 1));
if (real) {
offset = (alignment - (unsigned long)(real + 2 * sizeof(void*))) & (alignment - 1);
ret = (void*)((real + 2 * sizeof(void*)) + offset);
*((void**)(ret)-1) = (void*)(real);
*((int32_t*)(ret)-2) = size;
}
else {
ret = (void*)(real); //??
}
printf("allocation#%d at address %x, from %s,line %d, size %d\n", gNumAlignedAllocs, real, filename, line, size);
int32_t* ptr = (int32_t*)ret;
*ptr = 12;
return (ret);
}
void btAlignedFreeInternal(void* ptr, int32_t line, char* filename)
{
void* real;
gNumAlignedFree++;
if (ptr) {
real = *((void**)(ptr)-1);
int32_t size = *((int32_t*)(ptr)-2);
gTotalBytesAlignedAllocs -= size;
printf("free #%d at address %x, from %s,line %d, size %d\n", gNumAlignedFree, real, filename, line, size);
sFreeFunc(real);
}
else {
printf("NULL ptr\n");
}
}
#else //BT_DEBUG_MEMORY_ALLOCATIONS
void* btAlignedAllocInternal(size_t size, int32_t alignment)
{
gNumAlignedAllocs++;
void* ptr;
ptr = sAlignedAllocFunc(size, alignment);
// printf("btAlignedAllocInternal %d, %x\n",size,ptr);
return ptr;
}
void btAlignedFreeInternal(void* ptr)
{
if (!ptr) {
return;
}
gNumAlignedFree++;
// printf("btAlignedFreeInternal %x\n",ptr);
sAlignedFreeFunc(ptr);
}
#endif //BT_DEBUG_MEMORY_ALLOCATIONS
| 4,932 | C++ | 26.254144 | 243 | 0.681671 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/src/VHACD-ASYNC.cpp | #include "../public/VHACD.h"
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <thread>
#include <atomic>
#include <mutex>
#include <string>
#include <float.h>
#define ENABLE_ASYNC 1
#define HACD_ALLOC(x) malloc(x)
#define HACD_FREE(x) free(x)
#define HACD_ASSERT(x) assert(x)
namespace VHACD
{
class MyHACD_API : public VHACD::IVHACD, public VHACD::IVHACD::IUserCallback, VHACD::IVHACD::IUserLogger
{
public:
MyHACD_API(void)
{
mVHACD = VHACD::CreateVHACD();
}
virtual ~MyHACD_API(void)
{
releaseHACD();
Cancel();
mVHACD->Release();
}
virtual bool Compute(const double* const _points,
const uint32_t countPoints,
const uint32_t* const _triangles,
const uint32_t countTriangles,
const Parameters& _desc) final
{
#if ENABLE_ASYNC
        Cancel(); // if a decomposition is already running, cancel it first
releaseHACD();
// We need to copy the input vertices and triangles into our own buffers so we can operate
// on them safely from the background thread.
mVertices = (double *)HACD_ALLOC(sizeof(double)*countPoints * 3);
mIndices = (uint32_t *)HACD_ALLOC(sizeof(uint32_t)*countTriangles * 3);
memcpy(mVertices, _points, sizeof(double)*countPoints * 3);
memcpy(mIndices, _triangles, sizeof(uint32_t)*countTriangles * 3);
mRunning = true;
mThread = new std::thread([this, countPoints, countTriangles, _desc]()
{
ComputeNow(mVertices, countPoints, mIndices, countTriangles, _desc);
mRunning = false;
});
#else
releaseHACD();
ComputeNow(_points, countPoints, _triangles, countTriangles, _desc);
#endif
return true;
}
bool ComputeNow(const double* const points,
const uint32_t countPoints,
const uint32_t* const triangles,
const uint32_t countTriangles,
const Parameters& _desc)
{
uint32_t ret = 0;
mHullCount = 0;
mCallback = _desc.m_callback;
mLogger = _desc.m_logger;
IVHACD::Parameters desc = _desc;
// Set our intercepting callback interfaces if non-null
desc.m_callback = desc.m_callback ? this : nullptr;
desc.m_logger = desc.m_logger ? this : nullptr;
if ( countPoints )
{
bool ok = mVHACD->Compute(points, countPoints, triangles, countTriangles, desc);
if (ok)
{
ret = mVHACD->GetNConvexHulls();
mHulls = new IVHACD::ConvexHull[ret];
for (uint32_t i = 0; i < ret; i++)
{
VHACD::IVHACD::ConvexHull vhull;
mVHACD->GetConvexHull(i, vhull);
VHACD::IVHACD::ConvexHull h;
h.m_nPoints = vhull.m_nPoints;
h.m_points = (double *)HACD_ALLOC(sizeof(double) * 3 * h.m_nPoints);
memcpy(h.m_points, vhull.m_points, sizeof(double) * 3 * h.m_nPoints);
h.m_nTriangles = vhull.m_nTriangles;
h.m_triangles = (uint32_t *)HACD_ALLOC(sizeof(uint32_t) * 3 * h.m_nTriangles);
memcpy(h.m_triangles, vhull.m_triangles, sizeof(uint32_t) * 3 * h.m_nTriangles);
h.m_volume = vhull.m_volume;
h.m_center[0] = vhull.m_center[0];
h.m_center[1] = vhull.m_center[1];
h.m_center[2] = vhull.m_center[2];
mHulls[i] = h;
if (mCancel)
{
ret = 0;
break;
}
}
}
}
mHullCount = ret;
return ret ? true : false;
}
void releaseHull(VHACD::IVHACD::ConvexHull &h)
{
HACD_FREE((void *)h.m_triangles);
HACD_FREE((void *)h.m_points);
h.m_triangles = nullptr;
h.m_points = nullptr;
}
virtual void GetConvexHull(const uint32_t index, VHACD::IVHACD::ConvexHull& ch) const final
{
if ( index < mHullCount )
{
ch = mHulls[index];
}
}
void releaseHACD(void) // release memory associated with the last HACD request
{
for (uint32_t i=0; i<mHullCount; i++)
{
releaseHull(mHulls[i]);
}
delete[]mHulls;
mHulls = nullptr;
mHullCount = 0;
HACD_FREE(mVertices);
mVertices = nullptr;
HACD_FREE(mIndices);
mIndices = nullptr;
}
virtual void release(void) // release the HACD_API interface
{
delete this;
}
virtual uint32_t getHullCount(void)
{
return mHullCount;
}
virtual void Cancel() final
{
if (mRunning)
{
mVHACD->Cancel(); // Set the cancel signal to the base VHACD
}
if (mThread)
{
mThread->join(); // Wait for the thread to fully exit before we delete the instance
delete mThread;
mThread = nullptr;
Log("Convex Decomposition thread canceled\n");
}
mCancel = false; // clear the cancel semaphore
}
virtual bool Compute(const float* const points,
const uint32_t countPoints,
const uint32_t* const triangles,
const uint32_t countTriangles,
const Parameters& params) final
{
double *vertices = (double *)HACD_ALLOC(sizeof(double)*countPoints * 3);
const float *source = points;
double *dest = vertices;
for (uint32_t i = 0; i < countPoints; i++)
{
dest[0] = source[0];
dest[1] = source[1];
dest[2] = source[2];
dest += 3;
source += 3;
}
bool ret = Compute(vertices, countPoints, triangles, countTriangles, params);
HACD_FREE(vertices);
return ret;
}
virtual uint32_t GetNConvexHulls() const final
{
processPendingMessages();
return mHullCount;
}
virtual void Clean(void) final // release internally allocated memory
{
Cancel();
releaseHACD();
mVHACD->Clean();
}
virtual void Release(void) final // release IVHACD
{
delete this;
}
virtual bool OCLInit(void* const oclDevice,
IVHACD::IUserLogger* const logger = 0) final
{
return mVHACD->OCLInit(oclDevice, logger);
}
virtual bool OCLRelease(IVHACD::IUserLogger* const logger = 0) final
{
return mVHACD->OCLRelease(logger);
}
virtual void Update(const double overallProgress,
const double stageProgress,
const double operationProgress,
const char* const stage,
const char* const operation) final
{
mMessageMutex.lock();
mHaveUpdateMessage = true;
mOverallProgress = overallProgress;
mStageProgress = stageProgress;
mOperationProgress = operationProgress;
mStage = std::string(stage);
mOperation = std::string(operation);
mMessageMutex.unlock();
}
virtual void Log(const char* const msg) final
{
mMessageMutex.lock();
mHaveLogMessage = true;
mMessage = std::string(msg);
mMessageMutex.unlock();
}
virtual bool IsReady(void) const final
{
processPendingMessages();
return !mRunning;
}
    // As a convenience for the calling application, update and log messages are only delivered from its own
    // main thread. This reduces the burden on the caller, which only ever has to handle progress and log
    // messages in its main application thread.
void processPendingMessages(void) const
{
// If we have a new update message and the user has specified a callback we send the message and clear the semaphore
if (mHaveUpdateMessage && mCallback)
{
mMessageMutex.lock();
mCallback->Update(mOverallProgress, mStageProgress, mOperationProgress, mStage.c_str(), mOperation.c_str());
mHaveUpdateMessage = false;
mMessageMutex.unlock();
}
// If we have a new log message and the user has specified a callback we send the message and clear the semaphore
if (mHaveLogMessage && mLogger)
{
mMessageMutex.lock();
mLogger->Log(mMessage.c_str());
mHaveLogMessage = false;
mMessageMutex.unlock();
}
}
// Will compute the center of mass of the convex hull decomposition results and return it
// in 'centerOfMass'. Returns false if the center of mass could not be computed.
virtual bool ComputeCenterOfMass(double centerOfMass[3]) const
{
bool ret = false;
centerOfMass[0] = 0;
centerOfMass[1] = 0;
centerOfMass[2] = 0;
if (mVHACD && IsReady() )
{
ret = mVHACD->ComputeCenterOfMass(centerOfMass);
}
return ret;
}
// Will analyze the HACD results and compute the constraints solutions.
// It will analyze the point at which any two convex hulls touch each other and
// return the total number of constraint pairs found
virtual uint32_t ComputeConstraints(void) final
{
uint32_t ret = 0;
if (mVHACD && IsReady())
{
ret = mVHACD->ComputeConstraints();
}
return ret;
}
virtual const Constraint *GetConstraint(uint32_t index) const final
{
const Constraint * ret = nullptr;
if (mVHACD && IsReady())
{
ret = mVHACD->GetConstraint(index);
}
return ret;
}
private:
double *mVertices{ nullptr };
uint32_t *mIndices{ nullptr };
std::atomic< uint32_t> mHullCount{ 0 };
VHACD::IVHACD::ConvexHull *mHulls{ nullptr };
VHACD::IVHACD::IUserCallback *mCallback{ nullptr };
VHACD::IVHACD::IUserLogger *mLogger{ nullptr };
VHACD::IVHACD *mVHACD{ nullptr };
std::thread *mThread{ nullptr };
std::atomic< bool > mRunning{ false };
std::atomic<bool> mCancel{ false };
// Thread safe caching mechanism for messages and update status.
// This is so that caller always gets messages in his own thread
// Member variables are marked as 'mutable' since the message dispatch function
// is called from const query methods.
mutable std::mutex mMessageMutex;
mutable std::atomic< bool > mHaveUpdateMessage{ false };
mutable std::atomic< bool > mHaveLogMessage{ false };
mutable double mOverallProgress{ 0 };
mutable double mStageProgress{ 0 };
mutable double mOperationProgress{ 0 };
mutable std::string mStage;
mutable std::string mOperation;
mutable std::string mMessage;
};
IVHACD* CreateVHACD_ASYNC(void)
{
MyHACD_API *m = new MyHACD_API;
return static_cast<IVHACD *>(m);
}
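// Editor's sketch (not part of the original file): the polling pattern this wrapper is built for.
// Compute() copies the input and returns immediately; IsReady() is polled from the caller's own
// thread and also flushes any queued progress/log callbacks back onto that thread.
static void ExampleAsyncDecomposition(const double* vertices, uint32_t vertexCount,
                                      const uint32_t* triangles, uint32_t triangleCount)
{
    IVHACD* hacd = CreateVHACD_ASYNC();
    IVHACD::Parameters params;            // defaults; set m_callback/m_logger to receive progress
    hacd->Compute(vertices, vertexCount, triangles, triangleCount, params);
    while (!hacd->IsReady())              // decomposition still running on the worker thread
    {
        std::this_thread::yield();        // a real caller would do other work here instead
    }
    for (uint32_t i = 0; i < hacd->GetNConvexHulls(); ++i)
    {
        IVHACD::ConvexHull hull;
        hacd->GetConvexHull(i, hull);     // hull memory stays owned by the wrapper until Clean()/Release()
    }
    hacd->Release();
}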
}; // end of VHACD namespace
| 11,328 | C++ | 30.382271 | 124 | 0.565148 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/src/FloatMath.cpp | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <math.h>
#include <float.h>
#include "FloatMath.h"
#include <vector>
#include <malloc.h>
#define REAL float
#include "FloatMath.inl"
#undef REAL
#define REAL double
#include "FloatMath.inl"
| 282 | C++ | 13.894736 | 24 | 0.716312 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/btMinMax.h | /*
Copyright (c) 2003-2006 Gino van den Bergen / Erwin Coumans http://continuousphysics.com/Bullet/
This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it freely,
subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
#ifndef BT_GEN_MINMAX_H
#define BT_GEN_MINMAX_H
#include "btScalar.h"
template <class T>
SIMD_FORCE_INLINE const T& btMin(const T& a, const T& b)
{
return a < b ? a : b;
}
template <class T>
SIMD_FORCE_INLINE const T& btMax(const T& a, const T& b)
{
return a > b ? a : b;
}
template <class T>
SIMD_FORCE_INLINE const T& btClamped(const T& a, const T& lb, const T& ub)
{
return a < lb ? lb : (ub < a ? ub : a);
}
template <class T>
SIMD_FORCE_INLINE void btSetMin(T& a, const T& b)
{
if (b < a) {
a = b;
}
}
template <class T>
SIMD_FORCE_INLINE void btSetMax(T& a, const T& b)
{
if (a < b) {
a = b;
}
}
template <class T>
SIMD_FORCE_INLINE void btClamp(T& a, const T& lb, const T& ub)
{
if (a < lb) {
a = lb;
}
else if (ub < a) {
a = ub;
}
}
#endif //BT_GEN_MINMAX_H
| 1,763 | C | 25.727272 | 243 | 0.680091 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/vhacdMutex.h | /*!
**
** Copyright (c) 2009 by John W. Ratcliff mailto:[email protected]
**
** Portions of this source has been released with the PhysXViewer application, as well as
** Rocket, CreateDynamics, ODF, and as a number of sample code snippets.
**
** If you find this code useful or you are feeling particularily generous I would
** ask that you please go to http://www.amillionpixels.us and make a donation
** to Troy DeMolay.
**
** DeMolay is a youth group for young men between the ages of 12 and 21.
** It teaches strong moral principles, as well as leadership skills and
** public speaking. The donations page uses the 'pay for pixels' paradigm
** where, in this case, a pixel is only a single penny. Donations can be
** made for as small as $4 or as high as a $100 block. Each person who donates
** will get a link to their own site as well as acknowledgement on the
** donations blog located here http://www.amillionpixels.blogspot.com/
**
** If you wish to contact me you can use the following methods:
**
** Skype ID: jratcliff63367
** Yahoo: jratcliff63367
** AOL: jratcliff1961
** email: [email protected]
**
**
** The MIT license:
**
** Permission is hereby granted, free of charge, to any person obtaining a copy
** of this software and associated documentation files (the "Software"), to deal
** in the Software without restriction, including without limitation the rights
** to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
** copies of the Software, and to permit persons to whom the Software is furnished
** to do so, subject to the following conditions:
**
** The above copyright notice and this permission notice shall be included in all
** copies or substantial portions of the Software.
** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
** WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
** CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#pragma once
#ifndef VHACD_MUTEX_H
#define VHACD_MUTEX_H
#if defined(WIN32)
#ifndef _WIN32_WINNT
#define _WIN32_WINNT 0x400
#endif
#include <windows.h>
#pragma comment(lib, "winmm.lib")
#endif
#if defined(__linux__)
//#include <sys/time.h>
#include <errno.h>
#include <time.h>
#include <unistd.h>
#define __stdcall
#endif
#if defined(__APPLE__) || defined(__linux__)
#include <pthread.h>
#endif
#if defined(__APPLE__)
#define PTHREAD_MUTEX_RECURSIVE_NP PTHREAD_MUTEX_RECURSIVE
#endif
#define VHACD_DEBUG
//#define VHACD_NDEBUG
#ifdef VHACD_NDEBUG
#define VHACD_VERIFY(x) (x)
#else
#define VHACD_VERIFY(x) assert((x))
#endif
namespace VHACD {
class Mutex {
public:
Mutex(void)
{
#if defined(WIN32) || defined(_XBOX)
InitializeCriticalSection(&m_mutex);
#elif defined(__APPLE__) || defined(__linux__)
pthread_mutexattr_t mutexAttr; // Mutex Attribute
VHACD_VERIFY(pthread_mutexattr_init(&mutexAttr) == 0);
VHACD_VERIFY(pthread_mutexattr_settype(&mutexAttr, PTHREAD_MUTEX_RECURSIVE_NP) == 0);
VHACD_VERIFY(pthread_mutex_init(&m_mutex, &mutexAttr) == 0);
VHACD_VERIFY(pthread_mutexattr_destroy(&mutexAttr) == 0);
#endif
}
~Mutex(void)
{
#if defined(WIN32) || defined(_XBOX)
DeleteCriticalSection(&m_mutex);
#elif defined(__APPLE__) || defined(__linux__)
VHACD_VERIFY(pthread_mutex_destroy(&m_mutex) == 0);
#endif
}
void Lock(void)
{
#if defined(WIN32) || defined(_XBOX)
EnterCriticalSection(&m_mutex);
#elif defined(__APPLE__) || defined(__linux__)
VHACD_VERIFY(pthread_mutex_lock(&m_mutex) == 0);
#endif
}
bool TryLock(void)
{
#if defined(WIN32) || defined(_XBOX)
bool bRet = false;
//assert(("TryEnterCriticalSection seems to not work on XP???", 0));
bRet = TryEnterCriticalSection(&m_mutex) ? true : false;
return bRet;
#elif defined(__APPLE__) || defined(__linux__)
int32_t result = pthread_mutex_trylock(&m_mutex);
return (result == 0);
#endif
}
void Unlock(void)
{
#if defined(WIN32) || defined(_XBOX)
LeaveCriticalSection(&m_mutex);
#elif defined(__APPLE__) || defined(__linux__)
VHACD_VERIFY(pthread_mutex_unlock(&m_mutex) == 0);
#endif
}
private:
#if defined(WIN32) || defined(_XBOX)
CRITICAL_SECTION m_mutex;
#elif defined(__APPLE__) || defined(__linux__)
pthread_mutex_t m_mutex;
#endif
};
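// Editor's sketch (not part of the original header): a minimal RAII guard over the Mutex above,
// mirroring how Lock()/Unlock() pairs are normally kept exception-safe. 'ScopedLock' is an
// illustrative name, not an existing VHACD type.
class ScopedLock {
public:
    explicit ScopedLock(Mutex& m)
        : m_mutex(m)
    {
        m_mutex.Lock();
    }
    ~ScopedLock(void)
    {
        m_mutex.Unlock();
    }
private:
    ScopedLock(const ScopedLock&);            // non-copyable
    ScopedLock& operator=(const ScopedLock&); // non-assignable
    Mutex& m_mutex;
};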
}
#endif // VHACD_MUTEX_H
| 4,677 | C | 30.395973 | 93 | 0.693179 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/btScalar.h | /*
Copyright (c) 2003-2009 Erwin Coumans http://bullet.googlecode.com
This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it freely,
subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
#ifndef BT_SCALAR_H
#define BT_SCALAR_H
#ifdef BT_MANAGED_CODE
//Aligned data types not supported in managed code
#pragma unmanaged
#endif
#include <float.h>
#include <math.h>
#include <stdlib.h> //size_t for MSVC 6.0
#include <stdint.h>
/* SVN $Revision$ on $Date$ from http://bullet.googlecode.com*/
#define BT_BULLET_VERSION 279
inline int32_t btGetVersion()
{
return BT_BULLET_VERSION;
}
#if defined(DEBUG) || defined(_DEBUG)
#define BT_DEBUG
#endif
#ifdef _WIN32
#if defined(__MINGW32__) || defined(__CYGWIN__) || (defined(_MSC_VER) && _MSC_VER < 1300)
#define SIMD_FORCE_INLINE inline
#define ATTRIBUTE_ALIGNED16(a) a
#define ATTRIBUTE_ALIGNED64(a) a
#define ATTRIBUTE_ALIGNED128(a) a
#else
//#define BT_HAS_ALIGNED_ALLOCATOR
#pragma warning(disable : 4324) // disable padding warning
// #pragma warning(disable:4530) // Disable the exception disable but used in MSCV Stl warning.
// #pragma warning(disable:4996) //Turn off warnings about deprecated C routines
// #pragma warning(disable:4786) // Disable the "debug name too long" warning
#define SIMD_FORCE_INLINE __forceinline
#define ATTRIBUTE_ALIGNED16(a) __declspec(align(16)) a
#define ATTRIBUTE_ALIGNED64(a) __declspec(align(64)) a
#define ATTRIBUTE_ALIGNED128(a) __declspec(align(128)) a
#ifdef _XBOX
#define BT_USE_VMX128
#include <ppcintrinsics.h>
#define BT_HAVE_NATIVE_FSEL
#define btFsel(a, b, c) __fsel((a), (b), (c))
#else
#if (defined(_WIN32) && (_MSC_VER) && _MSC_VER >= 1400) && (!defined(BT_USE_DOUBLE_PRECISION))
#define BT_USE_SSE
#include <emmintrin.h>
#endif
#endif //_XBOX
#endif //__MINGW32__
#include <assert.h>
#ifdef BT_DEBUG
#define btAssert assert
#else
#define btAssert(x)
#endif
//btFullAssert is optional, slows down a lot
#define btFullAssert(x)
#define btLikely(_c) _c
#define btUnlikely(_c) _c
#else
#if defined(__CELLOS_LV2__)
#define SIMD_FORCE_INLINE inline __attribute__((always_inline))
#define ATTRIBUTE_ALIGNED16(a) a __attribute__((aligned(16)))
#define ATTRIBUTE_ALIGNED64(a) a __attribute__((aligned(64)))
#define ATTRIBUTE_ALIGNED128(a) a __attribute__((aligned(128)))
#ifndef assert
#include <assert.h>
#endif
#ifdef BT_DEBUG
#ifdef __SPU__
#include <spu_printf.h>
#define printf spu_printf
#define btAssert(x) \
{ \
if (!(x)) { \
printf("Assert " __FILE__ ":%u (" #x ")\n", __LINE__); \
spu_hcmpeq(0, 0); \
} \
}
#else
#define btAssert assert
#endif
#else
#define btAssert(x)
#endif
//btFullAssert is optional, slows down a lot
#define btFullAssert(x)
#define btLikely(_c) _c
#define btUnlikely(_c) _c
#else
#ifdef USE_LIBSPE2
#define SIMD_FORCE_INLINE __inline
#define ATTRIBUTE_ALIGNED16(a) a __attribute__((aligned(16)))
#define ATTRIBUTE_ALIGNED64(a) a __attribute__((aligned(64)))
#define ATTRIBUTE_ALIGNED128(a) a __attribute__((aligned(128)))
#ifndef assert
#include <assert.h>
#endif
#ifdef BT_DEBUG
#define btAssert assert
#else
#define btAssert(x)
#endif
//btFullAssert is optional, slows down a lot
#define btFullAssert(x)
#define btLikely(_c) __builtin_expect((_c), 1)
#define btUnlikely(_c) __builtin_expect((_c), 0)
#else
//non-windows systems
#if (defined(__APPLE__) && defined(__i386__) && (!defined(BT_USE_DOUBLE_PRECISION)))
#define BT_USE_SSE
#include <emmintrin.h>
#define SIMD_FORCE_INLINE inline
///@todo: check out alignment methods for other platforms/compilers
#define ATTRIBUTE_ALIGNED16(a) a __attribute__((aligned(16)))
#define ATTRIBUTE_ALIGNED64(a) a __attribute__((aligned(64)))
#define ATTRIBUTE_ALIGNED128(a) a __attribute__((aligned(128)))
#ifndef assert
#include <assert.h>
#endif
#if defined(DEBUG) || defined(_DEBUG)
#define btAssert assert
#else
#define btAssert(x)
#endif
//btFullAssert is optional, slows down a lot
#define btFullAssert(x)
#define btLikely(_c) _c
#define btUnlikely(_c) _c
#else
#define SIMD_FORCE_INLINE inline
///@todo: check out alignment methods for other platforms/compilers
///#define ATTRIBUTE_ALIGNED16(a) a __attribute__ ((aligned (16)))
///#define ATTRIBUTE_ALIGNED64(a) a __attribute__ ((aligned (64)))
///#define ATTRIBUTE_ALIGNED128(a) a __attribute__ ((aligned (128)))
#define ATTRIBUTE_ALIGNED16(a) a
#define ATTRIBUTE_ALIGNED64(a) a
#define ATTRIBUTE_ALIGNED128(a) a
#ifndef assert
#include <assert.h>
#endif
#if defined(DEBUG) || defined(_DEBUG)
#define btAssert assert
#else
#define btAssert(x)
#endif
//btFullAssert is optional, slows down a lot
#define btFullAssert(x)
#define btLikely(_c) _c
#define btUnlikely(_c) _c
#endif //__APPLE__
#endif // LIBSPE2
#endif //__CELLOS_LV2__
#endif
///The btScalar type abstracts floating point numbers, to easily switch between double and single floating point precision.
#if defined(BT_USE_DOUBLE_PRECISION)
typedef double btScalar;
//this number could be bigger in double precision
#define BT_LARGE_FLOAT 1e30
#else
typedef float btScalar;
//keep BT_LARGE_FLOAT*BT_LARGE_FLOAT < FLT_MAX
#define BT_LARGE_FLOAT 1e18f
#endif
#define BT_DECLARE_ALIGNED_ALLOCATOR() \
SIMD_FORCE_INLINE void* operator new(size_t sizeInBytes) { return btAlignedAlloc(sizeInBytes, 16); } \
SIMD_FORCE_INLINE void operator delete(void* ptr) { btAlignedFree(ptr); } \
SIMD_FORCE_INLINE void* operator new(size_t, void* ptr) { return ptr; } \
SIMD_FORCE_INLINE void operator delete(void*, void*) {} \
SIMD_FORCE_INLINE void* operator new[](size_t sizeInBytes) { return btAlignedAlloc(sizeInBytes, 16); } \
SIMD_FORCE_INLINE void operator delete[](void* ptr) { btAlignedFree(ptr); } \
SIMD_FORCE_INLINE void* operator new[](size_t, void* ptr) { return ptr; } \
SIMD_FORCE_INLINE void operator delete[](void*, void*) {}
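// Editor's note (not part of the original header): the macro above is meant to be placed inside a
// 16-byte aligned type so its new/delete route through the aligned allocator declared in
// btAlignedAllocator.h (not included here). An illustrative use:
//
//   ATTRIBUTE_ALIGNED16(struct) btExampleAlignedType
//   {
//       BT_DECLARE_ALIGNED_ALLOCATOR();
//       btScalar m_values[4];
//   };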
#if defined(BT_USE_DOUBLE_PRECISION) || defined(BT_FORCE_DOUBLE_FUNCTIONS)
SIMD_FORCE_INLINE btScalar btSqrt(btScalar x)
{
return sqrt(x);
}
SIMD_FORCE_INLINE btScalar btFabs(btScalar x) { return fabs(x); }
SIMD_FORCE_INLINE btScalar btCos(btScalar x) { return cos(x); }
SIMD_FORCE_INLINE btScalar btSin(btScalar x) { return sin(x); }
SIMD_FORCE_INLINE btScalar btTan(btScalar x) { return tan(x); }
SIMD_FORCE_INLINE btScalar btAcos(btScalar x)
{
if (x < btScalar(-1))
x = btScalar(-1);
if (x > btScalar(1))
x = btScalar(1);
return acos(x);
}
SIMD_FORCE_INLINE btScalar btAsin(btScalar x)
{
if (x < btScalar(-1))
x = btScalar(-1);
if (x > btScalar(1))
x = btScalar(1);
return asin(x);
}
SIMD_FORCE_INLINE btScalar btAtan(btScalar x) { return atan(x); }
SIMD_FORCE_INLINE btScalar btAtan2(btScalar x, btScalar y) { return atan2(x, y); }
SIMD_FORCE_INLINE btScalar btExp(btScalar x) { return exp(x); }
SIMD_FORCE_INLINE btScalar btLog(btScalar x) { return log(x); }
SIMD_FORCE_INLINE btScalar btPow(btScalar x, btScalar y) { return pow(x, y); }
SIMD_FORCE_INLINE btScalar btFmod(btScalar x, btScalar y) { return fmod(x, y); }
#else
SIMD_FORCE_INLINE btScalar btSqrt(btScalar y)
{
#ifdef USE_APPROXIMATION
double x, z, tempf;
unsigned long* tfptr = ((unsigned long*)&tempf) + 1;
tempf = y;
*tfptr = (0xbfcdd90a - *tfptr) >> 1; /* estimate of 1/sqrt(y) */
x = tempf;
z = y * btScalar(0.5);
x = (btScalar(1.5) * x) - (x * x) * (x * z); /* iteration formula */
x = (btScalar(1.5) * x) - (x * x) * (x * z);
x = (btScalar(1.5) * x) - (x * x) * (x * z);
x = (btScalar(1.5) * x) - (x * x) * (x * z);
x = (btScalar(1.5) * x) - (x * x) * (x * z);
return x * y;
#else
return sqrtf(y);
#endif
}
SIMD_FORCE_INLINE btScalar btFabs(btScalar x) { return fabsf(x); }
SIMD_FORCE_INLINE btScalar btCos(btScalar x) { return cosf(x); }
SIMD_FORCE_INLINE btScalar btSin(btScalar x) { return sinf(x); }
SIMD_FORCE_INLINE btScalar btTan(btScalar x) { return tanf(x); }
SIMD_FORCE_INLINE btScalar btAcos(btScalar x)
{
if (x < btScalar(-1))
x = btScalar(-1);
if (x > btScalar(1))
x = btScalar(1);
return acosf(x);
}
SIMD_FORCE_INLINE btScalar btAsin(btScalar x)
{
if (x < btScalar(-1))
x = btScalar(-1);
if (x > btScalar(1))
x = btScalar(1);
return asinf(x);
}
SIMD_FORCE_INLINE btScalar btAtan(btScalar x) { return atanf(x); }
SIMD_FORCE_INLINE btScalar btAtan2(btScalar x, btScalar y) { return atan2f(x, y); }
SIMD_FORCE_INLINE btScalar btExp(btScalar x) { return expf(x); }
SIMD_FORCE_INLINE btScalar btLog(btScalar x) { return logf(x); }
SIMD_FORCE_INLINE btScalar btPow(btScalar x, btScalar y) { return powf(x, y); }
SIMD_FORCE_INLINE btScalar btFmod(btScalar x, btScalar y) { return fmodf(x, y); }
#endif
#define SIMD_2_PI btScalar(6.283185307179586232)
#define SIMD_PI (SIMD_2_PI * btScalar(0.5))
#define SIMD_HALF_PI (SIMD_2_PI * btScalar(0.25))
#define SIMD_RADS_PER_DEG (SIMD_2_PI / btScalar(360.0))
#define SIMD_DEGS_PER_RAD (btScalar(360.0) / SIMD_2_PI)
#define SIMDSQRT12 btScalar(0.7071067811865475244008443621048490)
#define btRecipSqrt(x) ((btScalar)(btScalar(1.0) / btSqrt(btScalar(x)))) /* reciprocal square root */
#ifdef BT_USE_DOUBLE_PRECISION
#define SIMD_EPSILON DBL_EPSILON
#define SIMD_INFINITY DBL_MAX
#else
#define SIMD_EPSILON FLT_EPSILON
#define SIMD_INFINITY FLT_MAX
#endif
SIMD_FORCE_INLINE btScalar btAtan2Fast(btScalar y, btScalar x)
{
btScalar coeff_1 = SIMD_PI / 4.0f;
btScalar coeff_2 = 3.0f * coeff_1;
btScalar abs_y = btFabs(y);
btScalar angle;
if (x >= 0.0f) {
btScalar r = (x - abs_y) / (x + abs_y);
angle = coeff_1 - coeff_1 * r;
}
else {
btScalar r = (x + abs_y) / (abs_y - x);
angle = coeff_2 - coeff_1 * r;
}
return (y < 0.0f) ? -angle : angle;
}
SIMD_FORCE_INLINE bool btFuzzyZero(btScalar x) { return btFabs(x) < SIMD_EPSILON; }
SIMD_FORCE_INLINE bool btEqual(btScalar a, btScalar eps)
{
return (((a) <= eps) && !((a) < -eps));
}
SIMD_FORCE_INLINE bool btGreaterEqual(btScalar a, btScalar eps)
{
return (!((a) <= eps));
}
SIMD_FORCE_INLINE int32_t btIsNegative(btScalar x)
{
return x < btScalar(0.0) ? 1 : 0;
}
SIMD_FORCE_INLINE btScalar btRadians(btScalar x) { return x * SIMD_RADS_PER_DEG; }
SIMD_FORCE_INLINE btScalar btDegrees(btScalar x) { return x * SIMD_DEGS_PER_RAD; }
#define BT_DECLARE_HANDLE(name) \
typedef struct name##__ { \
int32_t unused; \
} * name
#ifndef btFsel
SIMD_FORCE_INLINE btScalar btFsel(btScalar a, btScalar b, btScalar c)
{
return a >= 0 ? b : c;
}
#endif
#define btFsels(a, b, c) (btScalar) btFsel(a, b, c)
SIMD_FORCE_INLINE bool btMachineIsLittleEndian()
{
long int i = 1;
const char* p = (const char*)&i;
if (p[0] == 1) // Lowest address contains the least significant byte
return true;
else
return false;
}
///btSelect avoids branches, which makes performance much better for consoles like Playstation 3 and XBox 360
///Thanks Phil Knight. See also http://www.cellperformance.com/articles/2006/04/more_techniques_for_eliminatin_1.html
SIMD_FORCE_INLINE unsigned btSelect(unsigned condition, unsigned valueIfConditionNonZero, unsigned valueIfConditionZero)
{
// Set testNz to 0xFFFFFFFF if condition is nonzero, 0x00000000 if condition is zero
// Rely on positive value or'ed with its negative having sign bit on
// and zero value or'ed with its negative (which is still zero) having sign bit off
// Use arithmetic shift right, shifting the sign bit through all 32 bits
unsigned testNz = (unsigned)(((int32_t)condition | -(int32_t)condition) >> 31);
unsigned testEqz = ~testNz;
return ((valueIfConditionNonZero & testNz) | (valueIfConditionZero & testEqz));
}
SIMD_FORCE_INLINE int32_t btSelect(unsigned condition, int32_t valueIfConditionNonZero, int32_t valueIfConditionZero)
{
unsigned testNz = (unsigned)(((int32_t)condition | -(int32_t)condition) >> 31);
unsigned testEqz = ~testNz;
return static_cast<int32_t>((valueIfConditionNonZero & testNz) | (valueIfConditionZero & testEqz));
}
SIMD_FORCE_INLINE float btSelect(unsigned condition, float valueIfConditionNonZero, float valueIfConditionZero)
{
#ifdef BT_HAVE_NATIVE_FSEL
return (float)btFsel((btScalar)condition - btScalar(1.0f), valueIfConditionNonZero, valueIfConditionZero);
#else
return (condition != 0) ? valueIfConditionNonZero : valueIfConditionZero;
#endif
}
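// Editor's sketch (not part of the original header): the branch-free selection above in action.
SIMD_FORCE_INLINE unsigned btExampleSelectUsage()
{
    unsigned whenNonZero = btSelect(1u, 7u, 3u); // condition != 0 -> first value (7)
    unsigned whenZero = btSelect(0u, 7u, 3u);    // condition == 0 -> second value (3)
    return whenNonZero + whenZero;               // 10
}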
template <typename T>
SIMD_FORCE_INLINE void btSwap(T& a, T& b)
{
T tmp = a;
a = b;
b = tmp;
}
//PCK: endian swapping functions
SIMD_FORCE_INLINE unsigned btSwapEndian(unsigned val)
{
return (((val & 0xff000000) >> 24) | ((val & 0x00ff0000) >> 8) | ((val & 0x0000ff00) << 8) | ((val & 0x000000ff) << 24));
}
SIMD_FORCE_INLINE unsigned short btSwapEndian(unsigned short val)
{
return static_cast<unsigned short>(((val & 0xff00) >> 8) | ((val & 0x00ff) << 8));
}
SIMD_FORCE_INLINE unsigned btSwapEndian(int32_t val)
{
return btSwapEndian((unsigned)val);
}
SIMD_FORCE_INLINE unsigned short btSwapEndian(short val)
{
return btSwapEndian((unsigned short)val);
}
///btSwapEndianFloat uses char pointers to swap the endianness
///btSwapEndianFloat/btSwapEndianDouble will NOT return a float, because the machine might 'correct' invalid floating point values
///Not all values of sign/exponent/mantissa are valid floating point numbers according to IEEE 754.
///When a floating point unit is faced with an invalid value, it may actually change the value, or worse, throw an exception.
///In most systems, running user mode code, you wouldn't get an exception, but instead the hardware/os/runtime will 'fix' the number for you.
///so instead of returning a float/double, we return integer/long long integer
SIMD_FORCE_INLINE uint32_t btSwapEndianFloat(float d)
{
uint32_t a = 0;
unsigned char* dst = (unsigned char*)&a;
unsigned char* src = (unsigned char*)&d;
dst[0] = src[3];
dst[1] = src[2];
dst[2] = src[1];
dst[3] = src[0];
return a;
}
// unswap using char pointers
SIMD_FORCE_INLINE float btUnswapEndianFloat(uint32_t a)
{
float d = 0.0f;
unsigned char* src = (unsigned char*)&a;
unsigned char* dst = (unsigned char*)&d;
dst[0] = src[3];
dst[1] = src[2];
dst[2] = src[1];
dst[3] = src[0];
return d;
}
// swap using char pointers
SIMD_FORCE_INLINE void btSwapEndianDouble(double d, unsigned char* dst)
{
unsigned char* src = (unsigned char*)&d;
dst[0] = src[7];
dst[1] = src[6];
dst[2] = src[5];
dst[3] = src[4];
dst[4] = src[3];
dst[5] = src[2];
dst[6] = src[1];
dst[7] = src[0];
}
// unswap using char pointers
SIMD_FORCE_INLINE double btUnswapEndianDouble(const unsigned char* src)
{
double d = 0.0;
unsigned char* dst = (unsigned char*)&d;
dst[0] = src[7];
dst[1] = src[6];
dst[2] = src[5];
dst[3] = src[4];
dst[4] = src[3];
dst[5] = src[2];
dst[6] = src[1];
dst[7] = src[0];
return d;
}
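// Editor's sketch (not part of the original header): because the float/double swappers above return
// raw integer bits rather than floats, a cross-endian round trip stays bit-exact.
SIMD_FORCE_INLINE float btExampleEndianRoundTrip(float value)
{
    uint32_t wireBits = btSwapEndianFloat(value); // host byte order -> swapped byte order
    return btUnswapEndianFloat(wireBits);         // swapped byte order -> host, same bit pattern
}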
// returns normalized value in range [-SIMD_PI, SIMD_PI]
SIMD_FORCE_INLINE btScalar btNormalizeAngle(btScalar angleInRadians)
{
angleInRadians = btFmod(angleInRadians, SIMD_2_PI);
if (angleInRadians < -SIMD_PI) {
return angleInRadians + SIMD_2_PI;
}
else if (angleInRadians > SIMD_PI) {
return angleInRadians - SIMD_2_PI;
}
else {
return angleInRadians;
}
}
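// Editor's note (not part of the original header): examples of the wrapping above --
// btNormalizeAngle(btScalar(7.0)) returns roughly 0.7168 (7 - 2*pi), and
// btNormalizeAngle(btScalar(-4.0)) returns roughly 2.2832 (-4 + 2*pi).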
///rudimentary class to provide type info
struct btTypedObject {
btTypedObject(int32_t objectType)
: m_objectType(objectType)
{
}
int32_t m_objectType;
inline int32_t getObjectType() const
{
return m_objectType;
}
};
#endif //BT_SCALAR_H
| 16,847 | C | 30.550562 | 243 | 0.662136 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/vhacdRaycastMesh.h | #ifndef RAYCAST_MESH_H
#define RAYCAST_MESH_H
#include <stdint.h>
namespace VHACD
{
    // Very simple brute-force raycast against a triangle mesh. Tests every triangle; no acceleration hierarchy.
    // Takes a deep copy of the mesh and always performs its calculations in full double precision.
class RaycastMesh
{
public:
static RaycastMesh * createRaycastMesh(uint32_t vcount, // The number of vertices in the source triangle mesh
const double *vertices, // The array of vertex positions in the format x1,y1,z1..x2,y2,z2.. etc.
uint32_t tcount, // The number of triangles in the source triangle mesh
const uint32_t *indices); // The triangle indices in the format of i1,i2,i3 ... i4,i5,i6, ...
static RaycastMesh * createRaycastMesh(uint32_t vcount, // The number of vertices in the source triangle mesh
const float *vertices, // The array of vertex positions in the format x1,y1,z1..x2,y2,z2.. etc.
uint32_t tcount, // The number of triangles in the source triangle mesh
const uint32_t *indices); // The triangle indices in the format of i1,i2,i3 ... i4,i5,i6, ...
virtual bool raycast(const double *from, // The starting point of the raycast
const double *to, // The ending point of the raycast
        const double *closestToPoint,       // The point to match the nearest hit location (can just be the 'from' location if there is no specific point)
double *hitLocation, // The point where the ray hit nearest to the 'closestToPoint' location
double *hitDistance) = 0; // The distance the ray traveled to the hit location
virtual void release(void) = 0;
protected:
virtual ~RaycastMesh(void) { };
};
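// Editor's sketch (not part of the original header): typical use of the factory/raycast pair above.
// 'ExampleClosestHit' is a hypothetical helper; the input arrays may be freed after creation because
// the implementation takes a deep copy.
inline bool ExampleClosestHit(uint32_t vcount, const double* vertices,
                              uint32_t tcount, const uint32_t* indices,
                              const double from[3], const double to[3],
                              double hitLocation[3])
{
    RaycastMesh* rm = RaycastMesh::createRaycastMesh(vcount, vertices, tcount, indices);
    double hitDistance = 0;
    bool found = rm->raycast(from, to, from /* bias hits toward the ray origin */, hitLocation, &hitDistance);
    rm->release();
    return found;
}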
} // end of VHACD namespace
#endif
| 1,853 | C | 45.349999 | 145 | 0.641662 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/vhacdMesh.h | /* Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com)
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once
#ifndef VHACD_MESH_H
#define VHACD_MESH_H
#include "vhacdSArray.h"
#include "vhacdVector.h"
#define VHACD_DEBUG_MESH
namespace VHACD {
enum AXIS {
AXIS_X = 0,
AXIS_Y = 1,
AXIS_Z = 2
};
struct Plane {
double m_a;
double m_b;
double m_c;
double m_d;
AXIS m_axis;
short m_index;
};
#ifdef VHACD_DEBUG_MESH
struct Material {
Vec3<double> m_diffuseColor;
double m_ambientIntensity;
Vec3<double> m_specularColor;
Vec3<double> m_emissiveColor;
double m_shininess;
double m_transparency;
Material(void)
{
m_diffuseColor.X() = 0.5;
m_diffuseColor.Y() = 0.5;
m_diffuseColor.Z() = 0.5;
m_specularColor.X() = 0.5;
m_specularColor.Y() = 0.5;
m_specularColor.Z() = 0.5;
m_ambientIntensity = 0.4;
m_emissiveColor.X() = 0.0;
m_emissiveColor.Y() = 0.0;
m_emissiveColor.Z() = 0.0;
m_shininess = 0.4;
m_transparency = 0.0;
};
};
#endif // VHACD_DEBUG_MESH
//! Triangular mesh data structure
class Mesh {
public:
void AddPoint(const Vec3<double>& pt) { m_points.PushBack(pt); };
void SetPoint(size_t index, const Vec3<double>& pt) { m_points[index] = pt; };
const Vec3<double>& GetPoint(size_t index) const { return m_points[index]; };
Vec3<double>& GetPoint(size_t index) { return m_points[index]; };
size_t GetNPoints() const { return m_points.Size(); };
double* GetPoints() { return (double*)m_points.Data(); } // ugly
const double* const GetPoints() const { return (double*)m_points.Data(); } // ugly
const Vec3<double>* const GetPointsBuffer() const { return m_points.Data(); } //
Vec3<double>* const GetPointsBuffer() { return m_points.Data(); } //
void AddTriangle(const Vec3<int32_t>& tri) { m_triangles.PushBack(tri); };
void SetTriangle(size_t index, const Vec3<int32_t>& tri) { m_triangles[index] = tri; };
const Vec3<int32_t>& GetTriangle(size_t index) const { return m_triangles[index]; };
Vec3<int32_t>& GetTriangle(size_t index) { return m_triangles[index]; };
size_t GetNTriangles() const { return m_triangles.Size(); };
int32_t* GetTriangles() { return (int32_t*)m_triangles.Data(); } // ugly
const int32_t* const GetTriangles() const { return (int32_t*)m_triangles.Data(); } // ugly
const Vec3<int32_t>* const GetTrianglesBuffer() const { return m_triangles.Data(); }
Vec3<int32_t>* const GetTrianglesBuffer() { return m_triangles.Data(); }
const Vec3<double>& GetCenter() const { return m_center; }
const Vec3<double>& GetMinBB() const { return m_minBB; }
const Vec3<double>& GetMaxBB() const { return m_maxBB; }
void ClearPoints() { m_points.Clear(); }
void ClearTriangles() { m_triangles.Clear(); }
void Clear()
{
ClearPoints();
ClearTriangles();
}
void ResizePoints(size_t nPts) { m_points.Resize(nPts); }
void ResizeTriangles(size_t nTri) { m_triangles.Resize(nTri); }
void CopyPoints(SArray<Vec3<double> >& points) const { points = m_points; }
double GetDiagBB() const { return m_diag; }
double ComputeVolume() const;
void ComputeConvexHull(const double* const pts,
const size_t nPts);
void Clip(const Plane& plane,
SArray<Vec3<double> >& positivePart,
SArray<Vec3<double> >& negativePart) const;
bool IsInside(const Vec3<double>& pt) const;
double ComputeDiagBB();
Vec3<double> &ComputeCenter(void);
#ifdef VHACD_DEBUG_MESH
bool LoadOFF(const std::string& fileName, bool invert);
bool SaveVRML2(const std::string& fileName) const;
bool SaveVRML2(std::ofstream& fout, const Material& material) const;
bool SaveOFF(const std::string& fileName) const;
#endif // VHACD_DEBUG_MESH
//! Constructor.
Mesh();
//! Destructor.
~Mesh(void);
private:
SArray<Vec3<double> > m_points;
SArray<Vec3<int32_t> > m_triangles;
Vec3<double> m_minBB;
Vec3<double> m_maxBB;
Vec3<double> m_center;
double m_diag;
};
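// Editor's sketch (not part of the original header): building a minimal Mesh by hand, the same way
// the decomposition code feeds points and triangles into it. 'ExampleBuildUnitTriangle' is an
// illustrative helper name, not an existing V-HACD function.
inline void ExampleBuildUnitTriangle(Mesh& m)
{
    Vec3<double> p; // filled component-wise through the X()/Y()/Z() accessors
    p.X() = 0.0; p.Y() = 0.0; p.Z() = 0.0; m.AddPoint(p);
    p.X() = 1.0; p.Y() = 0.0; p.Z() = 0.0; m.AddPoint(p);
    p.X() = 0.0; p.Y() = 1.0; p.Z() = 0.0; m.AddPoint(p);
    Vec3<int32_t> t;
    t.X() = 0; t.Y() = 1; t.Z() = 2;
    m.AddTriangle(t);
}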
}
#endif | 5,520 | C | 41.46923 | 756 | 0.683152 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/btVector3.h | /*
Copyright (c) 2003-2006 Gino van den Bergen / Erwin Coumans http://continuousphysics.com/Bullet/
This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it freely,
subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
#ifndef BT_VECTOR3_H
#define BT_VECTOR3_H
#include "btMinMax.h"
#include "btScalar.h"
#ifdef BT_USE_DOUBLE_PRECISION
#define btVector3Data btVector3DoubleData
#define btVector3DataName "btVector3DoubleData"
#else
#define btVector3Data btVector3FloatData
#define btVector3DataName "btVector3FloatData"
#endif //BT_USE_DOUBLE_PRECISION
/**@brief btVector3 can be used to represent 3D points and vectors.
 * It has an unused w component to suit 16-byte alignment when btVector3 is stored in containers. This extra component can be used by derived classes (Quaternion?) or by the user.
* Ideally, this class should be replaced by a platform optimized SIMD version that keeps the data in registers
*/
ATTRIBUTE_ALIGNED16(class)
btVector3
{
public:
#if defined(__SPU__) && defined(__CELLOS_LV2__)
btScalar m_floats[4];
public:
SIMD_FORCE_INLINE const vec_float4& get128() const
{
return *((const vec_float4*)&m_floats[0]);
}
public:
#else //__CELLOS_LV2__ __SPU__
#ifdef BT_USE_SSE // _WIN32
union {
__m128 mVec128;
btScalar m_floats[4];
};
SIMD_FORCE_INLINE __m128 get128() const
{
return mVec128;
}
SIMD_FORCE_INLINE void set128(__m128 v128)
{
mVec128 = v128;
}
#else
btScalar m_floats[4];
#endif
#endif //__CELLOS_LV2__ __SPU__
public:
/**@brief No initialization constructor */
SIMD_FORCE_INLINE btVector3() {}
/**@brief Constructor from scalars
* @param x X value
* @param y Y value
* @param z Z value
*/
SIMD_FORCE_INLINE btVector3(const btScalar& x, const btScalar& y, const btScalar& z)
{
m_floats[0] = x;
m_floats[1] = y;
m_floats[2] = z;
m_floats[3] = btScalar(0.);
}
/**@brief Add a vector to this one
   * @param v The vector to add to this one */
SIMD_FORCE_INLINE btVector3& operator+=(const btVector3& v)
{
m_floats[0] += v.m_floats[0];
m_floats[1] += v.m_floats[1];
m_floats[2] += v.m_floats[2];
return *this;
}
/**@brief Subtract a vector from this one
   * @param v The vector to subtract */
SIMD_FORCE_INLINE btVector3& operator-=(const btVector3& v)
{
m_floats[0] -= v.m_floats[0];
m_floats[1] -= v.m_floats[1];
m_floats[2] -= v.m_floats[2];
return *this;
}
/**@brief Scale the vector
* @param s Scale factor */
SIMD_FORCE_INLINE btVector3& operator*=(const btScalar& s)
{
m_floats[0] *= s;
m_floats[1] *= s;
m_floats[2] *= s;
return *this;
}
/**@brief Inversely scale the vector
* @param s Scale factor to divide by */
SIMD_FORCE_INLINE btVector3& operator/=(const btScalar& s)
{
btFullAssert(s != btScalar(0.0));
return * this *= btScalar(1.0) / s;
}
/**@brief Return the dot product
* @param v The other vector in the dot product */
SIMD_FORCE_INLINE btScalar dot(const btVector3& v) const
{
return m_floats[0] * v.m_floats[0] + m_floats[1] * v.m_floats[1] + m_floats[2] * v.m_floats[2];
}
/**@brief Return the length of the vector squared */
SIMD_FORCE_INLINE btScalar length2() const
{
return dot(*this);
}
/**@brief Return the length of the vector */
SIMD_FORCE_INLINE btScalar length() const
{
return btSqrt(length2());
}
/**@brief Return the distance squared between the ends of this and another vector
   * This is semantically treating the vector like a point */
SIMD_FORCE_INLINE btScalar distance2(const btVector3& v) const;
/**@brief Return the distance between the ends of this and another vector
   * This is semantically treating the vector like a point */
SIMD_FORCE_INLINE btScalar distance(const btVector3& v) const;
SIMD_FORCE_INLINE btVector3& safeNormalize()
{
btVector3 absVec = this->absolute();
int32_t maxIndex = absVec.maxAxis();
if (absVec[maxIndex] > 0) {
*this /= absVec[maxIndex];
return * this /= length();
}
setValue(1, 0, 0);
return *this;
}
/**@brief Normalize this vector
* x^2 + y^2 + z^2 = 1 */
SIMD_FORCE_INLINE btVector3& normalize()
{
return * this /= length();
}
/**@brief Return a normalized version of this vector */
SIMD_FORCE_INLINE btVector3 normalized() const;
/**@brief Return a rotated version of this vector
* @param wAxis The axis to rotate about
* @param angle The angle to rotate by */
SIMD_FORCE_INLINE btVector3 rotate(const btVector3& wAxis, const btScalar angle) const;
/**@brief Return the angle between this and another vector
* @param v The other vector */
SIMD_FORCE_INLINE btScalar angle(const btVector3& v) const
{
btScalar s = btSqrt(length2() * v.length2());
btFullAssert(s != btScalar(0.0));
return btAcos(dot(v) / s);
}
    /**@brief Return a vector with the absolute values of each element */
SIMD_FORCE_INLINE btVector3 absolute() const
{
return btVector3(
btFabs(m_floats[0]),
btFabs(m_floats[1]),
btFabs(m_floats[2]));
}
/**@brief Return the cross product between this and another vector
* @param v The other vector */
SIMD_FORCE_INLINE btVector3 cross(const btVector3& v) const
{
return btVector3(
m_floats[1] * v.m_floats[2] - m_floats[2] * v.m_floats[1],
m_floats[2] * v.m_floats[0] - m_floats[0] * v.m_floats[2],
m_floats[0] * v.m_floats[1] - m_floats[1] * v.m_floats[0]);
}
SIMD_FORCE_INLINE btScalar triple(const btVector3& v1, const btVector3& v2) const
{
return m_floats[0] * (v1.m_floats[1] * v2.m_floats[2] - v1.m_floats[2] * v2.m_floats[1]) + m_floats[1] * (v1.m_floats[2] * v2.m_floats[0] - v1.m_floats[0] * v2.m_floats[2]) + m_floats[2] * (v1.m_floats[0] * v2.m_floats[1] - v1.m_floats[1] * v2.m_floats[0]);
}
/**@brief Return the axis with the smallest value
* Note return values are 0,1,2 for x, y, or z */
SIMD_FORCE_INLINE int32_t minAxis() const
{
return m_floats[0] < m_floats[1] ? (m_floats[0] < m_floats[2] ? 0 : 2) : (m_floats[1] < m_floats[2] ? 1 : 2);
}
/**@brief Return the axis with the largest value
* Note return values are 0,1,2 for x, y, or z */
SIMD_FORCE_INLINE int32_t maxAxis() const
{
return m_floats[0] < m_floats[1] ? (m_floats[1] < m_floats[2] ? 2 : 1) : (m_floats[0] < m_floats[2] ? 2 : 0);
}
SIMD_FORCE_INLINE int32_t furthestAxis() const
{
return absolute().minAxis();
}
SIMD_FORCE_INLINE int32_t closestAxis() const
{
return absolute().maxAxis();
}
SIMD_FORCE_INLINE void setInterpolate3(const btVector3& v0, const btVector3& v1, btScalar rt)
{
btScalar s = btScalar(1.0) - rt;
m_floats[0] = s * v0.m_floats[0] + rt * v1.m_floats[0];
m_floats[1] = s * v0.m_floats[1] + rt * v1.m_floats[1];
m_floats[2] = s * v0.m_floats[2] + rt * v1.m_floats[2];
//don't do the unused w component
// m_co[3] = s * v0[3] + rt * v1[3];
}
/**@brief Return the linear interpolation between this and another vector
* @param v The other vector
   * @param t The ratio of this to v (t = 0 => return this, t=1 => return other) */
SIMD_FORCE_INLINE btVector3 lerp(const btVector3& v, const btScalar& t) const
{
return btVector3(m_floats[0] + (v.m_floats[0] - m_floats[0]) * t,
m_floats[1] + (v.m_floats[1] - m_floats[1]) * t,
m_floats[2] + (v.m_floats[2] - m_floats[2]) * t);
}
/**@brief Elementwise multiply this vector by the other
* @param v The other vector */
SIMD_FORCE_INLINE btVector3& operator*=(const btVector3& v)
{
m_floats[0] *= v.m_floats[0];
m_floats[1] *= v.m_floats[1];
m_floats[2] *= v.m_floats[2];
return *this;
}
/**@brief Return the x value */
SIMD_FORCE_INLINE const btScalar& getX() const { return m_floats[0]; }
/**@brief Return the y value */
SIMD_FORCE_INLINE const btScalar& getY() const { return m_floats[1]; }
/**@brief Return the z value */
SIMD_FORCE_INLINE const btScalar& getZ() const { return m_floats[2]; }
/**@brief Set the x value */
SIMD_FORCE_INLINE void setX(btScalar x) { m_floats[0] = x; };
/**@brief Set the y value */
SIMD_FORCE_INLINE void setY(btScalar y) { m_floats[1] = y; };
/**@brief Set the z value */
SIMD_FORCE_INLINE void setZ(btScalar z) { m_floats[2] = z; };
/**@brief Set the w value */
SIMD_FORCE_INLINE void setW(btScalar w) { m_floats[3] = w; };
/**@brief Return the x value */
SIMD_FORCE_INLINE const btScalar& x() const { return m_floats[0]; }
/**@brief Return the y value */
SIMD_FORCE_INLINE const btScalar& y() const { return m_floats[1]; }
/**@brief Return the z value */
SIMD_FORCE_INLINE const btScalar& z() const { return m_floats[2]; }
/**@brief Return the w value */
SIMD_FORCE_INLINE const btScalar& w() const { return m_floats[3]; }
//SIMD_FORCE_INLINE btScalar& operator[](int32_t i) { return (&m_floats[0])[i]; }
//SIMD_FORCE_INLINE const btScalar& operator[](int32_t i) const { return (&m_floats[0])[i]; }
///operator btScalar*() replaces operator[], using implicit conversion. We added operator != and operator == to avoid pointer comparisons.
SIMD_FORCE_INLINE operator btScalar*() { return &m_floats[0]; }
SIMD_FORCE_INLINE operator const btScalar*() const { return &m_floats[0]; }
SIMD_FORCE_INLINE bool operator==(const btVector3& other) const
{
return ((m_floats[3] == other.m_floats[3]) && (m_floats[2] == other.m_floats[2]) && (m_floats[1] == other.m_floats[1]) && (m_floats[0] == other.m_floats[0]));
}
SIMD_FORCE_INLINE bool operator!=(const btVector3& other) const
{
return !(*this == other);
}
/**@brief Set each element to the max of the current values and the values of another btVector3
* @param other The other btVector3 to compare with
*/
SIMD_FORCE_INLINE void setMax(const btVector3& other)
{
btSetMax(m_floats[0], other.m_floats[0]);
btSetMax(m_floats[1], other.m_floats[1]);
btSetMax(m_floats[2], other.m_floats[2]);
btSetMax(m_floats[3], other.w());
}
/**@brief Set each element to the min of the current values and the values of another btVector3
* @param other The other btVector3 to compare with
*/
SIMD_FORCE_INLINE void setMin(const btVector3& other)
{
btSetMin(m_floats[0], other.m_floats[0]);
btSetMin(m_floats[1], other.m_floats[1]);
btSetMin(m_floats[2], other.m_floats[2]);
btSetMin(m_floats[3], other.w());
}
SIMD_FORCE_INLINE void setValue(const btScalar& x, const btScalar& y, const btScalar& z)
{
m_floats[0] = x;
m_floats[1] = y;
m_floats[2] = z;
m_floats[3] = btScalar(0.);
}
void getSkewSymmetricMatrix(btVector3 * v0, btVector3 * v1, btVector3 * v2) const
{
v0->setValue(0., -z(), y());
v1->setValue(z(), 0., -x());
v2->setValue(-y(), x(), 0.);
}
void setZero()
{
setValue(btScalar(0.), btScalar(0.), btScalar(0.));
}
SIMD_FORCE_INLINE bool isZero() const
{
return m_floats[0] == btScalar(0) && m_floats[1] == btScalar(0) && m_floats[2] == btScalar(0);
}
SIMD_FORCE_INLINE bool fuzzyZero() const
{
return length2() < SIMD_EPSILON;
}
SIMD_FORCE_INLINE void serialize(struct btVector3Data & dataOut) const;
SIMD_FORCE_INLINE void deSerialize(const struct btVector3Data& dataIn);
SIMD_FORCE_INLINE void serializeFloat(struct btVector3FloatData & dataOut) const;
SIMD_FORCE_INLINE void deSerializeFloat(const struct btVector3FloatData& dataIn);
SIMD_FORCE_INLINE void serializeDouble(struct btVector3DoubleData & dataOut) const;
SIMD_FORCE_INLINE void deSerializeDouble(const struct btVector3DoubleData& dataIn);
};
/**@brief Return the sum of two vectors (point semantics)*/
SIMD_FORCE_INLINE btVector3
operator+(const btVector3& v1, const btVector3& v2)
{
return btVector3(v1.m_floats[0] + v2.m_floats[0], v1.m_floats[1] + v2.m_floats[1], v1.m_floats[2] + v2.m_floats[2]);
}
/**@brief Return the elementwise product of two vectors */
SIMD_FORCE_INLINE btVector3
operator*(const btVector3& v1, const btVector3& v2)
{
return btVector3(v1.m_floats[0] * v2.m_floats[0], v1.m_floats[1] * v2.m_floats[1], v1.m_floats[2] * v2.m_floats[2]);
}
/**@brief Return the difference between two vectors */
SIMD_FORCE_INLINE btVector3
operator-(const btVector3& v1, const btVector3& v2)
{
return btVector3(v1.m_floats[0] - v2.m_floats[0], v1.m_floats[1] - v2.m_floats[1], v1.m_floats[2] - v2.m_floats[2]);
}
/**@brief Return the negative of the vector */
SIMD_FORCE_INLINE btVector3
operator-(const btVector3& v)
{
return btVector3(-v.m_floats[0], -v.m_floats[1], -v.m_floats[2]);
}
/**@brief Return the vector scaled by s */
SIMD_FORCE_INLINE btVector3
operator*(const btVector3& v, const btScalar& s)
{
return btVector3(v.m_floats[0] * s, v.m_floats[1] * s, v.m_floats[2] * s);
}
/**@brief Return the vector scaled by s */
SIMD_FORCE_INLINE btVector3
operator*(const btScalar& s, const btVector3& v)
{
return v * s;
}
/**@brief Return the vector inversely scaled by s */
SIMD_FORCE_INLINE btVector3
operator/(const btVector3& v, const btScalar& s)
{
btFullAssert(s != btScalar(0.0));
return v * (btScalar(1.0) / s);
}
/**@brief Return the vector inversely scaled by s */
SIMD_FORCE_INLINE btVector3
operator/(const btVector3& v1, const btVector3& v2)
{
return btVector3(v1.m_floats[0] / v2.m_floats[0], v1.m_floats[1] / v2.m_floats[1], v1.m_floats[2] / v2.m_floats[2]);
}
/**@brief Return the dot product between two vectors */
SIMD_FORCE_INLINE btScalar
btDot(const btVector3& v1, const btVector3& v2)
{
return v1.dot(v2);
}
/**@brief Return the distance squared between two vectors */
SIMD_FORCE_INLINE btScalar
btDistance2(const btVector3& v1, const btVector3& v2)
{
return v1.distance2(v2);
}
/**@brief Return the distance between two vectors */
SIMD_FORCE_INLINE btScalar
btDistance(const btVector3& v1, const btVector3& v2)
{
return v1.distance(v2);
}
/**@brief Return the angle between two vectors */
SIMD_FORCE_INLINE btScalar
btAngle(const btVector3& v1, const btVector3& v2)
{
return v1.angle(v2);
}
/**@brief Return the cross product of two vectors */
SIMD_FORCE_INLINE btVector3
btCross(const btVector3& v1, const btVector3& v2)
{
return v1.cross(v2);
}
SIMD_FORCE_INLINE btScalar
btTriple(const btVector3& v1, const btVector3& v2, const btVector3& v3)
{
return v1.triple(v2, v3);
}
/**@brief Return the linear interpolation between two vectors
* @param v1 One vector
* @param v2 The other vector
 * @param t The ratio of this to v (t = 0 => return v1, t=1 => return v2) */
SIMD_FORCE_INLINE btVector3
lerp(const btVector3& v1, const btVector3& v2, const btScalar& t)
{
return v1.lerp(v2, t);
}
SIMD_FORCE_INLINE btScalar btVector3::distance2(const btVector3& v) const
{
return (v - *this).length2();
}
SIMD_FORCE_INLINE btScalar btVector3::distance(const btVector3& v) const
{
return (v - *this).length();
}
SIMD_FORCE_INLINE btVector3 btVector3::normalized() const
{
return *this / length();
}
SIMD_FORCE_INLINE btVector3 btVector3::rotate(const btVector3& wAxis, const btScalar angle) const
{
    // wAxis must be a unit length vector
btVector3 o = wAxis * wAxis.dot(*this);
btVector3 x = *this - o;
btVector3 y;
y = wAxis.cross(*this);
return (o + x * btCos(angle) + y * btSin(angle));
}
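// Editor's sketch (not part of the original header): typical use of the vector API defined above.
SIMD_FORCE_INLINE btVector3 btExampleVectorUsage()
{
    btVector3 xAxis(btScalar(1.), btScalar(0.), btScalar(0.));
    btVector3 yAxis(btScalar(0.), btScalar(1.), btScalar(0.));
    btScalar cosine = xAxis.dot(yAxis);                    // 0: the axes are perpendicular
    btVector3 zAxis = xAxis.cross(yAxis).normalized();     // (0, 0, 1)
    btVector3 rotated = xAxis.rotate(zAxis, SIMD_HALF_PI); // rotating X about Z by 90 degrees gives ~Y
    return rotated + yAxis * cosine;                       // ~(0, 1, 0)
}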
class btVector4 : public btVector3 {
public:
SIMD_FORCE_INLINE btVector4() {}
SIMD_FORCE_INLINE btVector4(const btScalar& x, const btScalar& y, const btScalar& z, const btScalar& w)
: btVector3(x, y, z)
{
m_floats[3] = w;
}
SIMD_FORCE_INLINE btVector4 absolute4() const
{
return btVector4(
btFabs(m_floats[0]),
btFabs(m_floats[1]),
btFabs(m_floats[2]),
btFabs(m_floats[3]));
}
btScalar getW() const { return m_floats[3]; }
SIMD_FORCE_INLINE int32_t maxAxis4() const
{
int32_t maxIndex = -1;
btScalar maxVal = btScalar(-BT_LARGE_FLOAT);
if (m_floats[0] > maxVal) {
maxIndex = 0;
maxVal = m_floats[0];
}
if (m_floats[1] > maxVal) {
maxIndex = 1;
maxVal = m_floats[1];
}
if (m_floats[2] > maxVal) {
maxIndex = 2;
maxVal = m_floats[2];
}
if (m_floats[3] > maxVal) {
maxIndex = 3;
}
return maxIndex;
}
SIMD_FORCE_INLINE int32_t minAxis4() const
{
int32_t minIndex = -1;
btScalar minVal = btScalar(BT_LARGE_FLOAT);
if (m_floats[0] < minVal) {
minIndex = 0;
minVal = m_floats[0];
}
if (m_floats[1] < minVal) {
minIndex = 1;
minVal = m_floats[1];
}
if (m_floats[2] < minVal) {
minIndex = 2;
minVal = m_floats[2];
}
if (m_floats[3] < minVal) {
minIndex = 3;
}
return minIndex;
}
SIMD_FORCE_INLINE int32_t closestAxis4() const
{
return absolute4().maxAxis4();
}
/**@brief Set x,y,z and zero w
* @param x Value of x
* @param y Value of y
* @param z Value of z
*/
/* void getValue(btScalar *m) const
{
m[0] = m_floats[0];
m[1] = m_floats[1];
m[2] =m_floats[2];
}
*/
/**@brief Set the values
* @param x Value of x
* @param y Value of y
* @param z Value of z
* @param w Value of w
*/
SIMD_FORCE_INLINE void setValue(const btScalar& x, const btScalar& y, const btScalar& z, const btScalar& w)
{
m_floats[0] = x;
m_floats[1] = y;
m_floats[2] = z;
m_floats[3] = w;
}
};
///btSwapScalarEndian swaps scalar endianness, useful for network and cross-platform serialization
SIMD_FORCE_INLINE void btSwapScalarEndian(const btScalar& sourceVal, btScalar& destVal)
{
#ifdef BT_USE_DOUBLE_PRECISION
unsigned char* dest = (unsigned char*)&destVal;
unsigned char* src = (unsigned char*)&sourceVal;
dest[0] = src[7];
dest[1] = src[6];
dest[2] = src[5];
dest[3] = src[4];
dest[4] = src[3];
dest[5] = src[2];
dest[6] = src[1];
dest[7] = src[0];
#else
unsigned char* dest = (unsigned char*)&destVal;
unsigned char* src = (unsigned char*)&sourceVal;
dest[0] = src[3];
dest[1] = src[2];
dest[2] = src[1];
dest[3] = src[0];
#endif //BT_USE_DOUBLE_PRECISION
}
///btSwapVector3Endian swaps vector endianness, useful for network and cross-platform serialization
SIMD_FORCE_INLINE void btSwapVector3Endian(const btVector3& sourceVec, btVector3& destVec)
{
for (int32_t i = 0; i < 4; i++) {
btSwapScalarEndian(sourceVec[i], destVec[i]);
}
}
///btUnSwapVector3Endian swaps vector endianness, useful for network and cross-platform serialization
SIMD_FORCE_INLINE void btUnSwapVector3Endian(btVector3& vector)
{
btVector3 swappedVec;
for (int32_t i = 0; i < 4; i++) {
btSwapScalarEndian(vector[i], swappedVec[i]);
}
vector = swappedVec;
}
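/* Illustrative sketch; btEndianRoundTripExample() is a hypothetical helper, not part of
   the original Bullet API. Swapping a vector's endianness and then un-swapping it
   restores the original bytes, which is what cross-platform serialization relies on. */
SIMD_FORCE_INLINE btVector3
btEndianRoundTripExample(const btVector3& v)
{
    btVector3 swapped;
    btSwapVector3Endian(v, swapped); // byte-reversed copy of each component
    btUnSwapVector3Endian(swapped);  // reverse the bytes again, recovering v
    return swapped;
}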
template <class T>
SIMD_FORCE_INLINE void btPlaneSpace1(const T& n, T& p, T& q)
{
if (btFabs(n[2]) > SIMDSQRT12) {
// choose p in y-z plane
btScalar a = n[1] * n[1] + n[2] * n[2];
btScalar k = btRecipSqrt(a);
p[0] = 0;
p[1] = -n[2] * k;
p[2] = n[1] * k;
// set q = n x p
q[0] = a * k;
q[1] = -n[0] * p[2];
q[2] = n[0] * p[1];
}
else {
// choose p in x-y plane
btScalar a = n[0] * n[0] + n[1] * n[1];
btScalar k = btRecipSqrt(a);
p[0] = -n[1] * k;
p[1] = n[0] * k;
p[2] = 0;
// set q = n x p
q[0] = -n[2] * p[1];
q[1] = n[2] * p[0];
q[2] = a * k;
}
}
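/* Illustrative sketch; btPlaneSpaceExample() is a hypothetical helper, not part of the
   original Bullet API. For the unit normal n = (0, 0, 1), btPlaneSpace1 produces
   p = (0, -1, 0) and q = (1, 0, 0): both are orthogonal to n and q == n x p. */
SIMD_FORCE_INLINE void
btPlaneSpaceExample()
{
    const btVector3 n(btScalar(0.0), btScalar(0.0), btScalar(1.0));
    btVector3 p, q;
    btPlaneSpace1(n, p, q); // p and q span the plane orthogonal to n
}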
struct btVector3FloatData {
float m_floats[4];
};
struct btVector3DoubleData {
double m_floats[4];
};
SIMD_FORCE_INLINE void btVector3::serializeFloat(struct btVector3FloatData& dataOut) const
{
///could also do a memcpy, check if it is worth it
for (int32_t i = 0; i < 4; i++)
dataOut.m_floats[i] = float(m_floats[i]);
}
SIMD_FORCE_INLINE void btVector3::deSerializeFloat(const struct btVector3FloatData& dataIn)
{
for (int32_t i = 0; i < 4; i++)
m_floats[i] = btScalar(dataIn.m_floats[i]);
}
SIMD_FORCE_INLINE void btVector3::serializeDouble(struct btVector3DoubleData& dataOut) const
{
///could also do a memcpy, check if it is worth it
for (int32_t i = 0; i < 4; i++)
dataOut.m_floats[i] = double(m_floats[i]);
}
SIMD_FORCE_INLINE void btVector3::deSerializeDouble(const struct btVector3DoubleData& dataIn)
{
for (int32_t i = 0; i < 4; i++)
m_floats[i] = btScalar(dataIn.m_floats[i]);
}
SIMD_FORCE_INLINE void btVector3::serialize(struct btVector3Data& dataOut) const
{
///could also do a memcpy, check if it is worth it
for (int32_t i = 0; i < 4; i++)
dataOut.m_floats[i] = m_floats[i];
}
SIMD_FORCE_INLINE void btVector3::deSerialize(const struct btVector3Data& dataIn)
{
for (int32_t i = 0; i < 4; i++)
m_floats[i] = dataIn.m_floats[i];
}
#endif //BT_VECTOR3_H
| 22,579 | C | 30.536313 | 265 | 0.613845 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/vhacdVHACD.h | /* Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com)
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once
#ifndef VHACD_VHACD_H
#define VHACD_VHACD_H
#ifdef OPENCL_FOUND
#ifdef __MACH__
#include <OpenCL/cl.h>
#else
#include <CL/cl.h>
#endif
#endif //OPENCL_FOUND
#include "vhacdMutex.h"
#include "vhacdVolume.h"
#include "vhacdRaycastMesh.h"
#include <vector>
typedef std::vector< VHACD::IVHACD::Constraint > ConstraintVector;
#define USE_THREAD 1
#define OCL_MIN_NUM_PRIMITIVES 4096
#define CH_APP_MIN_NUM_PRIMITIVES 64000
namespace VHACD {
class VHACD : public IVHACD {
public:
//! Constructor.
VHACD()
{
#if USE_THREAD == 1 && _OPENMP
m_ompNumProcessors = 2 * omp_get_num_procs();
omp_set_num_threads(m_ompNumProcessors);
#else //USE_THREAD == 1 && _OPENMP
m_ompNumProcessors = 1;
#endif //USE_THREAD == 1 && _OPENMP
#ifdef CL_VERSION_1_1
m_oclWorkGroupSize = 0;
m_oclDevice = 0;
m_oclQueue = 0;
m_oclKernelComputePartialVolumes = 0;
m_oclKernelComputeSum = 0;
#endif //CL_VERSION_1_1
Init();
}
//! Destructor.
~VHACD(void)
{
}
uint32_t GetNConvexHulls() const
{
return (uint32_t)m_convexHulls.Size();
}
void Cancel()
{
SetCancel(true);
}
void GetConvexHull(const uint32_t index, ConvexHull& ch) const
{
Mesh* mesh = m_convexHulls[index];
ch.m_nPoints = (uint32_t)mesh->GetNPoints();
ch.m_nTriangles = (uint32_t)mesh->GetNTriangles();
ch.m_points = mesh->GetPoints();
ch.m_triangles = (uint32_t *)mesh->GetTriangles();
ch.m_volume = mesh->ComputeVolume();
        Vec3<double>& center = mesh->ComputeCenter();
ch.m_center[0] = center.X();
ch.m_center[1] = center.Y();
ch.m_center[2] = center.Z();
}
void Clean(void)
{
if (mRaycastMesh)
{
mRaycastMesh->release();
mRaycastMesh = nullptr;
}
delete m_volume;
delete m_pset;
size_t nCH = m_convexHulls.Size();
for (size_t p = 0; p < nCH; ++p) {
delete m_convexHulls[p];
}
m_convexHulls.Clear();
Init();
}
void Release(void)
{
delete this;
}
bool Compute(const float* const points,
const uint32_t nPoints,
const uint32_t* const triangles,
const uint32_t nTriangles,
const Parameters& params);
bool Compute(const double* const points,
const uint32_t nPoints,
const uint32_t* const triangles,
const uint32_t nTriangles,
const Parameters& params);
bool OCLInit(void* const oclDevice,
IUserLogger* const logger = 0);
bool OCLRelease(IUserLogger* const logger = 0);
virtual bool ComputeCenterOfMass(double centerOfMass[3]) const;
    // Analyzes the HACD results and computes the constraint solutions.
    // It finds the points at which any two convex hulls touch each other and
    // returns the total number of constraint pairs found.
virtual uint32_t ComputeConstraints(void);
    // Returns a pointer to the constraint at this index; null if the index is not valid
    // or the user did not previously call 'ComputeConstraints'.
virtual const Constraint *GetConstraint(uint32_t index) const;
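    // Illustrative usage sketch (assumed calling code, not part of this header; the
    // CreateVHACD() factory is taken from the public IVHACD interface and is assumed here):
    //   VHACD::IVHACD* hacd = VHACD::CreateVHACD();
    //   VHACD::IVHACD::Parameters params;
    //   hacd->Compute(points, nPoints, triangles, nTriangles, params);
    //   uint32_t nPairs = hacd->ComputeConstraints();
    //   for (uint32_t i = 0; i < nPairs; ++i) {
    //       const VHACD::IVHACD::Constraint* c = hacd->GetConstraint(i);
    //   }
    //   hacd->Release();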
private:
void SetCancel(bool cancel)
{
m_cancelMutex.Lock();
m_cancel = cancel;
m_cancelMutex.Unlock();
}
bool GetCancel()
{
m_cancelMutex.Lock();
bool cancel = m_cancel;
m_cancelMutex.Unlock();
return cancel;
}
void Update(const double stageProgress,
const double operationProgress,
const Parameters& params)
{
m_stageProgress = stageProgress;
m_operationProgress = operationProgress;
if (params.m_callback) {
params.m_callback->Update(m_overallProgress,
m_stageProgress,
m_operationProgress,
m_stage.c_str(),
m_operation.c_str());
}
}
void Init()
{
if (mRaycastMesh)
{
mRaycastMesh->release();
mRaycastMesh = nullptr;
}
memset(m_rot, 0, sizeof(double) * 9);
m_dim = 64;
m_volume = 0;
m_volumeCH0 = 0.0;
m_pset = 0;
m_overallProgress = 0.0;
m_stageProgress = 0.0;
m_operationProgress = 0.0;
m_stage = "";
m_operation = "";
m_barycenter[0] = m_barycenter[1] = m_barycenter[2] = 0.0;
m_rot[0][0] = m_rot[1][1] = m_rot[2][2] = 1.0;
SetCancel(false);
}
void ComputePrimitiveSet(const Parameters& params);
void ComputeACD(const Parameters& params);
void MergeConvexHulls(const Parameters& params);
void SimplifyConvexHull(Mesh* const ch, const size_t nvertices, const double minVolume);
void SimplifyConvexHulls(const Parameters& params);
void ComputeBestClippingPlane(const PrimitiveSet* inputPSet,
const double volume,
const SArray<Plane>& planes,
const Vec3<double>& preferredCuttingDirection,
const double w,
const double alpha,
const double beta,
const int32_t convexhullDownsampling,
const double progress0,
const double progress1,
Plane& bestPlane,
double& minConcavity,
const Parameters& params);
template <class T>
void AlignMesh(const T* const points,
const uint32_t stridePoints,
const uint32_t nPoints,
const int32_t* const triangles,
const uint32_t strideTriangles,
const uint32_t nTriangles,
const Parameters& params)
{
if (GetCancel() || !params.m_pca) {
return;
}
m_timer.Tic();
m_stage = "Align mesh";
m_operation = "Voxelization";
std::ostringstream msg;
if (params.m_logger) {
msg << "+ " << m_stage << std::endl;
params.m_logger->Log(msg.str().c_str());
}
Update(0.0, 0.0, params);
if (GetCancel()) {
return;
}
m_dim = (size_t)(pow((double)params.m_resolution, 1.0 / 3.0) + 0.5);
Volume volume;
volume.Voxelize(points, stridePoints, nPoints,
triangles, strideTriangles, nTriangles,
m_dim, m_barycenter, m_rot);
size_t n = volume.GetNPrimitivesOnSurf() + volume.GetNPrimitivesInsideSurf();
Update(50.0, 100.0, params);
if (params.m_logger) {
msg.str("");
msg << "\t dim = " << m_dim << "\t-> " << n << " voxels" << std::endl;
params.m_logger->Log(msg.str().c_str());
}
if (GetCancel()) {
return;
}
m_operation = "PCA";
Update(50.0, 0.0, params);
volume.AlignToPrincipalAxes(m_rot);
m_overallProgress = 1.0;
Update(100.0, 100.0, params);
m_timer.Toc();
if (params.m_logger) {
msg.str("");
msg << "\t time " << m_timer.GetElapsedTime() / 1000.0 << "s" << std::endl;
params.m_logger->Log(msg.str().c_str());
}
}
template <class T>
void VoxelizeMesh(const T* const points,
const uint32_t stridePoints,
const uint32_t nPoints,
const int32_t* const triangles,
const uint32_t strideTriangles,
const uint32_t nTriangles,
const Parameters& params)
{
if (GetCancel()) {
return;
}
m_timer.Tic();
m_stage = "Voxelization";
std::ostringstream msg;
if (params.m_logger) {
msg << "+ " << m_stage << std::endl;
params.m_logger->Log(msg.str().c_str());
}
delete m_volume;
m_volume = 0;
int32_t iteration = 0;
const int32_t maxIteration = 5;
double progress = 0.0;
while (iteration++ < maxIteration && !m_cancel) {
msg.str("");
msg << "Iteration " << iteration;
m_operation = msg.str();
progress = iteration * 100.0 / maxIteration;
Update(progress, 0.0, params);
m_volume = new Volume;
m_volume->Voxelize(points, stridePoints, nPoints,
triangles, strideTriangles, nTriangles,
m_dim, m_barycenter, m_rot);
Update(progress, 100.0, params);
size_t n = m_volume->GetNPrimitivesOnSurf() + m_volume->GetNPrimitivesInsideSurf();
if (params.m_logger) {
msg.str("");
msg << "\t dim = " << m_dim << "\t-> " << n << " voxels" << std::endl;
params.m_logger->Log(msg.str().c_str());
}
double a = pow((double)(params.m_resolution) / n, 0.33);
size_t dim_next = (size_t)(m_dim * a + 0.5);
if (n < params.m_resolution && iteration < maxIteration && m_volume->GetNPrimitivesOnSurf() < params.m_resolution / 8 && m_dim != dim_next) {
delete m_volume;
m_volume = 0;
m_dim = dim_next;
}
else {
break;
}
}
m_overallProgress = 10.0;
Update(100.0, 100.0, params);
m_timer.Toc();
if (params.m_logger) {
msg.str("");
msg << "\t time " << m_timer.GetElapsedTime() / 1000.0 << "s" << std::endl;
params.m_logger->Log(msg.str().c_str());
}
}
template <class T>
bool ComputeACD(const T* const points,
const uint32_t nPoints,
const uint32_t* const triangles,
const uint32_t nTriangles,
const Parameters& params)
{
Init();
if (params.m_projectHullVertices)
{
mRaycastMesh = RaycastMesh::createRaycastMesh(nPoints, points, nTriangles, (const uint32_t *)triangles);
}
if (params.m_oclAcceleration) {
// build kernels
}
AlignMesh(points, 3, nPoints, (int32_t *)triangles, 3, nTriangles, params);
VoxelizeMesh(points, 3, nPoints, (int32_t *)triangles, 3, nTriangles, params);
ComputePrimitiveSet(params);
ComputeACD(params);
MergeConvexHulls(params);
SimplifyConvexHulls(params);
if (params.m_oclAcceleration) {
// Release kernels
}
if (GetCancel()) {
Clean();
return false;
}
return true;
}
private:
RaycastMesh *mRaycastMesh{ nullptr };
SArray<Mesh*> m_convexHulls;
std::string m_stage;
std::string m_operation;
double m_overallProgress;
double m_stageProgress;
double m_operationProgress;
double m_rot[3][3];
double m_volumeCH0;
Vec3<double> m_barycenter;
Timer m_timer;
size_t m_dim;
Volume* m_volume;
PrimitiveSet* m_pset;
Mutex m_cancelMutex;
bool m_cancel;
int32_t m_ompNumProcessors;
#ifdef CL_VERSION_1_1
cl_device_id* m_oclDevice;
cl_context m_oclContext;
cl_program m_oclProgram;
cl_command_queue* m_oclQueue;
cl_kernel* m_oclKernelComputePartialVolumes;
cl_kernel* m_oclKernelComputeSum;
size_t m_oclWorkGroupSize;
#endif //CL_VERSION_1_1
ConstraintVector mConstraints;
};
}
#endif // VHACD_VHACD_H
| 12,732 | C | 32.158854 | 755 | 0.589695 |
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/VHACD/inc/vhacdVolume.h | /* Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com)
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. The names of the contributors may not be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once
#ifndef VHACD_VOLUME_H
#define VHACD_VOLUME_H
#include "vhacdMesh.h"
#include "vhacdVector.h"
#include <assert.h>
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable:4456 4701)
#endif
namespace VHACD {
enum VOXEL_VALUE {
PRIMITIVE_UNDEFINED = 0,
PRIMITIVE_OUTSIDE_SURFACE = 1,
PRIMITIVE_INSIDE_SURFACE = 2,
PRIMITIVE_ON_SURFACE = 3
};
struct Voxel {
public:
short m_coord[3];
short m_data;
};
class PrimitiveSet {
public:
virtual ~PrimitiveSet(){};
virtual PrimitiveSet* Create() const = 0;
virtual const size_t GetNPrimitives() const = 0;
virtual const size_t GetNPrimitivesOnSurf() const = 0;
virtual const size_t GetNPrimitivesInsideSurf() const = 0;
virtual const double GetEigenValue(AXIS axis) const = 0;
virtual const double ComputeMaxVolumeError() const = 0;
virtual const double ComputeVolume() const = 0;
virtual void Clip(const Plane& plane, PrimitiveSet* const positivePart,
PrimitiveSet* const negativePart) const = 0;
virtual void Intersect(const Plane& plane, SArray<Vec3<double> >* const positivePts,
SArray<Vec3<double> >* const negativePts, const size_t sampling) const = 0;
virtual void ComputeExteriorPoints(const Plane& plane, const Mesh& mesh,
SArray<Vec3<double> >* const exteriorPts) const = 0;
virtual void ComputeClippedVolumes(const Plane& plane, double& positiveVolume,
double& negativeVolume) const = 0;
virtual void SelectOnSurface(PrimitiveSet* const onSurfP) const = 0;
virtual void ComputeConvexHull(Mesh& meshCH, const size_t sampling = 1) const = 0;
virtual void ComputeBB() = 0;
virtual void ComputePrincipalAxes() = 0;
virtual void AlignToPrincipalAxes() = 0;
virtual void RevertAlignToPrincipalAxes() = 0;
virtual void Convert(Mesh& mesh, const VOXEL_VALUE value) const = 0;
const Mesh& GetConvexHull() const { return m_convexHull; };
Mesh& GetConvexHull() { return m_convexHull; };
private:
Mesh m_convexHull;
};
//! Voxel-based primitive set used during the approximate convex decomposition.
class VoxelSet : public PrimitiveSet {
friend class Volume;
public:
//! Destructor.
~VoxelSet(void);
//! Constructor.
VoxelSet();
const size_t GetNPrimitives() const { return m_voxels.Size(); }
const size_t GetNPrimitivesOnSurf() const { return m_numVoxelsOnSurface; }
const size_t GetNPrimitivesInsideSurf() const { return m_numVoxelsInsideSurface; }
const double GetEigenValue(AXIS axis) const { return m_D[axis][axis]; }
const double ComputeVolume() const { return m_unitVolume * m_voxels.Size(); }
const double ComputeMaxVolumeError() const { return m_unitVolume * m_numVoxelsOnSurface; }
const Vec3<short>& GetMinBBVoxels() const { return m_minBBVoxels; }
const Vec3<short>& GetMaxBBVoxels() const { return m_maxBBVoxels; }
const Vec3<double>& GetMinBB() const { return m_minBB; }
const double& GetScale() const { return m_scale; }
const double& GetUnitVolume() const { return m_unitVolume; }
Vec3<double> GetPoint(Vec3<short> voxel) const
{
return Vec3<double>(voxel[0] * m_scale + m_minBB[0],
voxel[1] * m_scale + m_minBB[1],
voxel[2] * m_scale + m_minBB[2]);
}
Vec3<double> GetPoint(const Voxel& voxel) const
{
return Vec3<double>(voxel.m_coord[0] * m_scale + m_minBB[0],
voxel.m_coord[1] * m_scale + m_minBB[1],
voxel.m_coord[2] * m_scale + m_minBB[2]);
}
Vec3<double> GetPoint(Vec3<double> voxel) const
{
return Vec3<double>(voxel[0] * m_scale + m_minBB[0],
voxel[1] * m_scale + m_minBB[1],
voxel[2] * m_scale + m_minBB[2]);
}
void GetPoints(const Voxel& voxel, Vec3<double>* const pts) const;
void ComputeConvexHull(Mesh& meshCH, const size_t sampling = 1) const;
void Clip(const Plane& plane, PrimitiveSet* const positivePart, PrimitiveSet* const negativePart) const;
void Intersect(const Plane& plane, SArray<Vec3<double> >* const positivePts,
SArray<Vec3<double> >* const negativePts, const size_t sampling) const;
void ComputeExteriorPoints(const Plane& plane, const Mesh& mesh,
SArray<Vec3<double> >* const exteriorPts) const;
void ComputeClippedVolumes(const Plane& plane, double& positiveVolume, double& negativeVolume) const;
void SelectOnSurface(PrimitiveSet* const onSurfP) const;
void ComputeBB();
void Convert(Mesh& mesh, const VOXEL_VALUE value) const;
void ComputePrincipalAxes();
PrimitiveSet* Create() const
{
return new VoxelSet();
}
void AlignToPrincipalAxes(){};
void RevertAlignToPrincipalAxes(){};
Voxel* const GetVoxels() { return m_voxels.Data(); }
const Voxel* const GetVoxels() const { return m_voxels.Data(); }
private:
size_t m_numVoxelsOnSurface;
size_t m_numVoxelsInsideSurface;
Vec3<double> m_minBB;
double m_scale;
SArray<Voxel, 8> m_voxels;
double m_unitVolume;
Vec3<double> m_minBBPts;
Vec3<double> m_maxBBPts;
Vec3<short> m_minBBVoxels;
Vec3<short> m_maxBBVoxels;
Vec3<short> m_barycenter;
double m_Q[3][3];
double m_D[3][3];
Vec3<double> m_barycenterPCA;
};
struct Tetrahedron {
public:
Vec3<double> m_pts[4];
unsigned char m_data;
};
//! Tetrahedron-based primitive set used when decomposing in tetrahedron mode.
class TetrahedronSet : public PrimitiveSet {
friend class Volume;
public:
//! Destructor.
~TetrahedronSet(void);
//! Constructor.
TetrahedronSet();
const size_t GetNPrimitives() const { return m_tetrahedra.Size(); }
const size_t GetNPrimitivesOnSurf() const { return m_numTetrahedraOnSurface; }
const size_t GetNPrimitivesInsideSurf() const { return m_numTetrahedraInsideSurface; }
const Vec3<double>& GetMinBB() const { return m_minBB; }
const Vec3<double>& GetMaxBB() const { return m_maxBB; }
const Vec3<double>& GetBarycenter() const { return m_barycenter; }
const double GetEigenValue(AXIS axis) const { return m_D[axis][axis]; }
const double GetSacle() const { return m_scale; }
const double ComputeVolume() const;
const double ComputeMaxVolumeError() const;
void ComputeConvexHull(Mesh& meshCH, const size_t sampling = 1) const;
void ComputePrincipalAxes();
void AlignToPrincipalAxes();
void RevertAlignToPrincipalAxes();
void Clip(const Plane& plane, PrimitiveSet* const positivePart, PrimitiveSet* const negativePart) const;
void Intersect(const Plane& plane, SArray<Vec3<double> >* const positivePts,
SArray<Vec3<double> >* const negativePts, const size_t sampling) const;
void ComputeExteriorPoints(const Plane& plane, const Mesh& mesh,
SArray<Vec3<double> >* const exteriorPts) const;
void ComputeClippedVolumes(const Plane& plane, double& positiveVolume, double& negativeVolume) const;
void SelectOnSurface(PrimitiveSet* const onSurfP) const;
void ComputeBB();
void Convert(Mesh& mesh, const VOXEL_VALUE value) const;
inline bool Add(Tetrahedron& tetrahedron);
PrimitiveSet* Create() const
{
return new TetrahedronSet();
}
static const double EPS;
private:
void AddClippedTetrahedra(const Vec3<double> (&pts)[10], const int32_t nPts);
size_t m_numTetrahedraOnSurface;
size_t m_numTetrahedraInsideSurface;
double m_scale;
Vec3<double> m_minBB;
Vec3<double> m_maxBB;
Vec3<double> m_barycenter;
SArray<Tetrahedron, 8> m_tetrahedra;
double m_Q[3][3];
double m_D[3][3];
};
//! Voxel grid built by voxelizing the input triangle mesh.
class Volume {
public:
//! Destructor.
~Volume(void);
//! Constructor.
Volume();
    //! Voxelize the input triangle mesh onto the grid (dim cells along the longest axis).
template <class T>
void Voxelize(const T* const points, const uint32_t stridePoints, const uint32_t nPoints,
const int32_t* const triangles, const uint32_t strideTriangles, const uint32_t nTriangles,
const size_t dim, const Vec3<double>& barycenter, const double (&rot)[3][3]);
unsigned char& GetVoxel(const size_t i, const size_t j, const size_t k)
{
        assert(i < m_dim[0]);
        assert(j < m_dim[1]);
        assert(k < m_dim[2]);
return m_data[i + j * m_dim[0] + k * m_dim[0] * m_dim[1]];
}
const unsigned char& GetVoxel(const size_t i, const size_t j, const size_t k) const
{
        assert(i < m_dim[0]);
        assert(j < m_dim[1]);
        assert(k < m_dim[2]);
return m_data[i + j * m_dim[0] + k * m_dim[0] * m_dim[1]];
}
const size_t GetNPrimitivesOnSurf() const { return m_numVoxelsOnSurface; }
const size_t GetNPrimitivesInsideSurf() const { return m_numVoxelsInsideSurface; }
void Convert(Mesh& mesh, const VOXEL_VALUE value) const;
void Convert(VoxelSet& vset) const;
void Convert(TetrahedronSet& tset) const;
void AlignToPrincipalAxes(double (&rot)[3][3]) const;
private:
void FillOutsideSurface(const size_t i0, const size_t j0, const size_t k0, const size_t i1,
const size_t j1, const size_t k1);
void FillInsideSurface();
template <class T>
void ComputeBB(const T* const points, const uint32_t stridePoints, const uint32_t nPoints,
const Vec3<double>& barycenter, const double (&rot)[3][3]);
void Allocate();
void Free();
Vec3<double> m_minBB;
Vec3<double> m_maxBB;
double m_scale;
    size_t m_dim[3]; //!< number of voxels along each axis
size_t m_numVoxelsOnSurface;
size_t m_numVoxelsInsideSurface;
size_t m_numVoxelsOutsideSurface;
unsigned char* m_data;
};
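// Illustrative usage sketch (assumed calling code, not part of this header): voxelize a
// mesh with an identity alignment, then convert the result to a VoxelSet primitive set.
//   double rot[3][3] = { { 1.0, 0.0, 0.0 }, { 0.0, 1.0, 0.0 }, { 0.0, 0.0, 1.0 } };
//   Vec3<double> barycenter;
//   barycenter[0] = barycenter[1] = barycenter[2] = 0.0;
//   Volume volume;
//   volume.Voxelize(points, 3, nPoints, triangles, 3, nTriangles, 64, barycenter, rot);
//   VoxelSet vset;
//   volume.Convert(vset); // copies surface and interior voxels into the primitive set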
int32_t TriBoxOverlap(const Vec3<double>& boxcenter, const Vec3<double>& boxhalfsize, const Vec3<double>& triver0,
const Vec3<double>& triver1, const Vec3<double>& triver2);
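// The unspecialized ComputeAlignedPoint template below is intentionally a no-op; the
// float and double specializations that follow subtract the barycenter and apply the
// rotation to produce the aligned point.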
template <class T>
inline void ComputeAlignedPoint(const T* const points, const uint32_t idx, const Vec3<double>& barycenter,
const double (&rot)[3][3], Vec3<double>& pt){};
template <>
inline void ComputeAlignedPoint<float>(const float* const points, const uint32_t idx, const Vec3<double>& barycenter, const double (&rot)[3][3], Vec3<double>& pt)
{
double x = points[idx + 0] - barycenter[0];
double y = points[idx + 1] - barycenter[1];
double z = points[idx + 2] - barycenter[2];
pt[0] = rot[0][0] * x + rot[1][0] * y + rot[2][0] * z;
pt[1] = rot[0][1] * x + rot[1][1] * y + rot[2][1] * z;
pt[2] = rot[0][2] * x + rot[1][2] * y + rot[2][2] * z;
}
template <>
inline void ComputeAlignedPoint<double>(const double* const points, const uint32_t idx, const Vec3<double>& barycenter, const double (&rot)[3][3], Vec3<double>& pt)
{
double x = points[idx + 0] - barycenter[0];
double y = points[idx + 1] - barycenter[1];
double z = points[idx + 2] - barycenter[2];
pt[0] = rot[0][0] * x + rot[1][0] * y + rot[2][0] * z;
pt[1] = rot[0][1] * x + rot[1][1] * y + rot[2][1] * z;
pt[2] = rot[0][2] * x + rot[1][2] * y + rot[2][2] * z;
}
template <class T>
void Volume::ComputeBB(const T* const points, const uint32_t stridePoints, const uint32_t nPoints,
const Vec3<double>& barycenter, const double (&rot)[3][3])
{
Vec3<double> pt;
ComputeAlignedPoint(points, 0, barycenter, rot, pt);
m_maxBB = pt;
m_minBB = pt;
for (uint32_t v = 1; v < nPoints; ++v) {
ComputeAlignedPoint(points, v * stridePoints, barycenter, rot, pt);
for (int32_t i = 0; i < 3; ++i) {
if (pt[i] < m_minBB[i])
m_minBB[i] = pt[i];
else if (pt[i] > m_maxBB[i])
m_maxBB[i] = pt[i];
}
}
}
template <class T>
void Volume::Voxelize(const T* const points, const uint32_t stridePoints, const uint32_t nPoints,
const int32_t* const triangles, const uint32_t strideTriangles, const uint32_t nTriangles,
const size_t dim, const Vec3<double>& barycenter, const double (&rot)[3][3])
{
if (nPoints == 0) {
return;
}
ComputeBB(points, stridePoints, nPoints, barycenter, rot);
double d[3] = { m_maxBB[0] - m_minBB[0], m_maxBB[1] - m_minBB[1], m_maxBB[2] - m_minBB[2] };
double r;
if (d[0] > d[1] && d[0] > d[2]) {
r = d[0];
m_dim[0] = dim;
m_dim[1] = 2 + static_cast<size_t>(dim * d[1] / d[0]);
m_dim[2] = 2 + static_cast<size_t>(dim * d[2] / d[0]);
}
else if (d[1] > d[0] && d[1] > d[2]) {
r = d[1];
m_dim[1] = dim;
m_dim[0] = 2 + static_cast<size_t>(dim * d[0] / d[1]);
m_dim[2] = 2 + static_cast<size_t>(dim * d[2] / d[1]);
}
else {
r = d[2];
m_dim[2] = dim;
m_dim[0] = 2 + static_cast<size_t>(dim * d[0] / d[2]);
m_dim[1] = 2 + static_cast<size_t>(dim * d[1] / d[2]);
}
m_scale = r / (dim - 1);
double invScale = (dim - 1) / r;
Allocate();
m_numVoxelsOnSurface = 0;
m_numVoxelsInsideSurface = 0;
m_numVoxelsOutsideSurface = 0;
Vec3<double> p[3];
size_t i, j, k;
size_t i0, j0, k0;
size_t i1, j1, k1;
Vec3<double> boxcenter;
Vec3<double> pt;
const Vec3<double> boxhalfsize(0.5, 0.5, 0.5);
for (size_t t = 0, ti = 0; t < nTriangles; ++t, ti += strideTriangles) {
Vec3<int32_t> tri(triangles[ti + 0],
triangles[ti + 1],
triangles[ti + 2]);
for (int32_t c = 0; c < 3; ++c) {
ComputeAlignedPoint(points, tri[c] * stridePoints, barycenter, rot, pt);
p[c][0] = (pt[0] - m_minBB[0]) * invScale;
p[c][1] = (pt[1] - m_minBB[1]) * invScale;
p[c][2] = (pt[2] - m_minBB[2]) * invScale;
i = static_cast<size_t>(p[c][0] + 0.5);
j = static_cast<size_t>(p[c][1] + 0.5);
k = static_cast<size_t>(p[c][2] + 0.5);
            assert(i < m_dim[0] && j < m_dim[1] && k < m_dim[2]);
if (c == 0) {
i0 = i1 = i;
j0 = j1 = j;
k0 = k1 = k;
}
else {
if (i < i0)
i0 = i;
if (j < j0)
j0 = j;
if (k < k0)
k0 = k;
if (i > i1)
i1 = i;
if (j > j1)
j1 = j;
if (k > k1)
k1 = k;
}
}
if (i0 > 0)
--i0;
if (j0 > 0)
--j0;
if (k0 > 0)
--k0;
if (i1 < m_dim[0])
++i1;
if (j1 < m_dim[1])
++j1;
if (k1 < m_dim[2])
++k1;
for (size_t i = i0; i < i1; ++i) {
boxcenter[0] = (double)i;
for (size_t j = j0; j < j1; ++j) {
boxcenter[1] = (double)j;
for (size_t k = k0; k < k1; ++k) {
boxcenter[2] = (double)k;
int32_t res = TriBoxOverlap(boxcenter, boxhalfsize, p[0], p[1], p[2]);
unsigned char& value = GetVoxel(i, j, k);
if (res == 1 && value == PRIMITIVE_UNDEFINED) {
value = PRIMITIVE_ON_SURFACE;
++m_numVoxelsOnSurface;
}
}
}
}
}
FillOutsideSurface(0, 0, 0, m_dim[0], m_dim[1], 1);
FillOutsideSurface(0, 0, m_dim[2] - 1, m_dim[0], m_dim[1], m_dim[2]);
FillOutsideSurface(0, 0, 0, m_dim[0], 1, m_dim[2]);
FillOutsideSurface(0, m_dim[1] - 1, 0, m_dim[0], m_dim[1], m_dim[2]);
FillOutsideSurface(0, 0, 0, 1, m_dim[1], m_dim[2]);
FillOutsideSurface(m_dim[0] - 1, 0, 0, m_dim[0], m_dim[1], m_dim[2]);
FillInsideSurface();
}
}
#ifdef _MSC_VER
#pragma warning(pop)
#endif
#endif // VHACD_VOLUME_H
| 17,055 | C | 38.573086 | 756 | 0.612899 |