// File: NVIDIA-Omniverse/PhysX/physx/include/geometry/PxHairSystemDesc.h
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
#ifndef PX_HAIRSYSTEM_DESC_H
#define PX_HAIRSYSTEM_DESC_H
/** \addtogroup geomutils
@{
*/
#include "foundation/PxFlags.h"
#include "common/PxCoreUtilityTypes.h"
#if PX_ENABLE_FEATURES_UNDER_CONSTRUCTION
#if !PX_DOXYGEN
namespace physx
{
#endif
struct PxHairSystemDescFlag
{
enum Enum
{
/**
Determines whether memory is allocated on the device (GPU) or on the host (CPU)
*/
eDEVICE_MEMORY = (1<<0)
};
};
/**
\brief collection of set bits defined in PxHairSystemDescFlag
\see PxHairSystemDescFlag
*/
typedef PxFlags<PxHairSystemDescFlag::Enum, PxU16> PxHairSystemDescFlags;
PX_FLAGS_OPERATORS(PxHairSystemDescFlag::Enum, PxU16)
/**
\brief Descriptor class for #PxHairSystem
\note The data is *copied* when a PxHairSystem object is created from this
descriptor. The user may discard the data after the call.
\see PxHairSystem PxHairSystemGeometry PxShape PxPhysics.createHairSystem()
PxCooking.createHairSystem()
*/
class PxHairSystemDesc
{
public:
/**
\brief The number of strands in this hair system
<b>Default:</b> 0
*/
PxU32 numStrands;
/**
\brief The length of a hair segment
<b>Default:</b> 0.1
*/
PxReal segmentLength;
/**
\brief The radius of a hair segment
<b>Default:</b> 0.01
*/
PxReal segmentRadius;
/**
\brief Specifies the number of vertices each strand is composed of.
Length must be equal to numStrands, elements assumed to be of
type PxU32. Number of segments = numVerticesPerStrand - 1.
<b>Default:</b> NULL
*/
PxBoundedData numVerticesPerStrand;
/**
\brief Vertex positions and inverse mass [x,y,z,1/m] in PxBoundedData format.
If count is equal to numStrands, the entries are assumed to be strand
root positions; otherwise they are the positions of all vertices,
sorted by strand and ordered from root towards the tip of each strand.
Elements are assumed to be of type PxReal.
<b>Default:</b> NULL
*/
PxBoundedData vertices;
/**
\brief Vertex velocities in PxBoundedData format.
If NULL, zero velocity is assumed.
Elements are assumed to be of type PxReal.
<b>Default:</b> NULL
*/
PxBoundedData velocities;
/**
\brief Flag bits, combined from values of the enum ::PxHairSystemDescFlag
<b>Default:</b> 0
*/
PxHairSystemDescFlags flags;
/**
\brief Constructor with default initialization
*/
PX_INLINE PxHairSystemDesc();
/**
\brief (re)sets the structure to the default.
*/
PX_INLINE void setToDefault();
/**
\brief Check whether the descriptor is valid
\return True if the current settings are valid
*/
PX_INLINE bool isValid() const;
};
PX_INLINE PxHairSystemDesc::PxHairSystemDesc()
{
numStrands = 0;
segmentLength = 0.1f;
segmentRadius = 0.01f;
}
PX_INLINE void PxHairSystemDesc::setToDefault()
{
*this = PxHairSystemDesc();
}
PX_INLINE bool PxHairSystemDesc::isValid() const
{
if (segmentLength < 0.0f || segmentRadius < 0.0f)
return false;
if (2.0f * segmentRadius >= segmentLength)
return false;
if (numStrands == 0)
return false;
if (numVerticesPerStrand.count != numStrands)
return false;
PxU32 totalNumVertices = 0;
for (PxU32 i = 0; i < numVerticesPerStrand.count; i++)
{
const PxU32 numVertices = numVerticesPerStrand.at<PxU32>(i);
totalNumVertices += numVertices;
if (numVertices < 2)
{
return false;
}
}
if (vertices.count != totalNumVertices && vertices.count != numStrands)
return false;
if (velocities.count != totalNumVertices && velocities.count != 0)
return false;
return true;
}
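// A minimal usage sketch (illustrative only, not part of this header): filling the
// descriptor for two strands of four vertices each, with all vertex data provided.
// The PxVec4 layout [x,y,z,1/m] matches the 'vertices' documentation above.
//
//     PxHairSystemDesc desc;
//     desc.numStrands = 2;
//     PxU32 counts[2] = { 4, 4 };
//     desc.numVerticesPerStrand.data = counts;
//     desc.numVerticesPerStrand.count = 2;
//     desc.numVerticesPerStrand.stride = sizeof(PxU32);
//     PxVec4 verts[8]; // fill with [x,y,z,1/m], roots first within each strand
//     desc.vertices.data = verts;
//     desc.vertices.count = 8;
//     desc.vertices.stride = sizeof(PxVec4);
//     PX_ASSERT(desc.isValid());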
#if !PX_DOXYGEN
} // namespace physx
#endif
#endif
/** @} */
#endif
// File: NVIDIA-Omniverse/PhysX/physx/include/geometry/PxParticleSystemGeometry.h
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_PARTICLESYSTEM_GEOMETRY_H
#define PX_PARTICLESYSTEM_GEOMETRY_H
/** \addtogroup geomutils
@{
*/
#include "geometry/PxGeometry.h"
#include "common/PxCoreUtilityTypes.h"
#include "foundation/PxBounds3.h"
#include "foundation/PxVec4.h"
#include "PxParticleSystem.h"
#include "PxParticleSolverType.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief Particle system geometry class.
*/
class PxParticleSystemGeometry : public PxGeometry
{
public:
/**
\brief Default constructor.
Creates an empty object with no particles.
*/
PX_INLINE PxParticleSystemGeometry() : PxGeometry(PxGeometryType::ePARTICLESYSTEM){}
/**
\brief Copy constructor.
\param[in] that Other object
*/
PX_INLINE PxParticleSystemGeometry(const PxParticleSystemGeometry& that) : PxGeometry(that), mSolverType(that.mSolverType) {}
/**
\brief Assignment operator
*/
PX_INLINE void operator=(const PxParticleSystemGeometry& that)
{
mType = that.mType;
mSolverType = that.mSolverType;
}
/**
\brief Returns true if the geometry is valid.
\return True if the current settings are valid for shape creation.
@see PxRigidActor::createShape, PxPhysics::createShape
*/
PX_FORCE_INLINE bool isValid() const
{
if(mType != PxGeometryType::ePARTICLESYSTEM)
return false;
return true;
}
PxParticleSolverType::Enum mSolverType;
};
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
// File: NVIDIA-Omniverse/PhysX/physx/include/geometry/PxGeometryQuery.h
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_GEOMETRY_QUERY_H
#define PX_GEOMETRY_QUERY_H
/**
\brief Maximum sweep distance for scene sweeps. The distance parameter for sweep functions will be clamped to this value.
The reason for this is that GJK support functions cannot be evaluated near infinity. A viable alternative is a sweep of this maximum length followed by an infinite raycast.
@see PxScene
*/
#define PX_MAX_SWEEP_DISTANCE 1e8f
/** \addtogroup geomutils
@{
*/
#include "common/PxPhysXCommonConfig.h"
#include "geometry/PxGeometryHit.h"
#include "geometry/PxGeometryQueryFlags.h"
#include "geometry/PxGeometryQueryContext.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
class PxGeometry;
class PxContactBuffer;
/**
\brief Collection of geometry object queries (sweeps, raycasts, overlaps, ...).
*/
class PxGeometryQuery
{
public:
/**
\brief Raycast test against a geometry object.
All geometry types are supported except PxParticleSystemGeometry, PxTetrahedronMeshGeometry and PxHairSystemGeometry.
\param[in] origin The origin of the ray to test the geometry object against
\param[in] unitDir Normalized direction of the ray to test the geometry object against
\param[in] geom The geometry object to test the ray against
\param[in] pose Pose of the geometry object
\param[in] maxDist Maximum ray length, has to be in the [0, inf) range
\param[in] hitFlags Specification of the kind of information to retrieve on hit. Combination of #PxHitFlag flags
\param[in] maxHits max number of returned hits = size of 'rayHits' buffer
\param[out] rayHits Raycast hits information
\param[in] stride Stride value (in number of bytes) for rayHits array. Typically sizeof(PxGeomRaycastHit) for packed arrays.
\param[in] queryFlags Optional flags controlling the query.
\param[in] threadContext Optional user-defined per-thread context.
\return Number of hits between the ray and the geometry object
@see PxGeomRaycastHit PxGeometry PxTransform
*/
PX_PHYSX_COMMON_API static PxU32 raycast( const PxVec3& origin, const PxVec3& unitDir,
const PxGeometry& geom, const PxTransform& pose,
PxReal maxDist, PxHitFlags hitFlags,
PxU32 maxHits, PxGeomRaycastHit* PX_RESTRICT rayHits, PxU32 stride = sizeof(PxGeomRaycastHit), PxGeometryQueryFlags queryFlags = PxGeometryQueryFlag::eDEFAULT,
PxRaycastThreadContext* threadContext = NULL);
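// A minimal usage sketch (illustrative only): casting a downward ray against a
// unit sphere at the origin. With maxHits = 1 at most one hit is written to 'hit'.
//
//     PxSphereGeometry sphere(1.0f);
//     PxGeomRaycastHit hit;
//     PxU32 nbHits = PxGeometryQuery::raycast(
//         PxVec3(0.0f, 10.0f, 0.0f),          // origin
//         PxVec3(0.0f, -1.0f, 0.0f),          // unitDir, must be normalized
//         sphere, PxTransform(PxVec3(0.0f)),  // geometry and its pose
//         20.0f, PxHitFlag::eDEFAULT,         // maxDist, hitFlags
//         1, &hit);                           // maxHits, output buffer
//     // on a hit, nbHits == 1 and hit.distance == 9.0 for this setup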
/**
\brief Overlap test for two geometry objects.
All combinations are supported except:
\li PxPlaneGeometry vs. {PxPlaneGeometry, PxTriangleMeshGeometry, PxHeightFieldGeometry}
\li PxTriangleMeshGeometry vs. PxHeightFieldGeometry
\li PxHeightFieldGeometry vs. PxHeightFieldGeometry
\li Anything involving PxParticleSystemGeometry, PxTetrahedronMeshGeometry or PxHairSystemGeometry.
\param[in] geom0 The first geometry object
\param[in] pose0 Pose of the first geometry object
\param[in] geom1 The second geometry object
\param[in] pose1 Pose of the second geometry object
\param[in] queryFlags Optional flags controlling the query.
\param[in] threadContext Optional user-defined per-thread context.
\return True if the two geometry objects overlap
@see PxGeometry PxTransform
*/
PX_PHYSX_COMMON_API static bool overlap(const PxGeometry& geom0, const PxTransform& pose0,
const PxGeometry& geom1, const PxTransform& pose1,
PxGeometryQueryFlags queryFlags = PxGeometryQueryFlag::eDEFAULT, PxOverlapThreadContext* threadContext=NULL);
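// A minimal usage sketch (illustrative only): a unit-half-extent box at the origin
// against a sphere of radius 0.5 centered 1.2 units away along x (overlapping).
//
//     PxBoxGeometry box(PxVec3(1.0f));
//     PxSphereGeometry sphere(0.5f);
//     bool touching = PxGeometryQuery::overlap(
//         box,    PxTransform(PxVec3(0.0f)),
//         sphere, PxTransform(PxVec3(1.2f, 0.0f, 0.0f)));  // true here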
/**
\brief Sweep a specified geometry object in space and test for collision with a given object.
The following combinations are supported.
\li PxSphereGeometry vs. {PxSphereGeometry, PxPlaneGeometry, PxCapsuleGeometry, PxBoxGeometry, PxConvexMeshGeometry, PxTriangleMeshGeometry, PxHeightFieldGeometry}
\li PxCapsuleGeometry vs. {PxSphereGeometry, PxPlaneGeometry, PxCapsuleGeometry, PxBoxGeometry, PxConvexMeshGeometry, PxTriangleMeshGeometry, PxHeightFieldGeometry}
\li PxBoxGeometry vs. {PxSphereGeometry, PxPlaneGeometry, PxCapsuleGeometry, PxBoxGeometry, PxConvexMeshGeometry, PxTriangleMeshGeometry, PxHeightFieldGeometry}
\li PxConvexMeshGeometry vs. {PxSphereGeometry, PxPlaneGeometry, PxCapsuleGeometry, PxBoxGeometry, PxConvexMeshGeometry, PxTriangleMeshGeometry, PxHeightFieldGeometry}
\param[in] unitDir Normalized direction along which object geom0 should be swept
\param[in] maxDist Maximum sweep distance, has to be in the [0, inf) range
\param[in] geom0 The geometry object to sweep. Supported geometries are #PxSphereGeometry, #PxCapsuleGeometry, #PxBoxGeometry and #PxConvexMeshGeometry
\param[in] pose0 Pose of the geometry object to sweep
\param[in] geom1 The geometry object to test the sweep against
\param[in] pose1 Pose of the geometry object to sweep against
\param[out] sweepHit The sweep hit information. Only valid if this method returns true.
\param[in] hitFlags Specify which properties per hit should be computed and written to result hit array. Combination of #PxHitFlag flags
\param[in] inflation Surface of the swept shape is additively extruded in the normal direction, rounding corners and edges.
\param[in] queryFlags Optional flags controlling the query.
\param[in] threadContext Optional user-defined per-thread context.
\return True if the swept geometry object geom0 hits the object geom1
@see PxGeomSweepHit PxGeometry PxTransform
*/
PX_PHYSX_COMMON_API static bool sweep( const PxVec3& unitDir, const PxReal maxDist,
const PxGeometry& geom0, const PxTransform& pose0,
const PxGeometry& geom1, const PxTransform& pose1,
PxGeomSweepHit& sweepHit, PxHitFlags hitFlags = PxHitFlag::eDEFAULT,
const PxReal inflation = 0.0f, PxGeometryQueryFlags queryFlags = PxGeometryQueryFlag::eDEFAULT,
PxSweepThreadContext* threadContext = NULL);
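// A minimal usage sketch (illustrative only): sweeping a sphere down onto a box.
// 'sweepHit' is only valid when the function returns true.
//
//     PxSphereGeometry sphere(0.5f);
//     PxBoxGeometry box(PxVec3(1.0f));
//     PxGeomSweepHit sweepHit;
//     bool status = PxGeometryQuery::sweep(
//         PxVec3(0.0f, -1.0f, 0.0f), 10.0f,               // unitDir, maxDist
//         sphere, PxTransform(PxVec3(0.0f, 5.0f, 0.0f)),  // swept geometry
//         box,    PxTransform(PxVec3(0.0f)),              // target geometry
//         sweepHit);
//     // on a hit: sweepHit.distance == 3.5 for this setup (5 - 1 - 0.5)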
/**
\brief Compute minimum translational distance (MTD) between two geometry objects.
All combinations of geom objects are supported except:
- plane/plane
- plane/mesh
- plane/heightfield
- mesh/mesh
- mesh/heightfield
- heightfield/heightfield
- anything involving PxParticleSystemGeometry, PxTetrahedronMeshGeometry or PxHairSystemGeometry
The function returns a unit vector ('direction') and a penetration depth ('depth').
The depenetration vector D = direction * depth should be applied to the first object, to
get out of the second object.
The returned depth is always positive or zero.
If the objects do not overlap, the function cannot compute the MTD and returns false.
\param[out] direction Computed MTD unit direction
\param[out] depth Penetration depth. Always positive or zero.
\param[in] geom0 The first geometry object
\param[in] pose0 Pose of the first geometry object
\param[in] geom1 The second geometry object
\param[in] pose1 Pose of the second geometry object
\param[in] queryFlags Optional flags controlling the query.
\return True if the MTD has successfully been computed, i.e. if objects do overlap.
@see PxGeometry PxTransform
*/
PX_PHYSX_COMMON_API static bool computePenetration( PxVec3& direction, PxF32& depth,
const PxGeometry& geom0, const PxTransform& pose0,
const PxGeometry& geom1, const PxTransform& pose1,
PxGeometryQueryFlags queryFlags = PxGeometryQueryFlag::eDEFAULT);
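// A minimal usage sketch (illustrative only): depenetrating two overlapping unit
// spheres. Applying 'direction * depth' to the first pose resolves the overlap.
//
//     PxSphereGeometry s0(1.0f), s1(1.0f);
//     PxTransform pose0(PxVec3(0.0f)), pose1(PxVec3(1.5f, 0.0f, 0.0f));
//     PxVec3 direction;
//     PxF32 depth;
//     if(PxGeometryQuery::computePenetration(direction, depth, s0, pose0, s1, pose1))
//         pose0.p += direction * depth;  // expected: direction = (-1,0,0), depth = 0.5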
/**
\brief Computes distance between a point and a geometry object.
Currently supported geometry objects: box, sphere, capsule, convex, mesh.
\note For meshes, only the BVH34 midphase data-structure is supported.
\param[in] point The point P
\param[in] geom The geometry object
\param[in] pose Pose of the geometry object
\param[out] closestPoint Optionally returned closest point to P on the geom object. Only valid when returned distance is strictly positive.
\param[out] closestIndex Optionally returned closest (triangle) index. Only valid for triangle meshes.
\param[in] queryFlags Optional flags controlling the query.
\return Square distance between the point and the geom object, or 0.0 if the point is inside the object, or -1.0 if an error occurred (geometry type is not supported, or invalid pose)
@see PxGeometry PxTransform
*/
PX_PHYSX_COMMON_API static PxReal pointDistance(const PxVec3& point, const PxGeometry& geom, const PxTransform& pose,
PxVec3* closestPoint=NULL, PxU32* closestIndex=NULL,
PxGeometryQueryFlags queryFlags = PxGeometryQueryFlag::eDEFAULT);
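// A minimal usage sketch (illustrative only): squared distance from a point to a
// unit-half-extent box, optionally retrieving the closest point on the box.
//
//     PxBoxGeometry box(PxVec3(1.0f));
//     PxVec3 closest;
//     PxReal sqDist = PxGeometryQuery::pointDistance(
//         PxVec3(3.0f, 0.0f, 0.0f), box, PxTransform(PxVec3(0.0f)), &closest);
//     // sqDist == 4.0 here: the closest box point is (1,0,0), 2 units away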
/**
\brief Computes the bounds for a geometry object
\param[out] bounds Returned computed bounds
\param[in] geom The geometry object
\param[in] pose Pose of the geometry object
\param[in] offset Offset for computed bounds. This value is added to the geom's extents.
\param[in] inflation Scale factor for computed bounds. The geom's extents are multiplied by this value.
\param[in] queryFlags Optional flags controlling the query.
@see PxGeometry PxTransform
*/
PX_PHYSX_COMMON_API static void computeGeomBounds(PxBounds3& bounds, const PxGeometry& geom, const PxTransform& pose, float offset=0.0f, float inflation=1.0f, PxGeometryQueryFlags queryFlags = PxGeometryQueryFlag::eDEFAULT);
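// A minimal usage sketch (illustrative only): computing world-space bounds for a
// capsule, padded by a small offset.
//
//     PxCapsuleGeometry capsule(0.5f, 1.0f);  // radius, half-height
//     PxBounds3 bounds;
//     PxGeometryQuery::computeGeomBounds(bounds, capsule, PxTransform(PxVec3(0.0f)),
//         0.01f,   // offset: added to the extents
//         1.0f);   // inflation: scale factor on the extents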
/**
\brief Generate collision contacts between a convex geometry and a single triangle
\param[in] geom The geometry object. Can be a capsule, a box or a convex mesh
\param[in] pose Pose of the geometry object
\param[in] triangleVertices Triangle vertices in local space
\param[in] triangleIndex Triangle index
\param[in] contactDistance The distance at which contacts begin to be generated between the pairs
\param[in] meshContactMargin The mesh contact margin.
\param[in] toleranceLength The toleranceLength. Used for scaling distance-based thresholds internally to produce appropriate results given simulations in different units
\param[out] contactBuffer A buffer to write contacts to.
\return True if there was collision
*/
PX_PHYSX_COMMON_API static bool generateTriangleContacts(const PxGeometry& geom, const PxTransform& pose, const PxVec3 triangleVertices[3], PxU32 triangleIndex, PxReal contactDistance, PxReal meshContactMargin, PxReal toleranceLength, PxContactBuffer& contactBuffer);
/**
\brief Checks if provided geometry is valid.
\param[in] geom The geometry object.
\return True if geometry is valid.
@see PxGeometry
*/
PX_PHYSX_COMMON_API static bool isValid(const PxGeometry& geom);
};
#if !PX_DOXYGEN
}
#endif
/** @} */
#endif
// File: NVIDIA-Omniverse/PhysX/physx/include/geometry/PxGeometryQueryContext.h
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_GEOMETRY_QUERY_CONTEXT_H
#define PX_GEOMETRY_QUERY_CONTEXT_H
#include "common/PxPhysXCommonConfig.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief A per-thread context passed to low-level query functions.
This is a user-defined optional parameter that gets passed down to low-level query functions (raycast / overlap / sweep).
This is not used directly in PhysX, although the context in this case is the PxHitCallback used in the query. This allows
user-defined query functions, such as the ones from PxCustomGeometry, to get some additional data about the query. In this
case this is a 'per-query' context rather than 'per-thread', but the initial goal of this parameter is to give custom
query callbacks access to per-thread data structures (e.g. caches) that could be needed to implement the callbacks.
In any case this is mostly for user-controlled query systems.
*/
struct PxQueryThreadContext
{
};
/**
\brief A per-thread context passed to low-level raycast functions.
*/
typedef PxQueryThreadContext PxRaycastThreadContext;
/**
\brief A per-thread context passed to low-level overlap functions.
*/
typedef PxQueryThreadContext PxOverlapThreadContext;
/**
\brief A per-thread context passed to low-level sweep functions.
*/
typedef PxQueryThreadContext PxSweepThreadContext;
#if !PX_DOXYGEN
}
#endif
#endif
// File: NVIDIA-Omniverse/PhysX/physx/include/geometry/PxGjkQuery.h
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_GJK_QUERY_H
#define PX_GJK_QUERY_H
#include "common/PxPhysXCommonConfig.h"
#include "foundation/PxVec3.h"
#include "foundation/PxQuat.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief Collection of GJK query functions (sweeps, raycasts, overlaps, ...).
*/
class PxGjkQuery
{
public:
/**
\brief Abstract interface for a user defined shape GJK mapping support.
A user defined shape consists of a core shape and a margin. If the distance
between two shapes' cores is equal to the sum of their margins, these shapes are
considered touching.
*/
struct Support
{
/* Virtual destructor */
virtual ~Support() {}
/**
\brief Return the user defined shape margin. Margin should be greater than or equal to 0
\return Margin.
*/
virtual PxReal getMargin() const = 0;
/**
\brief Return the farthest point on the user defined shape's core in given direction.
\param[in] dir Direction
\return Farthest point in given direction.
*/
virtual PxVec3 supportLocal(const PxVec3& dir) const = 0;
};
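// A minimal sketch (illustrative only) of a user-defined support mapping: a sphere
// expressed as a point core plus a margin equal to the radius. Any convex shape can
// be mapped this way by returning its farthest core point along 'dir'.
//
//     struct SphereSupport : PxGjkQuery::Support
//     {
//         PxReal radius;
//         SphereSupport(PxReal r) : radius(r) {}
//         virtual PxReal getMargin() const { return radius; }
//         virtual PxVec3 supportLocal(const PxVec3&) const { return PxVec3(0.0f); }
//     };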
/**
\brief Computes proximity information for two shapes using GJK-EPA algorithm
\param[in] a Shape A support mapping
\param[in] b Shape B support mapping
\param[in] poseA Shape A transformation
\param[in] poseB Shape B transformation
\param[in] contactDistance The distance at which proximity info begins to be computed between the shapes
\param[in] toleranceLength The toleranceLength. Used for scaling distance-based thresholds internally to produce appropriate results given simulations in different units
\param[out] pointA The closest/deepest point on shape A surface
\param[out] pointB The closest/deepest point on shape B surface
\param[out] separatingAxis Translating shape B along 'separatingAxis' by 'separation' makes the shapes touching
\param[out] separation Translating shape B along 'separatingAxis' by 'separation' makes the shapes touching
\return False if the distance is greater than contactDistance.
*/
PX_PHYSX_COMMON_API static bool proximityInfo(const Support& a, const Support& b, const PxTransform& poseA, const PxTransform& poseB,
PxReal contactDistance, PxReal toleranceLength, PxVec3& pointA, PxVec3& pointB, PxVec3& separatingAxis, PxReal& separation);
/**
\brief Raycast test against the given shape.
\param[in] shape Shape support mapping
\param[in] pose Shape transformation
\param[in] rayStart The start point of the ray to test the shape against
\param[in] unitDir Normalized direction of the ray to test the shape against
\param[in] maxDist Maximum ray length, has to be in the [0, inf) range
\param[out] t Hit distance
\param[out] n Hit normal
\param[out] p Hit point
\return True if there is a hit.
*/
PX_PHYSX_COMMON_API static bool raycast(const Support& shape, const PxTransform& pose, const PxVec3& rayStart,
const PxVec3& unitDir, PxReal maxDist, PxReal& t, PxVec3& n, PxVec3& p);
/**
\brief Overlap test for two shapes.
\param[in] a Shape A support mapping
\param[in] b Shape B support mapping
\param[in] poseA Shape A transformation
\param[in] poseB Shape B transformation
\return True if the shapes overlap.
*/
PX_PHYSX_COMMON_API static bool overlap(const Support& a, const Support& b, const PxTransform& poseA, const PxTransform& poseB);
/**
\brief Sweep the shape B in space and test for collision with the shape A.
\param[in] a Shape A support mapping
\param[in] b Shape B support mapping
\param[in] poseA Shape A transformation
\param[in] poseB Shape B transformation
\param[in] unitDir Normalized direction of the ray to test the shape against
\param[in] maxDist Maximum ray length, has to be in the [0, inf) range
\param[out] t Hit distance
\param[out] n Hit normal
\param[out] p Hit point
\return True if there is a hit.
*/
PX_PHYSX_COMMON_API static bool sweep(const Support& a, const Support& b, const PxTransform& poseA, const PxTransform& poseB,
const PxVec3& unitDir, PxReal maxDist, PxReal& t, PxVec3& n, PxVec3& p);
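// A minimal usage sketch (illustrative only), reusing the SphereSupport mapping
// sketched above: overlap and sweep between two spheres.
//
//     SphereSupport a(1.0f), b(0.5f);
//     PxTransform poseA(PxVec3(0.0f)), poseB(PxVec3(0.0f, 3.0f, 0.0f));
//     bool touching = PxGjkQuery::overlap(a, b, poseA, poseB);  // false here
//     PxReal t; PxVec3 n, p;
//     bool hit = PxGjkQuery::sweep(a, b, poseA, poseB,          // sweeps b towards a
//         PxVec3(0.0f, -1.0f, 0.0f), 10.0f, t, n, p);           // hit at t == 1.5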
};
#if !PX_DOXYGEN
}
#endif
#endif
// File: NVIDIA-Omniverse/PhysX/physx/include/geometry/PxTetrahedronMeshGeometry.h
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_TETRAHEDRON_GEOMETRY_H
#define PX_TETRAHEDRON_GEOMETRY_H
/** \addtogroup geomutils
@{
*/
#include "geometry/PxGeometry.h"
#include "geometry/PxMeshScale.h"
#include "common/PxCoreUtilityTypes.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
class PxTetrahedronMesh;
/**
\brief Tetrahedron mesh geometry class.
This class wraps a tetrahedron mesh such that it can be used in contexts where a PxGeometry type is needed.
*/
class PxTetrahedronMeshGeometry : public PxGeometry
{
public:
/**
\brief Constructor. By default creates an empty object with a NULL mesh and identity scale.
*/
PX_INLINE PxTetrahedronMeshGeometry(PxTetrahedronMesh* mesh = NULL) :
PxGeometry(PxGeometryType::eTETRAHEDRONMESH),
tetrahedronMesh(mesh)
{}
/**
\brief Copy constructor.
\param[in] that Other object
*/
PX_INLINE PxTetrahedronMeshGeometry(const PxTetrahedronMeshGeometry& that) :
PxGeometry(that),
tetrahedronMesh(that.tetrahedronMesh)
{}
/**
\brief Assignment operator
*/
PX_INLINE void operator=(const PxTetrahedronMeshGeometry& that)
{
mType = that.mType;
tetrahedronMesh = that.tetrahedronMesh;
}
/**
\brief Returns true if the geometry is valid.
\return True if the current settings are valid for shape creation.
\note A valid tetrahedron mesh has a positive scale value in each direction (scale.scale.x > 0, scale.scale.y > 0, scale.scale.z > 0).
It is illegal to call PxRigidActor::createShape and PxPhysics::createShape with a tetrahedron mesh that has zero extents in any direction.
@see PxRigidActor::createShape, PxPhysics::createShape
*/
PX_INLINE bool isValid() const;
public:
PxTetrahedronMesh* tetrahedronMesh; //!< A reference to the mesh object.
};
PX_INLINE bool PxTetrahedronMeshGeometry::isValid() const
{
if(mType != PxGeometryType::eTETRAHEDRONMESH)
return false;
if(!tetrahedronMesh)
return false;
return true;
}
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
// File: NVIDIA-Omniverse/PhysX/physx/include/geometry/PxHairSystemGeometry.h
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
#ifndef PX_HAIRSYSTEM_GEOMETRY_H
#define PX_HAIRSYSTEM_GEOMETRY_H
/** \addtogroup geomutils
@{
*/
#include "geometry/PxGeometry.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief Hair system geometry class.
*/
class PxHairSystemGeometry : public PxGeometry
{
public:
/**
\brief Default constructor.
*/
PX_INLINE PxHairSystemGeometry() : PxGeometry(PxGeometryType::eHAIRSYSTEM) {}
/**
\brief Returns true if the geometry is valid.
\return True if the current settings are valid for shape creation.
@see PxRigidActor::createShape, PxPhysics::createShape
*/
PX_FORCE_INLINE bool isValid() const
{
if(mType != PxGeometryType::eHAIRSYSTEM)
return false;
return true;
}
};
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
// File: NVIDIA-Omniverse/PhysX/physx/include/geometry/PxMeshQuery.h
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_MESH_QUERY_H
#define PX_MESH_QUERY_H
/** \addtogroup geomutils
@{
*/
#include "common/PxPhysXCommonConfig.h"
#include "geometry/PxGeometryHit.h"
#include "geometry/PxGeometryQueryFlags.h"
#include "geometry/PxReportCallback.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
class PxGeometry;
class PxConvexMeshGeometry;
class PxTriangleMeshGeometry;
class PxHeightFieldGeometry;
class PxTriangle;
struct PxMeshMeshQueryFlag
{
enum Enum
{
eDEFAULT = 0, //!< Report all overlaps
eDISCARD_COPLANAR = (1<<0), //!< Ignore coplanar triangle-triangle overlaps
eRESERVED = (1<<1), //!< Reserved flag
eRESERVED1 = (1<<1), //!< Reserved flag
eRESERVED2 = (1<<2), //!< Reserved flag
eRESERVED3 = (1<<3) //!< Reserved flag
};
};
PX_FLAGS_TYPEDEF(PxMeshMeshQueryFlag, PxU32)
class PxMeshQuery
{
public:
/**
\brief Retrieves triangle data from a triangle ID.
This function can be used together with #findOverlapTriangleMesh() to retrieve triangle properties.
\param[in] triGeom Geometry of the triangle mesh to extract the triangle from.
\param[in] transform Transform for the triangle mesh
\param[in] triangleIndex The index of the triangle to retrieve.
\param[out] triangle Triangle points in world space.
\param[out] vertexIndices Returned vertex indices for given triangle
\param[out] adjacencyIndices Returned 3 triangle adjacency internal face indices (0xFFFFFFFF if no adjacency). The mesh must be cooked with cooking param buildTriangleAdjacencies enabled.
\note This function will flip the triangle normal whenever triGeom.scale.hasNegativeDeterminant() is true.
@see PxTriangle PxTriangleFlags PxTriangleID findOverlapTriangleMesh()
*/
PX_PHYSX_COMMON_API static void getTriangle(const PxTriangleMeshGeometry& triGeom, const PxTransform& transform, PxTriangleID triangleIndex, PxTriangle& triangle, PxU32* vertexIndices=NULL, PxU32* adjacencyIndices=NULL);
/**
\brief Retrieves triangle data from a triangle ID.
This function can be used together with #findOverlapHeightField() to retrieve triangle properties.
\param[in] hfGeom Geometry of the height field to extract the triangle from.
\param[in] transform Transform for the height field.
\param[in] triangleIndex The index of the triangle to retrieve.
\param[out] triangle Triangle points in world space.
\param[out] vertexIndices Returned vertex indices for given triangle
\param[out] adjacencyIndices Returned 3 triangle adjacency triangle indices (0xFFFFFFFF if no adjacency).
\note This function will flip the triangle normal whenever the height field scales flip the triangle winding (negative scale determinant).
\note TriangleIndex is an index in the internal format, which can contain out-of-bounds indices in the last row.
To traverse all triangle indices in the height field, the following code can be applied:
	for (PxU32 row = 0; row < (nbRows - 1); row++)
	{
		for (PxU32 col = 0; col < (nbCols - 1); col++)
		{
			for (PxU32 k = 0; k < 2; k++)
			{
				const PxU32 triIndex = 2 * (row*nbCols + col) + k;
				....
			}
		}
	}
@see PxTriangle PxTriangleFlags PxTriangleID findOverlapHeightField()
*/
PX_PHYSX_COMMON_API static void getTriangle(const PxHeightFieldGeometry& hfGeom, const PxTransform& transform, PxTriangleID triangleIndex, PxTriangle& triangle, PxU32* vertexIndices=NULL, PxU32* adjacencyIndices=NULL);
/**
\brief Find the mesh triangles which touch the specified geometry object.
For mesh-vs-mesh overlap tests, please use the specialized function below.
Returned triangle indices can be used with #getTriangle() to retrieve the triangle properties.
\param[in] geom The geometry object to test for mesh triangle overlaps. Supported geometries are #PxSphereGeometry, #PxCapsuleGeometry and #PxBoxGeometry
\param[in] geomPose Pose of the geometry object
\param[in] meshGeom The triangle mesh geometry to check overlap against
\param[in] meshPose Pose of the triangle mesh
\param[out] results Indices of overlapping triangles
\param[in] maxResults Size of 'results' buffer
\param[in] startIndex Index of first result to be retrieved. Previous indices are skipped.
\param[out] overflow True if a buffer overflow occurred
\param[in] queryFlags Optional flags controlling the query.
\return Number of overlaps found, i.e. number of elements written to the results buffer
@see PxTriangleMeshGeometry getTriangle() PxGeometryQueryFlags
*/
PX_PHYSX_COMMON_API static PxU32 findOverlapTriangleMesh( const PxGeometry& geom, const PxTransform& geomPose,
const PxTriangleMeshGeometry& meshGeom, const PxTransform& meshPose,
PxU32* results, PxU32 maxResults, PxU32 startIndex, bool& overflow,
PxGeometryQueryFlags queryFlags = PxGeometryQueryFlag::eDEFAULT);
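// A minimal usage sketch (illustrative only; 'spherePose', 'meshGeom' and
// 'meshPose' are assumed to be set up by the caller): gathering up to 64 mesh
// triangles touched by a sphere, then fetching their vertices via getTriangle().
//
//     PxSphereGeometry sphere(1.0f);
//     PxU32 results[64];
//     bool overflow;
//     PxU32 nb = PxMeshQuery::findOverlapTriangleMesh(
//         sphere, spherePose, meshGeom, meshPose, results, 64, 0, overflow);
//     for(PxU32 i = 0; i < nb; i++)
//     {
//         PxTriangle tri;
//         PxMeshQuery::getTriangle(meshGeom, meshPose, results[i], tri);
//     }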
/**
\brief Mesh-vs-mesh overlap test
A specialized findOverlapTriangleMesh function for mesh-vs-mesh. The other findOverlapTriangleMesh() function above cannot be used
directly, since it returns a single set of triangle indices belonging to only one of the meshes. This function instead returns pairs
of triangle indices, one from each of the two input meshes.
Returned triangle indices can be used with #getTriangle() to retrieve the triangle properties.
\note This is only implemented for the PxMeshMidPhase::eBVH34 data structure.
\param[in] callback The callback object used to report results
\param[in] meshGeom0 First triangle mesh geometry
\param[in] meshPose0 Pose of first triangle mesh geometry
\param[in] meshGeom1 Second triangle mesh geometry
\param[in] meshPose1 Pose of second triangle mesh geometry
\param[in] queryFlags Optional flags controlling the query.
\param[in] meshMeshFlags Optional flags controlling the query.
\param[in] tolerance Optional tolerance distance
\return true if an overlap has been detected, false if the meshes are disjoint
@see PxTriangleMeshGeometry getTriangle() PxReportCallback PxGeometryQueryFlags PxMeshMeshQueryFlags
*/
PX_PHYSX_COMMON_API static bool findOverlapTriangleMesh(PxReportCallback<PxGeomIndexPair>& callback,
const PxTriangleMeshGeometry& meshGeom0, const PxTransform& meshPose0,
const PxTriangleMeshGeometry& meshGeom1, const PxTransform& meshPose1,
PxGeometryQueryFlags queryFlags = PxGeometryQueryFlag::eDEFAULT,
PxMeshMeshQueryFlags meshMeshFlags = PxMeshMeshQueryFlag::eDEFAULT,
float tolerance = 0.0f);
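// A minimal usage sketch (illustrative only; 'meshGeom0/1' and 'meshPose0/1' are
// assumed to be set up by the caller, with both meshes cooked with
// PxMeshMidPhase::eBVH34): collecting overlapping triangle pairs into a PxArray
// via PxDynamicArrayReportCallback, declared in PxReportCallback.h.
//
//     PxArray<PxGeomIndexPair> pairs;
//     PxDynamicArrayReportCallback<PxGeomIndexPair> callback(pairs);
//     bool anyOverlap = PxMeshQuery::findOverlapTriangleMesh(
//         callback, meshGeom0, meshPose0, meshGeom1, meshPose1);
//     // pairs[i].id0 indexes a triangle of meshGeom0, pairs[i].id1 one of meshGeom1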
/**
\brief Find the height field triangles which touch the specified geometry object.
Returned triangle indices can be used with #getTriangle() to retrieve the triangle properties.
\param[in] geom The geometry object to test for height field overlaps. Supported geometries are #PxSphereGeometry, #PxCapsuleGeometry and #PxBoxGeometry. The sphere and capsule queries are currently conservative estimates.
\param[in] geomPose Pose of the geometry object
\param[in] hfGeom The height field geometry to check overlap against
\param[in] hfPose Pose of the height field
\param[out] results Indices of overlapping triangles
\param[in] maxResults Size of 'results' buffer
\param[in] startIndex Index of first result to be retrieved. Previous indices are skipped.
\param[out] overflow True if a buffer overflow occurred
\param[in] queryFlags Optional flags controlling the query.
\return Number of overlaps found, i.e. number of elements written to the results buffer
@see PxHeightFieldGeometry getTriangle() PxGeometryQueryFlags
*/
PX_PHYSX_COMMON_API static PxU32 findOverlapHeightField(const PxGeometry& geom, const PxTransform& geomPose,
const PxHeightFieldGeometry& hfGeom, const PxTransform& hfPose,
PxU32* results, PxU32 maxResults, PxU32 startIndex, bool& overflow,
PxGeometryQueryFlags queryFlags = PxGeometryQueryFlag::eDEFAULT);
/**
\brief Sweep a specified geometry object in space and test for collision with a set of given triangles.
This function simply sweeps input geometry against each input triangle, in the order they are given.
This is an O(N) operation with N = number of input triangles. It does not use any particular acceleration structure.
\param[in] unitDir Normalized direction of the sweep.
\param[in] distance Sweep distance. Needs to be larger than 0. Clamped to PX_MAX_SWEEP_DISTANCE.
\param[in] geom The geometry object to sweep. Supported geometries are #PxSphereGeometry, #PxCapsuleGeometry and #PxBoxGeometry
\param[in] pose Pose of the geometry object to sweep.
\param[in] triangleCount Number of specified triangles
\param[in] triangles Array of triangles to sweep against
\param[out] sweepHit The sweep hit information. See the notes below for limitations about returned results.
\param[in] hitFlags Specification of the kind of information to retrieve on hit. Combination of #PxHitFlag flags. See the notes below for limitations about supported flags.
\param[in] cachedIndex Cached triangle index for subsequent calls. Cached triangle is tested first. Optional parameter.
\param[in] inflation This parameter creates a skin around the swept geometry which increases its extents for sweeping. The sweep will register a hit as soon as the skin touches a shape, and will return the corresponding distance and normal.
\param[in] doubleSided Counterpart of PxMeshGeometryFlag::eDOUBLE_SIDED for input triangles.
\param[in] queryFlags Optional flags controlling the query.
\return True if the swept geometry object hits the specified triangles
\note Only the following geometry types are currently supported: PxSphereGeometry, PxCapsuleGeometry, PxBoxGeometry
\note If a shape from the scene is already overlapping with the query shape in its starting position, the hit is returned unless eASSUME_NO_INITIAL_OVERLAP was specified.
\note This function returns a single closest hit across all the input triangles. Multiple hits are not supported.
\note Supported hitFlags are PxHitFlag::eDEFAULT, PxHitFlag::eASSUME_NO_INITIAL_OVERLAP, PxHitFlag::ePRECISE_SWEEP, PxHitFlag::eMESH_BOTH_SIDES, PxHitFlag::eMESH_ANY.
\note ePOSITION is only defined when there is no initial overlap (sweepHit.hadInitialOverlap() == false)
\note The returned normal for initially overlapping sweeps is set to -unitDir.
\note Otherwise the returned normal is the front normal of the triangle even if PxHitFlag::eMESH_BOTH_SIDES is set.
\note The returned PxGeomSweepHit::faceIndex parameter will hold the index of the hit triangle in input array, i.e. the range is [0; triangleCount). For initially overlapping sweeps, this is the index of overlapping triangle.
\note The inflation parameter is not compatible with PxHitFlag::ePRECISE_SWEEP.
@see PxTriangle PxSweepHit PxGeometry PxTransform PxGeometryQueryFlags
*/
PX_PHYSX_COMMON_API static bool sweep(const PxVec3& unitDir,
const PxReal distance,
const PxGeometry& geom,
const PxTransform& pose,
PxU32 triangleCount,
const PxTriangle* triangles,
PxGeomSweepHit& sweepHit,
PxHitFlags hitFlags = PxHitFlag::eDEFAULT,
const PxU32* cachedIndex = NULL,
const PxReal inflation = 0.0f,
bool doubleSided = false,
PxGeometryQueryFlags queryFlags = PxGeometryQueryFlag::eDEFAULT);
};
#if !PX_DOXYGEN
}
#endif
/** @} */
#endif
// File: NVIDIA-Omniverse/PhysX/physx/include/geometry/PxReportCallback.h
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_REPORT_CALLBACK_H
#define PX_REPORT_CALLBACK_H
/** \addtogroup geomutils
@{
*/
#include "common/PxPhysXCommonConfig.h"
#include "foundation/PxArray.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief Base class for callback reporting an unknown number of items to users.
This can be used as-is and customized by users, or several pre-designed callbacks can be used instead (see below).
This design lets users decide how to retrieve the results of a query:
- either one by one via a regular callback
- or one batch at a time via a callback
- or written out directly to their own C-style buffer
- or pushed back to their own PxArray
- etc
@see PxRegularReportCallback PxLocalStorageReportCallback PxExternalStorageReportCallback PxDynamicArrayReportCallback
*/
template<class T>
class PxReportCallback
{
public:
PxReportCallback(T* buffer=NULL, PxU32 capacity=0) : mBuffer(buffer), mCapacity(capacity), mSize(0) {}
virtual ~PxReportCallback() {}
T* mBuffer; // Destination buffer for writing results. If NULL, the system will use its internal buffer and set that pointer as it sees fit.
            // Otherwise users can set it to where they want the results to be written.
PxU32 mCapacity; // Capacity of mBuffer. If mBuffer is NULL, this controls how many items are reported to users at the same time (with a limit of 256).
PxU32 mSize; //!< Current number of items in the buffer. This is entirely managed by the system.
/**
\brief Reports query results to users.
This will be called by the system as many times as necessary to report all results.
\param[in] nbItems Number of reported items
\param[in] items array of reported items
\return true to continue the query, false to abort the query
*/
virtual bool flushResults(PxU32 nbItems, const T* items) = 0;
};
/**
\brief Regular report callback
This reports results like a regular callback would:
- without explicit buffer management from users
- by default, one item at a time
This customized callback sends results to users via the processResults() function.
The capacity parameter dictates how many items can be reported at a time,
i.e. how many times the flushResults/processResults function will be called by the system.
@see PxReportCallback
*/
template<class T>
class PxRegularReportCallback : public PxReportCallback<T>
{
public:
PxRegularReportCallback(const PxU32 capacity=1)
{
PX_ASSERT(capacity<=256);
this->mCapacity = capacity;
}
virtual bool flushResults(PxU32 nbItems, const T* items)
{
PX_ASSERT(nbItems<=this->mCapacity);
PX_ASSERT(items==this->mBuffer);
return processResults(nbItems, items);
}
/**
\brief Reports query results to users.
\param[in] nbItems Number of reported items
\param[in] items array of reported items
\return true to continue the query, false to abort the query
*/
virtual bool processResults(PxU32 nbItems, const T* items) = 0;
};
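// A minimal sketch (illustrative only) of a concrete regular callback: counts the
// reported items, 16 at a time, and aborts the query once 100 have been seen.
//
//     class CountingCallback : public PxRegularReportCallback<PxGeomIndexPair>
//     {
//         PxU32 mTotal;
//     public:
//         CountingCallback() : PxRegularReportCallback<PxGeomIndexPair>(16), mTotal(0) {}
//         virtual bool processResults(PxU32 nbItems, const PxGeomIndexPair*)
//         {
//             mTotal += nbItems;
//             return mTotal < 100;  // returning false aborts the query
//         }
//     };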
/**
\brief Local storage report callback
This is the same as a regular callback, except the destination buffer is a local buffer within the class.
This customized callback sends results to users via the processResults() function.
The capacity of the embedded buffer (determined by a template parameter) dictates how many items can be reported at a time,
i.e. how many times the flushResults/processResults function will be called by the system.
@see PxReportCallback
*/
template<class T, const PxU32 capacityT>
class PxLocalStorageReportCallback : public PxReportCallback<T>
{
T mLocalStorage[capacityT];
public:
PxLocalStorageReportCallback()
{
this->mBuffer = mLocalStorage;
this->mCapacity = capacityT;
}
virtual bool flushResults(PxU32 nbItems, const T* items)
{
PX_ASSERT(items==mLocalStorage);
PX_ASSERT(nbItems<=this->mCapacity);
return processResults(nbItems, items);
}
/**
\brief Reports query results to users.
\param[in] nbItems Number of reported items
\param[in] items array of reported items
\return true to continue the query, false to abort the query
*/
virtual bool processResults(PxU32 nbItems, const T* items) = 0;
};
/**
\brief External storage report callback
This is the same as a regular callback, except the destination buffer is a user-provided external buffer.
Typically the provided buffer can be larger here than for PxLocalStorageReportCallback, and it could
even be a scratchpad-kind of memory shared by multiple sub-systems.
This would be the same as having a C-style buffer to write out results in the query interface.
This customized callback sends results to users via the processResults() function.
The capacity parameter dictates how many items can be reported at a time,
i.e. how many times the flushResults/processResults function will be called by the system.
@see PxReportCallback
*/
template<class T>
class PxExternalStorageReportCallback : public PxReportCallback<T>
{
public:
PxExternalStorageReportCallback(T* buffer, PxU32 capacity)
{
this->mBuffer = buffer;
this->mCapacity = capacity;
}
virtual bool flushResults(PxU32 nbItems, const T* items)
{
PX_ASSERT(items==this->mBuffer);
PX_ASSERT(nbItems<=this->mCapacity);
return processResults(nbItems, items);
}
/**
\brief Reports query results to users.
\param[in] nbItems Number of reported items
\param[in] items array of reported items
\return true to continue the query, false to abort the query
*/
virtual bool processResults(PxU32 nbItems, const T* items) = 0;
};
/**
\brief Dynamic array report callback
This callback emulates the behavior of pushing results to a (user-provided) dynamic array.
This customized callback does not actually call users back during the query, results are
available afterwards in the provided dynamic array. This would be the same as having a PxArray
directly in the query interface.
@see PxReportCallback
*/
template<class T>
class PxDynamicArrayReportCallback : public PxReportCallback<T>
{
public:
PxDynamicArrayReportCallback(PxArray<T>& results) : mResults(results)
{
mResults.reserve(32);
this->mBuffer = mResults.begin();
this->mCapacity = mResults.capacity();
}
virtual bool flushResults(PxU32 nbItems, const T* /*items*/)
{
const PxU32 size = mResults.size();
const PxU32 capa = mResults.capacity();
const PxU32 newSize = size+nbItems;
PX_ASSERT(newSize<=capa);
mResults.forceSize_Unsafe(newSize);
if(newSize==capa)
{
const PxU32 newCapa = capa*2;
mResults.reserve(newCapa);
this->mBuffer = mResults.begin() + newSize;
this->mCapacity = mResults.capacity() - newSize;
}
return true;
}
PxArray<T>& mResults;
};
#if !PX_DOXYGEN
}
#endif
/** @} */
#endif
// File: NVIDIA-Omniverse/PhysX/physx/include/geometry/PxTetrahedronMesh.h
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_TETRAHEDRON_MESH_H
#define PX_TETRAHEDRON_MESH_H
/** \addtogroup geomutils
@{ */
#include "foundation/PxVec3.h"
#include "foundation/PxBounds3.h"
#include "foundation/PxUserAllocated.h"
#include "common/PxPhysXCommonConfig.h"
#include "common/PxBase.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
struct PxTetrahedronMeshFlag
{
enum Enum
{
e16_BIT_INDICES = (1 << 1) //!< The tetrahedron mesh has 16-bit vertex indices
};
};
/**
\brief collection of set bits defined in PxTetrahedronMeshFlag.
@see PxTetrahedronMeshFlag
*/
typedef PxFlags<PxTetrahedronMeshFlag::Enum, PxU8> PxTetrahedronMeshFlags;
PX_FLAGS_OPERATORS(PxTetrahedronMeshFlag::Enum, PxU8)
/**
\brief A data container providing mass, rest pose and other information required for softbody simulation
Stores properties of a softbody such as the inverse mass per node, the rest pose matrix per tetrahedral element, etc.
Mainly used internally to store runtime data.
*/
class PxSoftBodyAuxData : public PxRefCounted
{
public:
/**
\brief Decrements the reference count of the softbody auxiliary data and releases it if the new reference count is zero.
@see PxPhysics.createTetrahedronMesh()
*/
virtual void release() = 0;
/**
\brief Get the inverse mass of each vertex of the tetrahedron mesh.
\return A pointer to an array holding one inverse mass per vertex of the tetrahedron mesh. Size: number of vertices * sizeof(PxReal).
*/
virtual PxReal* getGridModelInvMass() = 0;
protected:
PX_INLINE PxSoftBodyAuxData(PxType concreteType, PxBaseFlags baseFlags) : PxRefCounted(concreteType, baseFlags) {}
PX_INLINE PxSoftBodyAuxData(PxBaseFlags baseFlags) : PxRefCounted(baseFlags) {}
virtual ~PxSoftBodyAuxData() {}
virtual bool isKindOf(const char* name) const { PX_IS_KIND_OF(name, "PxSoftBodyAuxData", PxRefCounted); }
};
/**
\brief A tetrahedron mesh, also called a 'tetrahedron soup'.
It is represented as an indexed tetrahedron list. There are no restrictions on the
tetrahedron data.
To avoid duplicating data when you have several instances of a particular
mesh positioned differently, you do not use this class to represent a
mesh object directly. Instead, you create an instance of this mesh via
the PxTetrahedronMeshGeometry and PxShape classes.
<h3>Creation</h3>
To create an instance of this class call PxPhysics::createTetrahedronMesh(),
and release() to delete it. This is only possible
once you have released all of its PxShape instances.
<h3>Visualizations:</h3>
\li #PxVisualizationParameter::eCOLLISION_AABBS
\li #PxVisualizationParameter::eCOLLISION_SHAPES
\li #PxVisualizationParameter::eCOLLISION_AXES
\li #PxVisualizationParameter::eCOLLISION_FNORMALS
\li #PxVisualizationParameter::eCOLLISION_EDGES
@see PxTetrahedronMeshDesc PxTetrahedronMeshGeometry PxShape PxPhysics.createTetrahedronMesh()
*/
class PxTetrahedronMesh : public PxRefCounted
{
public:
/**
\brief Returns the number of vertices.
\return number of vertices
@see getVertices()
*/
virtual PxU32 getNbVertices() const = 0;
/**
\brief Returns the vertices
\return array of vertices
@see getNbVertices()
*/
virtual const PxVec3* getVertices() const = 0;
/**
\brief Returns the number of tetrahedrons.
\return number of tetrahedrons
@see getTetrahedrons()
*/
virtual PxU32 getNbTetrahedrons() const = 0;
/**
\brief Returns the tetrahedron indices.
The indices can be 16 or 32bit depending on the number of tetrahedrons in the mesh.
Call getTetrahedronMeshFlags() to know if the indices are 16 or 32 bits.
The number of indices is the number of tetrahedrons * 4.
\return array of tetrahedrons
@see getNbTetrahedrons() getTetrahedronMeshFlags() getTetrahedraRemap()
*/
virtual const void* getTetrahedrons() const = 0;
/**
\brief Reads the PxTetrahedronMesh flags.
See the list of flags #PxTetrahedronMeshFlags
\return The values of the PxTetrahedronMesh flags.
*/
virtual PxTetrahedronMeshFlags getTetrahedronMeshFlags() const = 0;
/**
\brief Returns the tetrahedra remapping table.
The tetrahedra are internally sorted according to various criteria. Hence the internal tetrahedron order
does not always match the original (user-defined) order. The remapping table helps finding the old
indices knowing the new ones:
remapTable[ internalTetrahedronIndex ] = originalTetrahedronIndex
\return the remapping table (or NULL if 'PxCookingParams::suppressTriangleMeshRemapTable' has been used)
@see getNbTetrahedrons() getTetrahedrons() PxCookingParams::suppressTriangleMeshRemapTable
*/
virtual const PxU32* getTetrahedraRemap() const = 0;
/**
\brief Returns the local-space (vertex space) AABB from the tetrahedron mesh.
\return local-space bounds
*/
virtual PxBounds3 getLocalBounds() const = 0;
/**
\brief Decrements the reference count of a tetrahedron mesh and releases it if the new reference count is zero.
@see PxPhysics.createTetrahedronMesh()
*/
virtual void release() = 0;
protected:
PX_INLINE PxTetrahedronMesh(PxType concreteType, PxBaseFlags baseFlags) : PxRefCounted(concreteType, baseFlags) {}
PX_INLINE PxTetrahedronMesh(PxBaseFlags baseFlags) : PxRefCounted(baseFlags) {}
virtual ~PxTetrahedronMesh() {}
virtual bool isKindOf(const char* name) const { PX_IS_KIND_OF(name, "PxTetrahedronMesh", PxRefCounted); }
};
/**
\brief A softbody mesh, containing structures to store collision shape, simulation shape and deformation state
The class bundles shapes and deformation state of a softbody that is simulated using FEM. The meshes used for
collision detection and for the FEM calculations are both tetrahedral meshes. While collision detection requires
a mesh that matches the surface of the simulated body as exactly as possible, the simulation mesh has more freedom
such that it can be optimized for tetrahedra without small angles and nodes that aren't shared by too many elements.
<h3>Creation</h3>
To create an instance of this class call PxPhysics::createSoftBodyMesh(),
and release() to delete it. This is only possible
once you have released all of its PxShape instances.
*/
class PxSoftBodyMesh : public PxRefCounted
{
public:
/**
\brief Const accessor to the softbody's collision mesh.
@see PxTetrahedronMesh
*/
virtual const PxTetrahedronMesh* getCollisionMesh() const = 0;
/**
\brief Accessor to the softbody's collision mesh.
@see PxTetrahedronMesh
*/
virtual PxTetrahedronMesh* getCollisionMesh() = 0;
/**
\brief Const accessor to the softbody's simulation mesh.
@see PxTetrahedronMesh
*/
virtual const PxTetrahedronMesh* getSimulationMesh() const = 0;
/**
\brief Accessor to the softbody's simulation mesh.
@see PxTetrahedronMesh
*/
virtual PxTetrahedronMesh* getSimulationMesh() = 0;
/**
\brief Const accessor to the softbody's auxiliary data like mass and rest pose information.
@see PxSoftBodyAuxData
*/
virtual const PxSoftBodyAuxData* getSoftBodyAuxData() const = 0;
/**
\brief Accessor to the softbody's auxiliary data like mass and rest pose information.
@see PxSoftBodyAuxData
*/
virtual PxSoftBodyAuxData* getSoftBodyAuxData() = 0;
/**
\brief Decrements the reference count of the softbody mesh and releases it if the new reference count is zero.
@see PxPhysics.createSoftBodyMesh()
*/
virtual void release() = 0;
protected:
PX_INLINE PxSoftBodyMesh(PxType concreteType, PxBaseFlags baseFlags) : PxRefCounted(concreteType, baseFlags) {}
PX_INLINE PxSoftBodyMesh(PxBaseFlags baseFlags) : PxRefCounted(baseFlags) {}
virtual ~PxSoftBodyMesh() {}
virtual bool isKindOf(const char* name) const { PX_IS_KIND_OF(name, "PxSoftBodyMesh", PxRefCounted); }
};
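/*
Usage sketch (illustrative, not part of the SDK): reading back the two tetrahedral meshes
bundled in a cooked softbody mesh. softBodyMesh is assumed to come from
PxPhysics::createSoftBodyMesh().

const PxTetrahedronMesh* collisionMesh = softBodyMesh->getCollisionMesh();
const PxTetrahedronMesh* simulationMesh = softBodyMesh->getSimulationMesh();
const PxU32 nbCollisionTets = collisionMesh->getNbTetrahedrons();
const PxU32 nbSimulationTets = simulationMesh->getNbTetrahedrons();
*/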
/**
\brief Contains information about how to update the collision mesh's vertices given a deformed simulation tetmesh.
@see PxTetrahedronMeshData
*/
class PxCollisionMeshMappingData : public PxUserAllocated
{
public:
virtual void release() = 0;
virtual ~PxCollisionMeshMappingData() {}
};
/**
\brief Stores data to accelerate collision detection of a tetrahedral mesh
@see PxTetrahedronMeshData
*/
class PxSoftBodyCollisionData : public PxUserAllocated
{
};
/**
\brief Contains raw geometry information describing the tetmesh's vertices and its elements (tetrahedra)
@see PxTetrahedronMeshData
*/
class PxTetrahedronMeshData : public PxUserAllocated
{
};
/**
\brief Stores data to compute and store the state of a deformed tetrahedral mesh
@see PxTetrahedronMeshData
*/
class PxSoftBodySimulationData : public PxUserAllocated
{
};
/**
\brief Combines PxTetrahedronMeshData and PxSoftBodyCollisionData
@see PxTetrahedronMeshData PxSoftBodyCollisionData
*/
class PxCollisionTetrahedronMeshData : public PxUserAllocated
{
public:
virtual const PxTetrahedronMeshData* getMesh() const = 0;
virtual PxTetrahedronMeshData* getMesh() = 0;
virtual const PxSoftBodyCollisionData* getData() const = 0;
virtual PxSoftBodyCollisionData* getData() = 0;
virtual void release() = 0;
virtual ~PxCollisionTetrahedronMeshData() {}
};
/**
\brief Combines PxTetrahedronMeshData and PxSoftBodySimulationData
@see PxTetrahedronMeshData PxSoftBodySimulationData
*/
class PxSimulationTetrahedronMeshData : public PxUserAllocated
{
public:
virtual PxTetrahedronMeshData* getMesh() = 0;
virtual PxSoftBodySimulationData* getData() = 0;
virtual void release() = 0;
virtual ~PxSimulationTetrahedronMeshData() {}
};
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
|
NVIDIA-Omniverse/PhysX/physx/include/geometry/PxHeightFieldDesc.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_HEIGHTFIELD_DESC_H
#define PX_HEIGHTFIELD_DESC_H
/** \addtogroup geomutils
@{
*/
#include "common/PxPhysXCommonConfig.h"
#include "geometry/PxHeightFieldFlag.h"
#include "common/PxCoreUtilityTypes.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief Descriptor class for #PxHeightField.
\note The heightfield data is *copied* when a PxHeightField object is created from this descriptor. After the call the
user may discard the height data.
@see PxHeightField PxHeightFieldGeometry PxShape PxPhysics.createHeightField() PxCooking.createHeightField()
*/
class PxHeightFieldDesc
{
public:
/**
\brief Number of sample rows in the height field samples array.
\note Local space X-axis corresponds to rows.
<b>Range:</b> >1<br>
<b>Default:</b> 0
*/
PxU32 nbRows;
/**
\brief Number of sample columns in the height field samples array.
\note Local space Z-axis corresponds to columns.
<b>Range:</b> >1<br>
<b>Default:</b> 0
*/
PxU32 nbColumns;
/**
\brief Format of the sample data.
Currently the only supported format is PxHeightFieldFormat::eS16_TM.
<b>Default:</b> PxHeightFieldFormat::eS16_TM
@see PxHeightFormat PxHeightFieldDesc.samples
*/
PxHeightFieldFormat::Enum format;
/**
\brief The samples array.
It is copied to the SDK's storage at creation time.
There are nbRows * nbColumns samples in the array,
which define nbRows * nbColumns vertices and cells,
of which (nbRows - 1) * (nbColumns - 1) cells are actually used.
The array index of sample(row, column) = row * nbColumns + column.
The byte offset of sample(row, column) = sampleStride * (row * nbColumns + column).
The sample data follows at the offset and spans the number of bytes defined by the format.
Then there are zero or more unused bytes depending on sampleStride before the next sample.
<b>Default:</b> NULL
@see PxHeightFormat
*/
PxStridedData samples;
/**
This threshold is used by the collision detection to determine if a height field edge is convex
and can generate contact points.
Usually the convexity of an edge is determined from the angle (or cosine of the angle) between
the normals of the faces sharing that edge.
The height field allows a more efficient approach by comparing height values of neighboring vertices.
This parameter offsets the comparison. Smaller changes than 0.5 will not alter the set of convex edges.
The rule of thumb is that larger values will result in fewer edge contacts.
This parameter is ignored in contact generation with sphere and capsule primitives.
<b>Range:</b> [0, PX_MAX_F32)<br>
<b>Default:</b> 0
*/
PxReal convexEdgeThreshold;
/**
\brief Flags bits, combined from values of the enum ::PxHeightFieldFlag.
<b>Default:</b> 0
@see PxHeightFieldFlag PxHeightFieldFlags
*/
PxHeightFieldFlags flags;
/**
\brief Constructor sets to default.
*/
PX_INLINE PxHeightFieldDesc();
/**
\brief (re)sets the structure to the default.
*/
PX_INLINE void setToDefault();
/**
\brief Returns true if the descriptor is valid.
\return True if the current settings are valid.
*/
PX_INLINE bool isValid() const;
};
PX_INLINE PxHeightFieldDesc::PxHeightFieldDesc() //constructor sets to default
{
nbColumns = 0;
nbRows = 0;
format = PxHeightFieldFormat::eS16_TM;
convexEdgeThreshold = 0.0f;
flags = PxHeightFieldFlags();
}
PX_INLINE void PxHeightFieldDesc::setToDefault()
{
*this = PxHeightFieldDesc();
}
PX_INLINE bool PxHeightFieldDesc::isValid() const
{
if (nbColumns < 2)
return false;
if (nbRows < 2)
return false;
if(format != PxHeightFieldFormat::eS16_TM)
return false;
if (samples.stride < 4)
return false;
if (convexEdgeThreshold < 0)
return false;
if ((flags & PxHeightFieldFlag::eNO_BOUNDARY_EDGES) != flags)
return false;
return true;
}
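/*
Usage sketch (assumes PxHeightFieldSample from PxHeightFieldSample.h and a hypothetical
computeHeight() height source): filling a descriptor with an nbRows x nbColumns grid of
samples before creating the height field. Sample (row, column) lives at index
row * nbColumns + column.

const PxU32 nbRows = 16, nbColumns = 16;
PxHeightFieldSample* samplesData = new PxHeightFieldSample[nbRows * nbColumns];
for(PxU32 row=0; row<nbRows; row++)
for(PxU32 col=0; col<nbColumns; col++)
samplesData[row * nbColumns + col].height = computeHeight(row, col);

PxHeightFieldDesc desc;
desc.nbRows = nbRows;
desc.nbColumns = nbColumns;
desc.samples.data = samplesData;
desc.samples.stride = sizeof(PxHeightFieldSample);
PX_ASSERT(desc.isValid());
*/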
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
|
NVIDIA-Omniverse/PhysX/physx/include/geometry/PxCapsuleGeometry.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_CAPSULE_GEOMETRY_H
#define PX_CAPSULE_GEOMETRY_H
/** \addtogroup geomutils
@{
*/
#include "geometry/PxGeometry.h"
#include "foundation/PxFoundationConfig.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief Class representing the geometry of a capsule.
A capsule is the union of a cylinder of length 2 * halfHeight with the given radius, centered
at the origin and extending along the x axis, and two hemispherical ends capping the cylinder.
\note The scaling of the capsule is expected to be baked into these values, there is no additional scaling parameter.
The function PxTransformFromSegment is a helper for generating an appropriate transform for the capsule from the capsule's interior line segment.
@see PxTransformFromSegment
*/
class PxCapsuleGeometry : public PxGeometry
{
public:
/**
\brief Constructor, initializes to a capsule with passed radius and half height.
*/
PX_INLINE PxCapsuleGeometry(PxReal radius_=0.0f, PxReal halfHeight_=0.0f) : PxGeometry(PxGeometryType::eCAPSULE), radius(radius_), halfHeight(halfHeight_) {}
/**
\brief Copy constructor.
\param[in] that Other object
*/
PX_INLINE PxCapsuleGeometry(const PxCapsuleGeometry& that) : PxGeometry(that), radius(that.radius), halfHeight(that.halfHeight) {}
/**
\brief Assignment operator
*/
PX_INLINE void operator=(const PxCapsuleGeometry& that)
{
mType = that.mType;
radius = that.radius;
halfHeight = that.halfHeight;
}
/**
\brief Returns true if the geometry is valid.
\return True if the current settings are valid.
\note A valid capsule has radius > 0, halfHeight >= 0.
It is illegal to call PxRigidActor::createShape and PxPhysics::createShape with a capsule that has zero radius or height.
@see PxRigidActor::createShape, PxPhysics::createShape
*/
PX_INLINE bool isValid() const;
public:
/**
\brief The radius of the capsule.
*/
PxReal radius;
/**
\brief Half of the capsule's height, measured between the centers of the hemispherical ends.
*/
PxReal halfHeight;
};
PX_INLINE bool PxCapsuleGeometry::isValid() const
{
if(mType != PxGeometryType::eCAPSULE)
return false;
if(!PxIsFinite(radius) || !PxIsFinite(halfHeight))
return false;
if(radius <= 0.0f || halfHeight < 0.0f)
return false;
return true;
}
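/*
Usage sketch: a capsule whose total length is 2 * (halfHeight + radius) along its local
x axis. The PxTransformFromSegment helper mentioned above can derive a suitable pose from
the capsule's interior segment endpoints.

PxCapsuleGeometry capsule(0.5f, 1.0f); // radius, halfHeight
PX_ASSERT(capsule.isValid());
*/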
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
|
NVIDIA-Omniverse/PhysX/physx/include/geometry/PxBoxGeometry.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_BOX_GEOMETRY_H
#define PX_BOX_GEOMETRY_H
/** \addtogroup geomutils
@{
*/
#include "geometry/PxGeometry.h"
#include "foundation/PxVec3.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief Class representing the geometry of a box.
The geometry of a box can be fully specified by its half extents. These are half of its width, height, and depth.
\note The scaling of the box is expected to be baked into these values, there is no additional scaling parameter.
*/
class PxBoxGeometry : public PxGeometry
{
public:
/**
\brief Constructor to initialize half extents from scalar parameters.
\param hx Initial half extents' x component.
\param hy Initial half extents' y component.
\param hz Initial half extents' z component.
*/
PX_INLINE PxBoxGeometry(PxReal hx=0.0f, PxReal hy=0.0f, PxReal hz=0.0f) : PxGeometry(PxGeometryType::eBOX), halfExtents(hx, hy, hz) {}
/**
\brief Constructor to initialize half extents from vector parameter.
\param halfExtents_ Initial half extents.
*/
PX_INLINE PxBoxGeometry(PxVec3 halfExtents_) : PxGeometry(PxGeometryType::eBOX), halfExtents(halfExtents_) {}
/**
\brief Copy constructor.
\param[in] that Other object
*/
PX_INLINE PxBoxGeometry(const PxBoxGeometry& that) : PxGeometry(that), halfExtents(that.halfExtents) {}
/**
\brief Assignment operator
*/
PX_INLINE void operator=(const PxBoxGeometry& that)
{
mType = that.mType;
halfExtents = that.halfExtents;
}
/**
\brief Returns true if the geometry is valid.
\return True if the current settings are valid
\note A valid box has a positive extent in each direction (halfExtents.x > 0, halfExtents.y > 0, halfExtents.z > 0).
It is illegal to call PxRigidActor::createShape and PxPhysics::createShape with a box that has zero extent in any direction.
@see PxRigidActor::createShape, PxPhysics::createShape
*/
PX_INLINE bool isValid() const;
public:
/**
\brief Half of the width, height, and depth of the box.
*/
PxVec3 halfExtents;
};
PX_INLINE bool PxBoxGeometry::isValid() const
{
if(mType != PxGeometryType::eBOX)
return false;
if(!halfExtents.isFinite())
return false;
if(halfExtents.x <= 0.0f || halfExtents.y <= 0.0f || halfExtents.z <= 0.0f)
return false;
return true;
}
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
|
NVIDIA-Omniverse/PhysX/physx/include/geometry/PxGeometryQueryFlags.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_GEOMETRY_QUERY_FLAGS_H
#define PX_GEOMETRY_QUERY_FLAGS_H
#include "foundation/PxFlags.h"
#include "common/PxPhysXCommonConfig.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief Geometry-level query flags.
@see PxScene::raycast PxScene::overlap PxScene::sweep PxBVH::raycast PxBVH::overlap PxBVH::sweep PxGeometryQuery::raycast PxGeometryQuery::overlap PxGeometryQuery::sweep
@see PxGeometryQuery::computePenetration PxGeometryQuery::pointDistance PxGeometryQuery::computeGeomBounds
@see PxMeshQuery::findOverlapTriangleMesh PxMeshQuery::findOverlapHeightField PxMeshQuery::sweep
*/
struct PxGeometryQueryFlag
{
enum Enum
{
eSIMD_GUARD = (1<<0), //!< Saves/restores SIMD control word for each query (safer but slower). Omit this if you took care of it yourself in your app.
eDEFAULT = eSIMD_GUARD
};
};
/**
\brief collection of set bits defined in PxGeometryQueryFlag.
@see PxGeometryQueryFlag
*/
PX_FLAGS_TYPEDEF(PxGeometryQueryFlag, PxU32)
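/*
Usage sketch: geometry query entry points typically accept these flags as a trailing
parameter. Dropping eSIMD_GUARD is only safe when the application manages the SIMD
control word itself.

PxGeometryQueryFlags safeFlags(PxGeometryQueryFlag::eDEFAULT); // saves/restores the SIMD control word per query
PxGeometryQueryFlags fastFlags(0); // caller guarantees the control word is already set up
*/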
#if !PX_DOXYGEN
}
#endif
#endif
|
NVIDIA-Omniverse/PhysX/physx/include/geometry/PxSimpleTriangleMesh.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_SIMPLE_TRIANGLE_MESH_H
#define PX_SIMPLE_TRIANGLE_MESH_H
/** \addtogroup geomutils
@{
*/
#include "foundation/PxVec3.h"
#include "foundation/PxFlags.h"
#include "common/PxCoreUtilityTypes.h"
#include "common/PxPhysXCommonConfig.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief Enum with flag values to be used in PxSimpleTriangleMesh::flags.
*/
struct PxMeshFlag
{
enum Enum
{
/**
\brief Specifies if the SDK should flip normals.
The PhysX libraries assume that the face normal of a triangle with vertices [a,b,c] can be computed as:
edge1 = b-a
edge2 = c-a
face_normal = edge1 x edge2.
Note: This is the same as a counterclockwise winding in a right handed coordinate system or
alternatively a clockwise winding order in a left handed coordinate system.
If this does not match the winding order for your triangles, raise the below flag.
*/
eFLIPNORMALS = (1<<0),
e16_BIT_INDICES = (1<<1) //!< Denotes the use of 16-bit vertex indices
};
};
/**
\brief collection of set bits defined in PxMeshFlag.
@see PxMeshFlag
*/
typedef PxFlags<PxMeshFlag::Enum,PxU16> PxMeshFlags;
PX_FLAGS_OPERATORS(PxMeshFlag::Enum,PxU16)
/**
\brief A structure describing a triangle mesh.
*/
class PxSimpleTriangleMesh
{
public:
/**
\brief Pointer to first vertex point.
*/
PxBoundedData points;
/**
\brief Pointer to first triangle.
Caller may add triangles.stride bytes to the pointer to access the next triangle.
These are triplets of 0 based indices:
vert0 vert1 vert2
vert0 vert1 vert2
vert0 vert1 vert2
...
where each vertex index is either a 32 or 16 bit unsigned integer. There are triangles.count*3 indices.
This is declared as a void pointer because it is actually either a PxU16 or a PxU32 pointer.
*/
PxBoundedData triangles;
/**
\brief Flags bits, combined from values of the enum ::PxMeshFlag
*/
PxMeshFlags flags;
/**
\brief constructor sets to default.
*/
PX_INLINE PxSimpleTriangleMesh();
/**
\brief (re)sets the structure to the default.
*/
PX_INLINE void setToDefault();
/**
\brief returns true if the current settings are valid
*/
PX_INLINE bool isValid() const;
};
PX_INLINE PxSimpleTriangleMesh::PxSimpleTriangleMesh()
{
}
PX_INLINE void PxSimpleTriangleMesh::setToDefault()
{
*this = PxSimpleTriangleMesh();
}
PX_INLINE bool PxSimpleTriangleMesh::isValid() const
{
// Check geometry
if(points.count > 0xffff && flags & PxMeshFlag::e16_BIT_INDICES)
return false;
if(!points.data)
return false;
if(points.stride < sizeof(PxVec3)) //should be at least one point's worth of data
return false;
// Check topology
// The triangles pointer is not mandatory
if(triangles.data)
{
// Indexed mesh
PxU32 limit = (flags & PxMeshFlag::e16_BIT_INDICES) ? sizeof(PxU16)*3 : sizeof(PxU32)*3;
if(triangles.stride < limit)
return false;
}
return true;
}
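/*
Usage sketch: describing a single triangle with 32-bit indices. The vertex and index
arrays must stay alive for as long as the descriptor is consumed.

PxVec3 verts[3] = { PxVec3(0,0,0), PxVec3(1,0,0), PxVec3(0,0,1) };
PxU32 indices[3] = { 0, 1, 2 };

PxSimpleTriangleMesh mesh;
mesh.points.count = 3;
mesh.points.stride = sizeof(PxVec3);
mesh.points.data = verts;
mesh.triangles.count = 1;
mesh.triangles.stride = 3 * sizeof(PxU32);
mesh.triangles.data = indices;
PX_ASSERT(mesh.isValid());
*/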
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
|
NVIDIA-Omniverse/PhysX/physx/include/geometry/PxHeightFieldFlag.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_HEIGHT_FIELD_FLAG_H
#define PX_HEIGHT_FIELD_FLAG_H
/** \addtogroup geomutils
@{
*/
#include "foundation/PxFlags.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief Describes the format of height field samples.
@see PxHeightFieldDesc.format PxHeightFieldDesc.samples
*/
struct PxHeightFieldFormat
{
enum Enum
{
/**
\brief Height field height data is 16 bit signed integers, followed by triangle materials.
Each sample is 32 bits wide arranged as follows:
\image html heightFieldFormat_S16_TM.png
1) First there is a 16 bit height value.
2) Next, two one-byte material indices, with the high bit of each byte reserved for special use
(so the material index is only 7 bits).
The high bit of material0 is the tess-flag.
The high bit of material1 is reserved for future use.
There are zero or more unused bytes before the next sample depending on PxHeightFieldDesc.sampleStride,
where the application may eventually keep its own data.
This is the only format supported at the moment.
@see PxHeightFieldDesc.format PxHeightFieldDesc.samples
*/
eS16_TM = (1 << 0)
};
};
/**
\brief Determines the tessellation of height field cells.
@see PxHeightFieldDesc.format PxHeightFieldDesc.samples
*/
struct PxHeightFieldTessFlag
{
enum Enum
{
/**
\brief This flag determines which way each quad cell is subdivided.
The flag lowered indicates subdivision like this: (the 0th vertex is referenced by only one triangle)
\image html heightfieldTriMat2.PNG
<pre>
+--+--+--+---> column
| /| /| /|
|/ |/ |/ |
+--+--+--+
| /| /| /|
|/ |/ |/ |
+--+--+--+
|
|
V row
</pre>
The flag raised indicates subdivision like this: (the 0th vertex is shared by two triangles)
\image html heightfieldTriMat1.PNG
<pre>
+--+--+--+---> column
|\ |\ |\ |
| \| \| \|
+--+--+--+
|\ |\ |\ |
| \| \| \|
+--+--+--+
|
|
V row
</pre>
@see PxHeightFieldDesc.format PxHeightFieldDesc.samples
*/
e0TH_VERTEX_SHARED = (1 << 0)
};
};
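/*
Usage sketch (assumes PxHeightFieldSample from PxHeightFieldSample.h): the tessellation
flag is stored per sample in the high bit of materialIndex0.

PxHeightFieldSample sample;
sample.height = 0;
sample.materialIndex0 = 0;
sample.materialIndex1 = 0;
sample.setTessFlag(); // 0th vertex shared by two triangles
sample.clearTessFlag(); // 0th vertex referenced by only one triangle
*/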
/**
\brief Enum with flag values to be used in PxHeightFieldDesc.flags.
*/
struct PxHeightFieldFlag
{
enum Enum
{
/**
\brief Disable collisions with height field with boundary edges.
Raise this flag if several terrain patches are going to be placed adjacent to each other,
to avoid a bump when sliding across.
This flag is ignored in contact generation with sphere and capsule shapes.
@see PxHeightFieldDesc.flags
*/
eNO_BOUNDARY_EDGES = (1 << 0)
};
};
/**
\brief collection of set bits defined in PxHeightFieldFlag.
@see PxHeightFieldFlag
*/
typedef PxFlags<PxHeightFieldFlag::Enum,PxU16> PxHeightFieldFlags;
PX_FLAGS_OPERATORS(PxHeightFieldFlag::Enum,PxU16)
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
|
NVIDIA-Omniverse/PhysX/physx/include/geometry/PxGeometryHit.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_GEOMETRY_HIT_H
#define PX_GEOMETRY_HIT_H
/** \addtogroup scenequery
@{
*/
#include "foundation/PxVec3.h"
#include "foundation/PxFlags.h"
#include "common/PxPhysXCommonConfig.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief Scene query and geometry query behavior flags.
PxHitFlags are used for 3 different purposes:
1) To request hit fields to be filled in by scene queries (such as hit position, normal, face index or UVs).
2) Once the query is completed, to indicate which fields are valid (note that a query may produce more valid fields than requested).
3) To specify additional options for the narrow phase and mid-phase intersection routines.
All these flags apply to both scene queries and geometry queries (PxGeometryQuery).
@see PxRaycastHit PxSweepHit PxOverlapHit PxScene.raycast PxScene.sweep PxScene.overlap PxGeometryQuery PxFindFaceIndex
*/
struct PxHitFlag
{
enum Enum
{
ePOSITION = (1<<0), //!< "position" member of #PxQueryHit is valid
eNORMAL = (1<<1), //!< "normal" member of #PxQueryHit is valid
eUV = (1<<3), //!< "u" and "v" barycentric coordinates of #PxQueryHit are valid. Not applicable to sweep queries.
eASSUME_NO_INITIAL_OVERLAP = (1<<4), //!< Performance hint flag for sweeps when it is known upfront there's no initial overlap.
//!< NOTE: using this flag may cause undefined results if shapes are initially overlapping.
eANY_HIT = (1<<5), //!< Report any first hit. Used for geometries that contain more than one primitive. For meshes,
//!< if neither eMESH_MULTIPLE nor eANY_HIT is specified, a single closest hit will be reported.
eMESH_MULTIPLE = (1<<6), //!< Report all hits for meshes rather than just the first. Not applicable to sweep queries.
eMESH_ANY = eANY_HIT, //!< @deprecated Deprecated, please use eANY_HIT instead.
eMESH_BOTH_SIDES = (1<<7), //!< Report hits with back faces of mesh triangles. Also report hits for raycast
//!< originating on mesh surface and facing away from the surface normal. Not applicable to sweep queries.
//!< Please refer to the user guide for heightfield-specific differences.
ePRECISE_SWEEP = (1<<8), //!< Use more accurate but slower narrow phase sweep tests.
//!< May provide better compatibility with PhysX 3.2 sweep behavior.
eMTD = (1<<9), //!< Report the minimum translation depth, normal and contact point.
eFACE_INDEX = (1<<10), //!< "face index" member of #PxQueryHit is valid
eDEFAULT = ePOSITION|eNORMAL|eFACE_INDEX,
/** \brief Only this subset of flags can be modified by pre-filter. Other modifications will be discarded. */
eMODIFIABLE_FLAGS = eMESH_MULTIPLE|eMESH_BOTH_SIDES|eASSUME_NO_INITIAL_OVERLAP|ePRECISE_SWEEP
};
};
/**
\brief collection of set bits defined in PxHitFlag.
@see PxHitFlag
*/
PX_FLAGS_TYPEDEF(PxHitFlag, PxU16)
/**
\brief Scene query hit information.
*/
struct PxQueryHit
{
PX_INLINE PxQueryHit() : faceIndex(0xFFFFffff) {}
/**
Face index of touched triangle, for triangle meshes, convex meshes and height fields.
\note This index will default to 0xFFFFffff value for overlap queries.
\note Please refer to the user guide for more details for sweep queries.
\note This index is remapped by mesh cooking. Use #PxTriangleMesh::getTrianglesRemap() to convert to original mesh index.
\note For convex meshes use #PxConvexMesh::getPolygonData() to retrieve touched polygon data.
*/
PxU32 faceIndex;
};
/**
\brief Scene query hit information for raycasts and sweeps returning hit position and normal information.
::PxHitFlag flags can be passed to scene query functions, as an optimization, to cause the SDK to
only generate specific members of this structure.
*/
struct PxLocationHit : PxQueryHit
{
PX_INLINE PxLocationHit() : flags(0), position(PxVec3(0)), normal(PxVec3(0)), distance(PX_MAX_REAL) {}
/**
\note For raycast hits: true for shapes overlapping with raycast origin.
\note For sweep hits: true for shapes overlapping at zero sweep distance.
@see PxRaycastHit PxSweepHit
*/
PX_INLINE bool hadInitialOverlap() const { return (distance <= 0.0f); }
// the following fields are set in accordance with the #PxHitFlags
PxHitFlags flags; //!< Hit flags specifying which members contain valid values.
PxVec3 position; //!< World-space hit position (flag: #PxHitFlag::ePOSITION)
PxVec3 normal; //!< World-space hit normal (flag: #PxHitFlag::eNORMAL)
/**
\brief Distance to hit.
\note If the eMTD flag is used, distance will be a negative value if shapes are overlapping indicating the penetration depth.
\note Otherwise, this value will be >= 0 */
PxF32 distance;
};
/**
\brief Stores results of raycast queries.
::PxHitFlag flags can be passed to raycast function, as an optimization, to cause the SDK to only compute specified members of this
structure.
Some members like barycentric coordinates are currently only computed for triangle meshes and height fields, but next versions
might provide them in other cases. The client code should check #flags to make sure returned values are valid.
@see PxScene.raycast
*/
struct PxGeomRaycastHit : PxLocationHit
{
PX_INLINE PxGeomRaycastHit() : u(0.0f), v(0.0f) {}
// the following fields are set in accordance with the #PxHitFlags
PxReal u, v; //!< barycentric coordinates of hit point, for triangle mesh and height field (flag: #PxHitFlag::eUV)
};
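/*
Usage sketch: checking which members of a returned hit are valid before reading them.
hit is assumed to have been filled in by a raycast that requested
PxHitFlag::eDEFAULT | PxHitFlag::eUV; useHitPosition() and useBarycentrics() are
hypothetical consumers.

if(hit.flags & PxHitFlag::ePOSITION)
useHitPosition(hit.position);
if(hit.flags & PxHitFlag::eUV)
useBarycentrics(hit.u, hit.v);
*/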
/**
\brief Stores results of overlap queries.
@see PxScene.overlap
*/
struct PxGeomOverlapHit : PxQueryHit
{
PX_INLINE PxGeomOverlapHit() {}
};
/**
\brief Stores results of sweep queries.
@see PxScene.sweep
*/
struct PxGeomSweepHit : PxLocationHit
{
PX_INLINE PxGeomSweepHit() {}
};
/**
\brief Pair of indices, typically either object or triangle indices.
*/
struct PxGeomIndexPair
{
PX_FORCE_INLINE PxGeomIndexPair() {}
PX_FORCE_INLINE PxGeomIndexPair(PxU32 _id0, PxU32 _id1) : id0(_id0), id1(_id1) {}
PxU32 id0, id1;
};
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
|
NVIDIA-Omniverse/PhysX/physx/include/geometry/PxHeightField.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_HEIGHTFIELD_H
#define PX_HEIGHTFIELD_H
/** \addtogroup geomutils
@{
*/
#include "geometry/PxHeightFieldFlag.h"
#include "geometry/PxHeightFieldSample.h"
#include "common/PxBase.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
class PxHeightFieldDesc;
/**
\brief A height field class.
Height fields work in a similar way as triangle meshes specified to act as
height fields, with some important differences:
Triangle meshes can be made of nonuniform geometry, while height fields are
regular, rectangular grids. This means that with PxHeightField, you sacrifice
flexibility in return for improved performance and decreased memory consumption.
In local space rows extend in X direction, columns in Z direction and height in Y direction.
Like Convexes and TriangleMeshes, HeightFields are referenced by shape instances
(see #PxHeightFieldGeometry, #PxShape).
To avoid duplicating data when you have several instances of a particular
height field positioned differently, you do not use this class to represent a
height field object directly. Instead, you create an instance of this height field
via the PxHeightFieldGeometry and PxShape classes.
<h3>Creation</h3>
To create an instance of this class call PxPhysics::createHeightField() or
PxCooking::createHeightField(const PxHeightFieldDesc&, PxInsertionCallback&).
To delete it call release(). This is only possible
once you have released all of its PxShape instances.
<h3>Visualizations:</h3>
\li #PxVisualizationParameter::eCOLLISION_AABBS
\li #PxVisualizationParameter::eCOLLISION_SHAPES
\li #PxVisualizationParameter::eCOLLISION_AXES
\li #PxVisualizationParameter::eCOLLISION_FNORMALS
\li #PxVisualizationParameter::eCOLLISION_EDGES
@see PxHeightFieldDesc PxHeightFieldGeometry PxShape PxPhysics.createHeightField() PxCooking.createHeightField()
*/
class PxHeightField : public PxRefCounted
{
public:
/**
\brief Decrements the reference count of a height field and releases it if the new reference count is zero.
@see PxPhysics.createHeightField() PxHeightFieldDesc PxHeightFieldGeometry PxShape
*/
virtual void release() = 0;
/**
\brief Writes out the sample data array.
The user provides destBufferSize bytes storage at destBuffer.
The data is formatted and arranged as PxHeightFieldDesc.samples.
\param[out] destBuffer The destination buffer for the sample data.
\param[in] destBufferSize The size of the destination buffer.
\return The number of bytes written.
@see PxHeightFieldDesc.samples
*/
virtual PxU32 saveCells(void* destBuffer, PxU32 destBufferSize) const = 0;
/**
\brief Replaces a rectangular subfield in the sample data array.
The user provides the description of a rectangular subfield in subfieldDesc.
The data is formatted and arranged as PxHeightFieldDesc.samples.
\param[in] startCol First column in the destination heightfield to be modified. Can be negative.
\param[in] startRow First row in the destination heightfield to be modified. Can be negative.
\param[in] subfieldDesc Description of the source subfield to read the samples from.
\param[in] shrinkBounds If left as false, the bounds will never shrink but only grow. If set to true the bounds will be recomputed from all HF samples at O(nbColumns*nbRows) perf cost.
\return True on success, false on failure. Failure can occur due to format mismatch.
\note Modified samples are constrained to the same height quantization range as the original heightfield.
Source samples that are out of range of target heightfield will be clipped with no error.
PhysX does not keep a mapping from the heightfield to heightfield shapes that reference it.
Call PxShape::setGeometry on each shape which references the height field, to ensure that internal data structures are updated to reflect the new geometry.
Please note that PxShape::setGeometry does not guarantee correct/continuous behavior when objects are resting on top of old or new geometry.
@see PxHeightFieldDesc.samples PxShape.setGeometry
*/
virtual bool modifySamples(PxI32 startCol, PxI32 startRow, const PxHeightFieldDesc& subfieldDesc, bool shrinkBounds = false) = 0;
/**
\brief Retrieves the number of sample rows in the samples array.
\return The number of sample rows in the samples array.
@see PxHeightFieldDesc.nbRows
*/
virtual PxU32 getNbRows() const = 0;
/**
\brief Retrieves the number of sample columns in the samples array.
\return The number of sample columns in the samples array.
@see PxHeightFieldDesc.nbColumns
*/
virtual PxU32 getNbColumns() const = 0;
/**
\brief Retrieves the format of the sample data.
\return The format of the sample data.
@see PxHeightFieldDesc.format PxHeightFieldFormat
*/
virtual PxHeightFieldFormat::Enum getFormat() const = 0;
/**
\brief Retrieves the offset in bytes between consecutive samples in the array.
\return The offset in bytes between consecutive samples in the array.
@see PxHeightFieldDesc.sampleStride
*/
virtual PxU32 getSampleStride() const = 0;
/**
\brief Retrieves the convex edge threshold.
\return The convex edge threshold.
@see PxHeightFieldDesc.convexEdgeThreshold
*/
virtual PxReal getConvexEdgeThreshold() const = 0;
/**
\brief Retrieves the flags bits, combined from values of the enum ::PxHeightFieldFlag.
\return The flags bits, combined from values of the enum ::PxHeightFieldFlag.
@see PxHeightFieldDesc.flags PxHeightFieldFlag
*/
virtual PxHeightFieldFlags getFlags() const = 0;
/**
\brief Retrieves the height at the given coordinates in grid space.
\return The height at the given coordinates or 0 if the coordinates are out of range.
*/
virtual PxReal getHeight(PxReal x, PxReal z) const = 0;
/**
\brief Returns material table index of given triangle
\note This function takes a post cooking triangle index.
\param[in] triangleIndex (internal) index of desired triangle
\return Material table index, or 0xffff if no per-triangle materials are used
*/
virtual PxMaterialTableIndex getTriangleMaterialIndex(PxTriangleID triangleIndex) const = 0;
/**
\brief Returns a triangle face normal for a given triangle index
\note This function takes a post cooking triangle index.
\param[in] triangleIndex (internal) index of desired triangle
\return Triangle normal for a given triangle index
*/
virtual PxVec3 getTriangleNormal(PxTriangleID triangleIndex) const = 0;
/**
\brief Returns heightfield sample of given row and column
\param[in] row Given heightfield row
\param[in] column Given heightfield column
\return Heightfield sample
*/
virtual const PxHeightFieldSample& getSample(PxU32 row, PxU32 column) const = 0;
/**
\brief Returns the number of times the heightfield data has been modified
This method returns the number of times modifySamples has been called on this heightfield, so that code that has
retained state that depends on the heightfield can efficiently determine whether it has been modified.
\return the number of times the heightfield sample data has been modified.
*/
virtual PxU32 getTimestamp() const = 0;
virtual const char* getConcreteTypeName() const { return "PxHeightField"; }
protected:
PX_INLINE PxHeightField(PxType concreteType, PxBaseFlags baseFlags) : PxRefCounted(concreteType, baseFlags) {}
PX_INLINE PxHeightField(PxBaseFlags baseFlags) : PxRefCounted(baseFlags) {}
virtual ~PxHeightField() {}
virtual bool isKindOf(const char* name) const { PX_IS_KIND_OF(name, "PxHeightField", PxRefCounted); }
};
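/*
Usage sketch: patching a rectangular region of an existing height field and propagating
the change to a shape that uses it. newPatchDesc is a PxHeightFieldDesc describing the
source subfield; shape is assumed to be a PxShape referencing this height field through
a PxHeightFieldGeometry.

heightField->modifySamples(firstCol, firstRow, newPatchDesc);
shape->setGeometry(shape->getGeometry()); // refresh internal data structures
*/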
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
|
NVIDIA-Omniverse/PhysX/physx/include/geometry/PxTriangleMesh.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_TRIANGLE_MESH_H
#define PX_TRIANGLE_MESH_H
/** \addtogroup geomutils
@{ */
#include "foundation/PxVec3.h"
#include "foundation/PxBounds3.h"
#include "common/PxPhysXCommonConfig.h"
#include "common/PxBase.h"
#include "foundation/PxUserAllocated.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief Mesh midphase structure. This enum is used to select the desired acceleration structure for midphase queries
(i.e. raycasts, overlaps, sweeps vs triangle meshes).
The PxMeshMidPhase::eBVH33 structure is the one used in older PhysX versions (up to PhysX 3.3). It has great performance and is
supported on all platforms. It is deprecated since PhysX 5.x.
The PxMeshMidPhase::eBVH34 structure is a revisited implementation introduced in PhysX 3.4. It can be significantly faster both
in terms of cooking performance and runtime performance.
*/
struct PxMeshMidPhase
{
enum Enum
{
eBVH33 = 0, //!< Default midphase mesh structure, as used up to PhysX 3.3 (deprecated)
eBVH34 = 1, //!< New midphase mesh structure, introduced in PhysX 3.4
eLAST
};
};
/**
\brief Flags for the mesh geometry properties.
Used in ::PxTriangleMeshFlags.
*/
struct PxTriangleMeshFlag
{
enum Enum
{
e16_BIT_INDICES = (1<<1), //!< The triangle mesh has 16-bit vertex indices.
eADJACENCY_INFO = (1<<2), //!< The triangle mesh has adjacency information built.
ePREFER_NO_SDF_PROJ = (1<<3)//!< Indicates that this mesh would preferably not be the mesh projected for mesh-mesh collision. This can indicate that the mesh is not well tessellated.
};
};
/**
\brief collection of set bits defined in PxTriangleMeshFlag.
@see PxTriangleMeshFlag
*/
typedef PxFlags<PxTriangleMeshFlag::Enum,PxU8> PxTriangleMeshFlags;
PX_FLAGS_OPERATORS(PxTriangleMeshFlag::Enum,PxU8)
/**
\brief A triangle mesh, also called a 'polygon soup'.
It is represented as an indexed triangle list. There are no restrictions on the
triangle data.
To avoid duplicating data when you have several instances of a particular
mesh positioned differently, you do not use this class to represent a
mesh object directly. Instead, you create an instance of this mesh via
the PxTriangleMeshGeometry and PxShape classes.
<h3>Creation</h3>
To create an instance of this class call PxPhysics::createTriangleMesh(),
and release() to delete it. This is only possible
once you have released all of its PxShape instances.
<h3>Visualizations:</h3>
\li #PxVisualizationParameter::eCOLLISION_AABBS
\li #PxVisualizationParameter::eCOLLISION_SHAPES
\li #PxVisualizationParameter::eCOLLISION_AXES
\li #PxVisualizationParameter::eCOLLISION_FNORMALS
\li #PxVisualizationParameter::eCOLLISION_EDGES
@see PxTriangleMeshDesc PxTriangleMeshGeometry PxShape PxPhysics.createTriangleMesh()
*/
class PxTriangleMesh : public PxRefCounted
{
public:
/**
\brief Returns the number of vertices.
\return number of vertices
@see getVertices()
*/
virtual PxU32 getNbVertices() const = 0;
/**
\brief Returns the vertices.
\return array of vertices
@see getNbVertices()
*/
virtual const PxVec3* getVertices() const = 0;
/**
\brief Returns all mesh vertices for modification.
This function will return the vertices of the mesh so that their positions can be changed in place.
After modifying the vertices you must call refitBVH for the refitting to actually take place.
This function maintains the old mesh topology (triangle indices).
\return inplace vertex coordinates for each existing mesh vertex.
\note It is recommended to use this feature for scene queries only.
\note Size of array returned is equal to the number returned by getNbVertices().
\note This function operates on cooked vertex indices.
\note This means the index mapping and vertex count can be different from what was provided as an input to the cooking routine.
\note To achieve unchanged 1-to-1 index mapping with original mesh data (before cooking) please use the following cooking flags:
\note eWELD_VERTICES = 0, eDISABLE_CLEAN_MESH = 1.
\note It is also recommended to make sure that a call to validateTriangleMesh returns true if mesh cleaning is disabled.
@see getNbVertices()
@see refitBVH()
*/
virtual PxVec3* getVerticesForModification() = 0;
/**
\brief Refits BVH for mesh vertices.
This function will refit the mesh BVH to correctly enclose the new positions updated by getVerticesForModification.
Mesh BVH will not be reoptimized by this function, so new positions that differ significantly from the old ones can severely reduce performance.
\return New bounds for the entire mesh.
\note For PxMeshMidPhase::eBVH34 trees the refit operation is only available on non-quantized trees (see PxBVH34MidphaseDesc::quantized)
\note PhysX does not keep a mapping from the mesh to mesh shapes that reference it.
\note Call PxShape::setGeometry on each shape which references the mesh, to ensure that internal data structures are updated to reflect the new geometry.
\note PxShape::setGeometry does not guarantee correct/continuous behavior when objects are resting on top of old or new geometry.
\note It is also recommended to make sure that a call to validateTriangleMesh returns true if mesh cleaning is disabled.
\note Active edges information will be lost during refit, the rigid body mesh contact generation might not perform as expected.
@see getNbVertices()
@see getVerticesForModification()
@see PxBVH34MidphaseDesc::quantized
*/
virtual PxBounds3 refitBVH() = 0;
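	// Editor's note: a minimal deformation sketch, assuming a non-quantized
	// eBVH34 mesh 'mesh' referenced by a shape 'shape' (both hypothetical):
	//
	//     PxVec3* verts = mesh->getVerticesForModification();
	//     for(PxU32 i = 0; i < mesh->getNbVertices(); i++)
	//         verts[i].y += 0.1f;                    // move every vertex up a little
	//     mesh->refitBVH();                          // refit the BVH around the new positions
	//     shape->setGeometry(shape->getGeometry());  // refresh shapes referencing the mesh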
/**
\brief Returns the number of triangles.
\return number of triangles
@see getTriangles() getTrianglesRemap()
*/
virtual PxU32 getNbTriangles() const = 0;
/**
\brief Returns the triangle indices.
The indices can be 16- or 32-bit depending on the number of triangles in the mesh.
Call getTriangleMeshFlags() to find out whether the indices are 16 or 32 bits.
The number of indices is the number of triangles * 3.
\return array of triangles
@see getNbTriangles() getTriangleMeshFlags() getTrianglesRemap()
*/
virtual const void* getTriangles() const = 0;
/**
\brief Reads the PxTriangleMesh flags.
See the list of flags #PxTriangleMeshFlag
\return The values of the PxTriangleMesh flags.
@see PxTriangleMesh
*/
virtual PxTriangleMeshFlags getTriangleMeshFlags() const = 0;
/**
\brief Returns the triangle remapping table.
The triangles are internally sorted according to various criteria. Hence the internal triangle order
does not always match the original (user-defined) order. The remapping table helps find the original
indices from the internal ones:
remapTable[ internalTriangleIndex ] = originalTriangleIndex
\return the remapping table (or NULL if 'PxCookingParams::suppressTriangleMeshRemapTable' has been used)
@see getNbTriangles() getTriangles() PxCookingParams::suppressTriangleMeshRemapTable
*/
virtual const PxU32* getTrianglesRemap() const = 0;
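	// Editor's note: a minimal sketch of mapping an internal triangle index (e.g.
	// from a raycast hit; 'internalIndex' is hypothetical) back to the
	// user-defined triangle order:
	//
	//     const PxU32* remap = mesh->getTrianglesRemap();  // may be NULL
	//     PxU32 originalIndex = remap ? remap[internalIndex] : internalIndex;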
/**
\brief Decrements the reference count of a triangle mesh and releases it if the new reference count is zero.
@see PxPhysics.createTriangleMesh()
*/
virtual void release() = 0;
/**
\brief Returns material table index of given triangle
This function takes a post-cooking triangle index.
\param[in] triangleIndex (internal) index of desired triangle
\return Material table index, or 0xffff if no per-triangle materials are used
*/
virtual PxMaterialTableIndex getTriangleMaterialIndex(PxTriangleID triangleIndex) const = 0;
/**
\brief Returns the local-space (vertex space) AABB from the triangle mesh.
\return local-space bounds
*/
virtual PxBounds3 getLocalBounds() const = 0;
/**
\brief Returns the local-space Signed Distance Field for this mesh if it has one.
\return local-space SDF.
*/
virtual const PxReal* getSDF() const = 0;
/**
\brief Returns the resolution of the local-space dense SDF.
*/
virtual void getSDFDimensions(PxU32& numX, PxU32& numY, PxU32& numZ) const = 0;
/**
\brief Sets whether this mesh should be preferred for SDF projection.
By default, meshes are flagged as preferring projection, and the decision on which mesh to project is based on the triangle
and vertex count: the model with fewer triangles is projected onto the SDF of the more detailed mesh.
If one of the meshes is set to prefer SDF projection (the default) and the other is set not to prefer it, the model flagged as
preferring SDF projection will be projected onto the model flagged as not preferring it, regardless of the detail of the respective meshes.
Where both models are flagged as preferring no projection, the less detailed model is projected as before.
\param[in] preferProjection Indicates if projection is preferred
*/
virtual void setPreferSDFProjection(bool preferProjection) = 0;
/**
\brief Returns whether this mesh prefers SDF projection.
\return whether this mesh prefers SDF projection.
*/
virtual bool getPreferSDFProjection() const = 0;
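	// Editor's note: a minimal sketch forcing a detailed mesh to act as the SDF
	// projection target against a simpler mesh ('detailedMesh' and 'simpleMesh'
	// are hypothetical):
	//
	//     detailedMesh->setPreferSDFProjection(false); // project the other mesh onto this one
	//     simpleMesh->setPreferSDFProjection(true);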
/**
\brief Returns the mass properties of the mesh assuming unit density.
The following relationship holds between mass and volume:
mass = volume * density
The mass of a unit density mesh is equal to its volume, so this function returns the volume of the mesh.
Similarly, to obtain the localInertia of an identically shaped object with a uniform density of d, simply multiply the
localInertia of the unit density mesh by d.
\param[out] mass The mass of the mesh assuming unit density.
\param[out] localInertia The inertia tensor in mesh local space assuming unit density.
\param[out] localCenterOfMass Position of center of mass (or centroid) in mesh local space.
*/
virtual void getMassInformation(PxReal& mass, PxMat33& localInertia, PxVec3& localCenterOfMass) const = 0;
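	// Editor's note: a minimal sketch of scaling the unit-density results by an
	// actual density 'density' (hypothetical), following mass = volume * density:
	//
	//     PxReal mass; PxMat33 inertia; PxVec3 com;
	//     mesh->getMassInformation(mass, inertia, com);
	//     mass *= density;              // volume * density
	//     inertia = inertia * density;  // inertia scales linearly with density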
protected:
PX_INLINE PxTriangleMesh(PxType concreteType, PxBaseFlags baseFlags) : PxRefCounted(concreteType, baseFlags) {}
PX_INLINE PxTriangleMesh(PxBaseFlags baseFlags) : PxRefCounted(baseFlags) {}
virtual ~PxTriangleMesh() {}
virtual bool isKindOf(const char* name) const { PX_IS_KIND_OF(name, "PxTriangleMesh", PxRefCounted); }
};
/**
\brief A triangle mesh containing the PxMeshMidPhase::eBVH33 structure.
@see PxMeshMidPhase
@deprecated
*/
class PX_DEPRECATED PxBVH33TriangleMesh : public PxTriangleMesh
{
public:
protected:
PX_INLINE PxBVH33TriangleMesh(PxType concreteType, PxBaseFlags baseFlags) : PxTriangleMesh(concreteType, baseFlags) {}
PX_INLINE PxBVH33TriangleMesh(PxBaseFlags baseFlags) : PxTriangleMesh(baseFlags) {}
virtual ~PxBVH33TriangleMesh() {}
virtual bool isKindOf(const char* name) const { PX_IS_KIND_OF(name, "PxBVH33TriangleMesh", PxTriangleMesh); }
};
/**
\brief A triangle mesh containing the PxMeshMidPhase::eBVH34 structure.
@see PxMeshMidPhase
*/
class PxBVH34TriangleMesh : public PxTriangleMesh
{
public:
protected:
PX_INLINE PxBVH34TriangleMesh(PxType concreteType, PxBaseFlags baseFlags) : PxTriangleMesh(concreteType, baseFlags) {}
PX_INLINE PxBVH34TriangleMesh(PxBaseFlags baseFlags) : PxTriangleMesh(baseFlags) {}
virtual ~PxBVH34TriangleMesh() {}
virtual bool isKindOf(const char* name) const { PX_IS_KIND_OF(name, "PxBVH34TriangleMesh", PxTriangleMesh); }
};
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
|
NVIDIA-Omniverse/PhysX/physx/include/geometry/PxGeometry.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_GEOMETRY_H
#define PX_GEOMETRY_H
/** \addtogroup geomutils
@{
*/
#include "common/PxPhysXCommonConfig.h"
#include "foundation/PxFlags.h"
#include "foundation/PxMath.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief A geometry type.
Used to distinguish the type of a ::PxGeometry object.
*/
struct PxGeometryType
{
enum Enum
{
eSPHERE,
ePLANE,
eCAPSULE,
eBOX,
eCONVEXMESH,
ePARTICLESYSTEM,
eTETRAHEDRONMESH,
eTRIANGLEMESH,
eHEIGHTFIELD,
eHAIRSYSTEM,
eCUSTOM,
eGEOMETRY_COUNT, //!< internal use only!
eINVALID = -1 //!< internal use only!
};
};
/**
\brief A geometry object.
A geometry object defines the characteristics of a spatial object, but without any information
about its placement in the world.
\note This is an abstract class. You cannot create instances directly. Create an instance of one of the derived classes instead.
*/
class PxGeometry
{
public:
/**
\brief Returns the type of the geometry.
\return The type of the object.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxGeometryType::Enum getType() const { return mType; }
/**
\brief Assignment operator
*/
PX_INLINE void operator=(const PxGeometry& that)
{
mType = that.mType;
}
protected:
PX_CUDA_CALLABLE PX_FORCE_INLINE PxGeometry(PxGeometryType::Enum type) : mType(type) {}
PX_CUDA_CALLABLE PX_FORCE_INLINE PxGeometry(const PxGeometry& that) : mType(that.mType) {}
PxGeometryType::Enum mType;
public:
float mTypePadding; // PT: padding bytes on x64, used internally
};
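// Editor's note: a minimal sketch of dispatching on the runtime type of a
// PxGeometry reference 'geom' (hypothetical):
//
//     switch(geom.getType())
//     {
//     case PxGeometryType::eSPHERE:       /* handle sphere */ break;
//     case PxGeometryType::eTRIANGLEMESH: /* handle mesh */   break;
//     default:                                                break;
//     }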
#if !PX_DOXYGEN
} // namespace physx
#endif
/** @} */
#endif
|
NVIDIA-Omniverse/PhysX/physx/include/geometry/PxCustomGeometry.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_CUSTOMGEOMETRY_H
#define PX_CUSTOMGEOMETRY_H
/** \addtogroup geomutils
@{
*/
#include "geometry/PxGeometry.h"
#include "geometry/PxGeometryHit.h"
#include "geometry/PxGeometryQueryContext.h"
#include "foundation/PxFoundationConfig.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
class PxContactBuffer;
class PxRenderOutput;
class PxMassProperties;
/**
\brief Custom geometry class. This class allows user to create custom geometries by providing a set of virtual callback functions.
*/
class PxCustomGeometry : public PxGeometry
{
public:
/**
\brief For internal use
*/
PX_PHYSX_COMMON_API static PxU32 getUniqueID();
/**
\brief The type of a custom geometry. Allows a particular kind of custom geometry to be identified.
*/
struct Type
{
/**
\brief Default constructor
*/
PX_INLINE Type() : mID(getUniqueID()) {}
/**
\brief Copy constructor
*/
PX_INLINE Type(const Type& t) : mID(t.mID) {}
/**
\brief Assignment operator
*/
PX_INLINE Type& operator = (const Type& t) { mID = t.mID; return *this; }
/**
\brief Equality operator
*/
PX_INLINE bool operator == (const Type& t) const { return mID == t.mID; }
/**
\brief Inequality operator
*/
PX_INLINE bool operator != (const Type& t) const { return mID != t.mID; }
/**
\brief Invalid type
*/
PX_INLINE static Type INVALID() { PxU32 z(0); return reinterpret_cast<const Type&>(z); }
private:
PxU32 mID;
};
/**
\brief Custom geometry callbacks structure. User should inherit this and implement all pure virtual functions.
*/
struct Callbacks
{
/**
\brief Return the custom type. The type's purpose is to let users differentiate custom geometries. It is not used by PhysX.
\return Unique ID of a custom geometry type.
\note Users should use DECLARE_CUSTOM_GEOMETRY_TYPE and IMPLEMENT_CUSTOM_GEOMETRY_TYPE instead of overriding this function by hand.
*/
virtual Type getCustomType() const = 0;
/**
\brief Return local bounds.
\param[in] geometry This geometry.
\return Bounding box in the geometry local space.
*/
virtual PxBounds3 getLocalBounds(const PxGeometry& geometry) const = 0;
/**
\brief Contact generation. Generate collision contacts between two geometries in the given poses.
\param[in] geom0 This custom geometry
\param[in] geom1 The other geometry
\param[in] pose0 This custom geometry pose
\param[in] pose1 The other geometry pose
\param[in] contactDistance The distance at which contacts begin to be generated between the pairs
\param[in] meshContactMargin The mesh contact margin.
\param[in] toleranceLength The toleranceLength. Used for scaling distance-based thresholds internally to produce appropriate results given simulations in different units
\param[out] contactBuffer A buffer to write contacts to.
\return True if there are contacts. False otherwise.
*/
virtual bool generateContacts(const PxGeometry& geom0, const PxGeometry& geom1, const PxTransform& pose0, const PxTransform& pose1,
const PxReal contactDistance, const PxReal meshContactMargin, const PxReal toleranceLength,
PxContactBuffer& contactBuffer) const = 0;
/**
\brief Raycast. Cast a ray against the geometry in given pose.
\param[in] origin Origin of the ray.
\param[in] unitDir Normalized direction of the ray.
\param[in] geom This custom geometry
\param[in] pose This custom geometry pose
\param[in] maxDist Length of the ray. Has to be in the [0, inf) range.
\param[in] hitFlags Specifies which properties per hit should be computed and returned via the hit callback.
\param[in] maxHits max number of returned hits = size of 'rayHits' buffer
\param[out] rayHits Ray hits.
\param[in] stride Ray hit structure stride.
\param[in] threadContext Optional user-defined per-thread context.
\return Number of hits.
*/
virtual PxU32 raycast(const PxVec3& origin, const PxVec3& unitDir, const PxGeometry& geom, const PxTransform& pose,
PxReal maxDist, PxHitFlags hitFlags, PxU32 maxHits, PxGeomRaycastHit* rayHits, PxU32 stride, PxRaycastThreadContext* threadContext) const = 0;
/**
\brief Overlap. Test if geometries overlap.
\param[in] geom0 This custom geometry
\param[in] pose0 This custom geometry pose
\param[in] geom1 The other geometry
\param[in] pose1 The other geometry pose
\param[in] threadContext Optional user-defined per-thread context.
\return True if there is overlap. False otherwise.
*/
virtual bool overlap(const PxGeometry& geom0, const PxTransform& pose0, const PxGeometry& geom1, const PxTransform& pose1, PxOverlapThreadContext* threadContext) const = 0;
/**
\brief Sweep. Sweep geom1 against geom0.
\param[in] unitDir Normalized direction of the sweep. geom1 is swept along this direction.
\param[in] maxDist Length of the sweep. Has to be in the [0, inf) range.
\param[in] geom0 This custom geometry
\param[in] pose0 This custom geometry pose
\param[in] geom1 The other geometry
\param[in] pose1 The other geometry pose
\param[out] sweepHit Used to report the sweep hit.
\param[in] hitFlags Specifies which properties per hit should be computed and returned via the hit callback.
\param[in] inflation This parameter creates a skin around the swept geometry which increases its extents for sweeping.
\param[in] threadContext Optional user-defined per-thread context.
\return True if there is hit. False otherwise.
*/
virtual bool sweep(const PxVec3& unitDir, const PxReal maxDist,
const PxGeometry& geom0, const PxTransform& pose0, const PxGeometry& geom1, const PxTransform& pose1,
PxGeomSweepHit& sweepHit, PxHitFlags hitFlags, const PxReal inflation, PxSweepThreadContext* threadContext) const = 0;
/**
\brief Visualize custom geometry for debugging. Optional.
\param[in] geometry This geometry.
\param[in] out Render output.
\param[in] absPose Geometry absolute transform.
\param[in] cullbox Region to visualize.
*/
virtual void visualize(const PxGeometry& geometry, PxRenderOutput& out, const PxTransform& absPose, const PxBounds3& cullbox) const = 0;
/**
\brief Compute custom geometry mass properties. For geometries usable with dynamic rigid bodies.
\param[in] geometry This geometry.
\param[out] massProperties Mass properties to compute.
*/
virtual void computeMassProperties(const PxGeometry& geometry, PxMassProperties& massProperties) const = 0;
/**
\brief Report whether the geometry is compatible with PhysX's PCM (persistent contact manifold) feature, which allows contact generation to be optimized.
\param[in] geometry This geometry.
\param[out] breakingThreshold The threshold to trigger contact re-generation.
\return True if the geometry supports persistent contact manifolds.
*/
virtual bool usePersistentContactManifold(const PxGeometry& geometry, PxReal& breakingThreshold) const = 0;
/* Destructor */
virtual ~Callbacks() {}
};
/**
\brief Default constructor.
Creates an empty object with a NULL callbacks pointer.
*/
PX_INLINE PxCustomGeometry() :
PxGeometry(PxGeometryType::eCUSTOM),
callbacks(NULL)
{}
/**
\brief Constructor.
*/
PX_INLINE PxCustomGeometry(Callbacks& _callbacks) :
PxGeometry(PxGeometryType::eCUSTOM),
callbacks(&_callbacks)
{}
/**
\brief Copy constructor.
\param[in] that Other object
*/
PX_INLINE PxCustomGeometry(const PxCustomGeometry& that) :
PxGeometry(that),
callbacks(that.callbacks)
{}
/**
\brief Assignment operator
*/
PX_INLINE void operator=(const PxCustomGeometry& that)
{
mType = that.mType;
callbacks = that.callbacks;
}
/**
\brief Returns true if the geometry is valid.
\return True if the current settings are valid for shape creation.
@see PxRigidActor::createShape, PxPhysics::createShape
*/
PX_INLINE bool isValid() const;
/**
\brief Returns the custom type of the custom geometry.
*/
PX_INLINE Type getCustomType() const
{
return callbacks ? callbacks->getCustomType() : Type::INVALID();
}
public:
Callbacks* callbacks; //!< Pointer to the user-implemented callbacks object.
};
PX_INLINE bool PxCustomGeometry::isValid() const
{
return mType == PxGeometryType::eCUSTOM && callbacks != NULL;
}
#if !PX_DOXYGEN
} // namespace physx
#endif
/**
\brief Used together with IMPLEMENT_CUSTOM_GEOMETRY_TYPE to override the Callbacks::getCustomType() callback.
*/
#define DECLARE_CUSTOM_GEOMETRY_TYPE \
static ::physx::PxCustomGeometry::Type TYPE(); \
virtual ::physx::PxCustomGeometry::Type getCustomType() const;
/**
\brief Used together with DECLARE_CUSTOM_GEOMETRY_TYPE to override the Callbacks::getCustomType() callback.
*/
#define IMPLEMENT_CUSTOM_GEOMETRY_TYPE(CLASS) \
::physx::PxCustomGeometry::Type CLASS::TYPE() \
{ \
static ::physx::PxCustomGeometry::Type customType; \
return customType; \
} \
::physx::PxCustomGeometry::Type CLASS::getCustomType() const \
{ \
return TYPE(); \
}
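// Editor's note: a minimal sketch of declaring a custom geometry with the macro
// pair above ('MyCallbacks' is hypothetical; the pure virtual query callbacks
// still need real implementations):
//
//     class MyCallbacks : public physx::PxCustomGeometry::Callbacks
//     {
//     public:
//         DECLARE_CUSTOM_GEOMETRY_TYPE
//         // ... implement getLocalBounds(), generateContacts(), raycast(),
//         //     overlap(), sweep(), visualize(), computeMassProperties() and
//         //     usePersistentContactManifold() here ...
//     };
//     IMPLEMENT_CUSTOM_GEOMETRY_TYPE(MyCallbacks)
//
//     MyCallbacks cb;
//     physx::PxCustomGeometry geom(cb);  // pass 'geom' to shape creation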
/** @} */
#endif
|
NVIDIA-Omniverse/PhysX/physx/include/geometry/PxTetrahedron.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_TETRAHEDRON_H
#define PX_TETRAHEDRON_H
/** \addtogroup geomutils
@{
*/
#include "common/PxPhysXCommonConfig.h"
#include "foundation/PxVec3.h"
#if !PX_DOXYGEN
namespace physx
{
#endif
/**
\brief Tetrahedron class.
*/
class PxTetrahedron
{
public:
/**
\brief Constructor
*/
PX_FORCE_INLINE PxTetrahedron() {}
/**
\brief Constructor
\param[in] p0 Point 0
\param[in] p1 Point 1
\param[in] p2 Point 2
\param[in] p3 Point 3
*/
PX_FORCE_INLINE PxTetrahedron(const PxVec3& p0, const PxVec3& p1, const PxVec3& p2, const PxVec3& p3)
{
verts[0] = p0;
verts[1] = p1;
verts[2] = p2;
verts[3] = p3;
}
/**
\brief Copy constructor
\param[in] tetrahedron Tetrahedron to copy
*/
PX_FORCE_INLINE PxTetrahedron(const PxTetrahedron& tetrahedron)
{
verts[0] = tetrahedron.verts[0];
verts[1] = tetrahedron.verts[1];
verts[2] = tetrahedron.verts[2];
verts[3] = tetrahedron.verts[3];
}
/**
\brief Destructor
*/
PX_FORCE_INLINE ~PxTetrahedron() {}
/**
\brief Assignment operator
*/
PX_FORCE_INLINE void operator=(const PxTetrahedron& tetrahedron)
{
verts[0] = tetrahedron.verts[0];
verts[1] = tetrahedron.verts[1];
verts[2] = tetrahedron.verts[2];
verts[3] = tetrahedron.verts[3];
}
/**
\brief Array of Vertices.
*/
PxVec3 verts[4];
};
#if !PX_DOXYGEN
}
#endif
/** @} */
#endif
|
NVIDIA-Omniverse/PhysX/physx/include/solver/PxSolverDefs.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef PX_SOLVER_DEFS_H
#define PX_SOLVER_DEFS_H
#include "PxPhysXConfig.h"
#include "foundation/PxVec3.h"
#include "foundation/PxMat33.h"
#include "foundation/PxTransform.h"
#include "PxConstraintDesc.h"
#include "geomutils/PxContactPoint.h"
#if PX_VC
#pragma warning(push)
#pragma warning(disable : 4324) // structure was padded due to alignment
#endif
#if !PX_DOXYGEN
namespace physx
{
#endif
struct PxTGSSolverBodyVel;
/**
\brief Struct that the solver uses to store velocity updates for a body
*/
struct PxSolverBody
{
PX_ALIGN(16, PxVec3) linearVelocity; //!< Delta linear velocity computed by the solver
PxU16 maxSolverNormalProgress; //!< Progress counter used by constraint batching and parallel island solver.
PxU16 maxSolverFrictionProgress; //!< Progress counter used by constraint batching and parallel island solver.
PxVec3 angularState; //!< Delta angular velocity state computed by the solver.
PxU32 solverProgress; //!< Progress counter used by constraint batching and parallel island solver
PxSolverBody() : linearVelocity(0.f), maxSolverNormalProgress(0), maxSolverFrictionProgress(0), angularState(0), solverProgress(0)
{
}
};
PX_COMPILE_TIME_ASSERT(sizeof(PxSolverBody) == 32);
/**
\brief Struct that the solver uses to store the state and other properties of a body
*/
struct PxSolverBodyData
{
PX_ALIGN(16, PxVec3 linearVelocity); //!< 12 Pre-solver linear velocity
PxReal invMass; //!< 16 inverse mass
PxVec3 angularVelocity; //!< 28 Pre-solver angular velocity
PxReal reportThreshold; //!< 32 contact force threshold
PxMat33 sqrtInvInertia; //!< 68 inverse inertia in world space
PxReal penBiasClamp; //!< 72 the penetration bias clamp
PxU32 nodeIndex; //!< 76 the node idx of this solverBodyData. Used by solver to reference between solver bodies and island bodies. Not required by immediate mode
PxReal maxContactImpulse; //!< 80 the max contact impulse
PxTransform body2World; //!< 108 the body's transform
PxU16 pad; //!< 112 pad
PX_FORCE_INLINE PxReal projectVelocity(const PxVec3& lin, const PxVec3& ang) const
{
return linearVelocity.dot(lin) + angularVelocity.dot(ang);
}
};
PX_COMPILE_TIME_ASSERT(0 == (sizeof(PxSolverBodyData) & 15));
//----------------------------------
/**
\brief A header that defines the size of a specific batch of constraints (of same type and without dependencies)
*/
struct PxConstraintBatchHeader
{
PxU32 startIndex; //!< Start index for this batch
PxU16 stride; //!< Number of constraints in this batch (range: 1-4)
PxU16 constraintType; //!< The type of constraint this batch references
};
/**
\brief Constraint descriptor used inside the solver
*/
struct PxSolverConstraintDesc
{
static const PxU16 RIGID_BODY = 0xffff;
enum ConstraintType
{
eCONTACT_CONSTRAINT, //!< Defines this pair is a contact constraint
eJOINT_CONSTRAINT //!< Defines this pair is a joint constraint
};
union
{
PxSolverBody* bodyA; //!< bodyA pointer
PxTGSSolverBodyVel* tgsBodyA; //!< bodyA pointer
void* articulationA; //!< Articulation pointer for body A
};
union
{
PxSolverBody* bodyB; //!< BodyB pointer
PxTGSSolverBodyVel* tgsBodyB; //!< BodyB pointer
void* articulationB; //!< Articulation pointer for body B
};
PxU32 bodyADataIndex; //!< Body A's index into the SolverBodyData array
PxU32 bodyBDataIndex; //!< Body B's index into the SolverBodyData array
PxU32 linkIndexA; //!< Link index defining which link in Articulation A this constraint affects. If not an articulation, must be PxSolverConstraintDesc::RIGID_BODY
PxU32 linkIndexB; //!< Link index defining which link in Articulation B this constraint affects. If not an articulation, must be PxSolverConstraintDesc::RIGID_BODY
PxU8* constraint; //!< Pointer to the constraint rows to be solved
void* writeBack; //!< Pointer to the writeback structure results for this given constraint are to be written to
PxU16 progressA; //!< Internal progress counter
PxU16 progressB; //!< Internal progress counter
PxU16 constraintLengthOver16; //!< constraintLength/16, max constraint length is 1MB
PxU8 padding[10];
};
/**
\brief Data structure used for preparing constraints before solving them
*/
struct PxSolverConstraintPrepDescBase
{
enum BodyState
{
eDYNAMIC_BODY = 1 << 0,
eSTATIC_BODY = 1 << 1,
eKINEMATIC_BODY = 1 << 2,
eARTICULATION = 1 << 3
};
PxConstraintInvMassScale invMassScales; //!< In: The local mass scaling for this pair.
PxSolverConstraintDesc* desc; //!< Output: The PxSolverConstraintDesc filled in by contact prep
const PxSolverBody* body0; //!< In: The first body. Stores velocity information. Unused unless contact involves articulations.
const PxSolverBody* body1; //!< In: The second body. Stores velocity information. Unused unless contact involves articulations.
const PxSolverBodyData* data0; //!< In: The first PxSolverBodyData. Stores mass and miscellaneous information for the first body.
const PxSolverBodyData* data1; //!< In: The second PxSolverBodyData. Stores mass and miscellaneous information for the second body
PxTransform bodyFrame0; //!< In: The world-space transform of the first body.
PxTransform bodyFrame1; //!< In: The world-space transform of the second body.
BodyState bodyState0; //!< In: Defines what kind of actor the first body is
BodyState bodyState1; //!< In: Defines what kind of actor the second body is
};
/**
\brief Data structure used for preparing constraints before solving them
*/
struct PxSolverConstraintPrepDesc : public PxSolverConstraintPrepDescBase
{
PX_ALIGN(16, Px1DConstraint* rows); //!< The start of the constraint rows
PxU32 numRows; //!< The number of rows
PxReal linBreakForce, angBreakForce; //!< Break forces
PxReal minResponseThreshold; //!< The minimum response threshold
void* writeback; //!< Pointer to constraint writeback structure. Reports back joint breaking. If not required, set to NULL.
bool disablePreprocessing; //!< Disable joint pre-processing. Pre-processing can improve stability but under certain circumstances, e.g. when some invInertia rows are zero/almost zero, can cause instabilities.
bool improvedSlerp; //!< Use improved slerp model
bool driveLimitsAreForces; //!< Indicates whether drive limits are forces
bool extendedLimits; //!< Indicates whether we want to use extended limits
bool disableConstraint; //!< Disables constraint
PxVec3p body0WorldOffset; //!< Body0 world offset
};
/**
\brief Data structure used for preparing constraints before solving them
*/
struct PxSolverContactDesc : public PxSolverConstraintPrepDescBase
{
void* shapeInteraction; //!< Pointer to shape interaction. Used for force threshold reports in solver. Set to NULL if using immediate mode.
PxContactPoint* contacts; //!< The start of the contacts for this pair
PxU32 numContacts; //!< The total number of contacts this pair references.
bool hasMaxImpulse; //!< Defines whether this pair has max impulse clamping enabled
bool disableStrongFriction; //!< Defines whether this pair disables strong friction (sticky friction correlation)
bool hasForceThresholds; //!< Defines whether this pair requires force thresholds
PxReal restDistance; //!< A distance at which the solver should aim to hold the bodies separated. Default is 0
PxReal maxCCDSeparation; //!< A distance used to configure speculative CCD behavior. Default is PX_MAX_F32. Set internally in PhysX for bodies with eENABLE_SPECULATIVE_CCD on. Do not set directly!
PxU8* frictionPtr; //!< InOut: Friction patch correlation data. Set each frame by solver. Can be retained for improved behavior or discarded each frame.
PxU8 frictionCount; //!< The total number of friction patches in this pair
PxReal* contactForces; //!< Out: A buffer for the solver to write applied contact forces to.
PxU32 startFrictionPatchIndex; //!< Start index of friction patch in the correlation buffer. Set by friction correlation
PxU32 numFrictionPatches; //!< Total number of friction patches in this pair. Set by friction correlation
PxU32 startContactPatchIndex; //!< The start index of this pair's contact patches in the correlation buffer. For internal use only
PxU16 numContactPatches; //!< Total number of contact patches.
PxU16 axisConstraintCount; //!< Axis constraint count. Defines how many constraint rows this pair has produced. Useful for statistical purposes.
PxReal offsetSlop; //!< Slop value used to snap contact line of action back in-line with the COM.
//PxU8 pad[16 - sizeof(void*)];
};
class PxConstraintAllocator
{
public:
/**
\brief Allocates constraint data. It is the application's responsibility to release this memory after PxSolveConstraints has completed.
\param[in] byteSize Allocation size in bytes
\return The allocated memory. This address must be 16-byte aligned.
*/
virtual PxU8* reserveConstraintData(const PxU32 byteSize) = 0;
/**
\brief Allocates friction data. Friction data can be retained by the application for a given pair and provided as an input to PxSolverContactDesc to improve simulation stability.
It is the application's responsibility to release this memory. If this memory is released, the application should ensure it does not pass pointers to this memory to PxSolverContactDesc.
\param[in] byteSize Allocation size in bytes
\return The allocated memory. This address must be 4-byte aligned.
*/
virtual PxU8* reserveFrictionData(const PxU32 byteSize) = 0;
virtual ~PxConstraintAllocator() {}
};
/** \addtogroup physics
@{ */
struct PxArticulationAxis
{
enum Enum
{
eTWIST = 0, //!< Rotational about eX
eSWING1 = 1, //!< Rotational about eY
eSWING2 = 2, //!< Rotational about eZ
eX = 3, //!< Linear in eX
eY = 4, //!< Linear in eY
eZ = 5, //!< Linear in eZ
eCOUNT = 6
};
};
PX_FLAGS_OPERATORS(PxArticulationAxis::Enum, PxU8)
struct PxArticulationMotion
{
enum Enum
{
eLOCKED = 0, //!< Locked axis, i.e. degree of freedom (DOF)
eLIMITED = 1, //!< Limited DOF - set limits of joint DOF together with this flag, see PxArticulationJointReducedCoordinate::setLimitParams
eFREE = 2 //!< Free DOF
};
};
typedef PxFlags<PxArticulationMotion::Enum, PxU8> PxArticulationMotions;
PX_FLAGS_OPERATORS(PxArticulationMotion::Enum, PxU8)
struct PxArticulationJointType
{
enum Enum
{
eFIX = 0, //!< All joint axes, i.e. degrees of freedom (DOFs) locked
ePRISMATIC = 1, //!< Single linear DOF, e.g. cart on a rail
eREVOLUTE = 2, //!< Single rotational DOF, e.g. an elbow joint or a rotational motor, position wrapped at 2pi radians
eREVOLUTE_UNWRAPPED = 3, //!< Single rotational DOF, e.g. an elbow joint or a rotational motor, position not wrapped
eSPHERICAL = 4, //!< Ball and socket joint with two or three DOFs
eUNDEFINED = 5
};
};
struct PxArticulationFlag
{
enum Enum
{
eFIX_BASE = (1 << 0), //!< Set articulation base to be fixed.
eDRIVE_LIMITS_ARE_FORCES = (1<<1), //!< Limits for drive effort are forces and torques rather than impulses, see PxArticulationDrive::maxForce.
eDISABLE_SELF_COLLISION = (1<<2), //!< Disable collisions between the articulation's links (note that parent/child collisions are disabled internally in either case).
eCOMPUTE_JOINT_FORCES = (1<<3) //!< @deprecated Enable in order to be able to query joint solver (i.e. constraint) forces using PxArticulationCache::jointSolverForces.
};
};
typedef PxFlags<PxArticulationFlag::Enum, PxU8> PxArticulationFlags;
PX_FLAGS_OPERATORS(PxArticulationFlag::Enum, PxU8)
struct PxArticulationDriveType
{
enum Enum
{
eFORCE = 0, //!< The output of the implicit spring drive controller is a force/torque.
eACCELERATION = 1, //!< The output of the implicit spring drive controller is a joint acceleration (use this to get (spatial)-inertia-invariant behavior of the drive).
eTARGET = 2, //!< Sets the drive gains internally to track a target position almost kinematically (i.e. with very high drive gains).
eVELOCITY = 3, //!< Sets the drive gains internally to track a target velocity almost kinematically (i.e. with very high drive gains).
eNONE = 4
};
};
/**
\brief Data structure to set articulation joint limits.
- The lower limit should be strictly smaller than the higher limit. If the limits should be equal, use PxArticulationMotion::eLOCKED
and an appropriate offset in the parent/child joint frames.
- The limit units are linear units (equivalent to scene units) for a translational axis, or radians for a rotational axis.
@see PxArticulationJointReducedCoordinate::setLimitParams, PxArticulationReducedCoordinate
*/
struct PxArticulationLimit
{
PxArticulationLimit(){}
PxArticulationLimit(const PxReal low_, const PxReal high_)
{
low = low_;
high = high_;
}
/**
\brief The lower limit on the joint axis position.
<b>Range:</b> [-PX_MAX_F32, high)<br>
<b>Default:</b> 0.0f<br>
*/
PxReal low;
/**
\brief The higher limit on the joint axis position.
<b>Range:</b> (low, PX_MAX_F32]<br>
<b>Default:</b> 0.0f<br>
*/
PxReal high;
};
/**
\brief Data structure for articulation joint drive configuration.
@see PxArticulationJointReducedCoordinate::setDriveParams, PxArticulationReducedCoordinate
*/
struct PxArticulationDrive
{
PxArticulationDrive(){}
PxArticulationDrive(const PxReal stiffness_, const PxReal damping_, const PxReal maxForce_, PxArticulationDriveType::Enum driveType_=PxArticulationDriveType::eFORCE)
{
stiffness = stiffness_;
damping = damping_;
maxForce = maxForce_;
driveType = driveType_;
}
/**
\brief The drive stiffness, i.e. the proportional gain of the implicit PD controller.
See manual for further information, and the drives' implicit spring-damper (i.e. PD control) implementation in particular.
<b>Units:</b> (distance = linear scene units)<br>
Rotational axis: torque/rad if driveType = PxArticulationDriveType::eFORCE; or (rad/s^2)/rad if driveType = PxArticulationDriveType::eACCELERATION<br>
Translational axis: force/distance if driveType = PxArticulationDriveType::eFORCE; or (distance/s^2)/distance if driveType = PxArticulationDriveType::eACCELERATION<br>
<b>Range:</b> [0, PX_MAX_F32]<br>
<b>Default:</b> 0.0f<br>
*/
PxReal stiffness;
/**
\brief The drive damping, i.e. the derivative gain of the implicit PD controller.
See manual for further information, and the drives' implicit spring-damper (i.e. PD control) implementation in particular.
<b>Units:</b> (distance = linear scene units)<br>
Rotational axis: torque/(rad/s) if driveType = PxArticulationDriveType::eFORCE; or (rad/s^2)/(rad/s) if driveType = PxArticulationDriveType::eACCELERATION<br>
Translational axis: force/(distance/s) if driveType = PxArticulationDriveType::eFORCE; or (distance/s^2)/(distance/s) if driveType = PxArticulationDriveType::eACCELERATION<br>
<b>Range:</b> [0, PX_MAX_F32]<br>
<b>Default:</b> 0.0f<br>
*/
PxReal damping;
/**
\brief The drive force limit.
- The limit is enforced regardless of the drive type #PxArticulationDriveType.
- The limit corresponds to a force (linear axis) or torque (rotational axis) if PxArticulationFlag::eDRIVE_LIMITS_ARE_FORCES is set, and to an impulse (force|torque * dt) otherwise.
<b>Range:</b> [0, PX_MAX_F32]<br>
<b>Default:</b> 0.0f<br>
@see PxArticulationFlag::eDRIVE_LIMITS_ARE_FORCES
*/
PxReal maxForce;
/**
\brief The drive type.
@see PxArticulationDriveType
*/
PxArticulationDriveType::Enum driveType;
};
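// Editor's note: a minimal sketch configuring a limited, driven joint axis with
// the two structs above (gain values are placeholders):
//
//     PxArticulationLimit limit(-PxPi * 0.25f, PxPi * 0.25f);    // +/- 45 degrees
//     PxArticulationDrive drive(1000.0f,     // stiffness (proportional gain)
//                               100.0f,      // damping (derivative gain)
//                               PX_MAX_F32,  // no drive force limit
//                               PxArticulationDriveType::eACCELERATION);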
/** @} */
struct PxTGSSolverBodyVel
{
PX_ALIGN(16, PxVec3) linearVelocity; //12
PxU16 nbStaticInteractions; //14 Used to accumulate the number of static interactions
PxU16 maxDynamicPartition; //16 Used to accumulate the max partition of dynamic interactions
PxVec3 angularVelocity; //28
PxU32 partitionMask; //32 Used in partitioning as a bit-field
PxVec3 deltaAngDt; //44
PxReal maxAngVel; //48
PxVec3 deltaLinDt; //60
PxU16 lockFlags; //62
bool isKinematic; //63
PxU8 pad; //64
PX_FORCE_INLINE PxReal projectVelocity(const PxVec3& lin, const PxVec3& ang) const
{
return linearVelocity.dot(lin) + angularVelocity.dot(ang);
}
};
//Needed only by prep, integration and 1D constraints
struct PxTGSSolverBodyTxInertia
{
PxTransform deltaBody2World;
PxMat33 sqrtInvInertia; //!< inverse inertia in world space
};
struct PxTGSSolverBodyData
{
PX_ALIGN(16, PxVec3) originalLinearVelocity; //!< Pre-solver linear velocity.
PxReal maxContactImpulse; //!< The max contact impulse.
PxVec3 originalAngularVelocity; //!< Pre-solver angular velocity
PxReal penBiasClamp; //!< The penetration bias clamp.
PxReal invMass; //!< Inverse mass.
PxU32 nodeIndex; //!< The node idx of this solverBodyData. Used by solver to reference between solver bodies and island bodies. Not required by immediate mode.
PxReal reportThreshold; //!< Contact force threshold.
PxU32 pad;
PxReal projectVelocity(const PxVec3& linear, const PxVec3& angular) const
{
return originalLinearVelocity.dot(linear) + originalAngularVelocity.dot(angular);
}
};
struct PxTGSSolverConstraintPrepDescBase
{
PxConstraintInvMassScale invMassScales; //!< In: The local mass scaling for this pair.
PxSolverConstraintDesc* desc; //!< Output: The PxSolverConstraintDesc filled in by contact prep
const PxTGSSolverBodyVel* body0; //!< In: The first body. Stores velocity information. Unused unless contact involves articulations.
const PxTGSSolverBodyVel* body1; //!< In: The second body. Stores velocity information. Unused unless contact involves articulations.
const PxTGSSolverBodyTxInertia* body0TxI; //!< In: The first PxTGSSolverBodyTxInertia. Stores the delta body to world transform and sqrtInvInertia for first body.
const PxTGSSolverBodyTxInertia* body1TxI; //!< In: The second PxTGSSolverBodyTxInertia. Stores the delta body to world transform and sqrtInvInertia for second body.
const PxTGSSolverBodyData* bodyData0; //!< In: The first PxTGSSolverBodyData. Stores mass and miscellaneous information for the first body.
const PxTGSSolverBodyData* bodyData1; //!< In: The second PxTGSSolverBodyData. Stores mass and miscellaneous information for the second body.
PxTransform bodyFrame0; //!< In: The world-space transform of the first body.
PxTransform bodyFrame1; //!< In: The world-space transform of the second body.
PxSolverContactDesc::BodyState bodyState0; //!< In: Defines what kind of actor the first body is
PxSolverContactDesc::BodyState bodyState1; //!< In: Defines what kind of actor the second body is
};
struct PxTGSSolverConstraintPrepDesc : public PxTGSSolverConstraintPrepDescBase
{
Px1DConstraint* rows; //!< The start of the constraint rows
PxU32 numRows; //!< The number of rows
PxReal linBreakForce, angBreakForce; //!< Break forces
PxReal minResponseThreshold; //!< The minimum response threshold
void* writeback; //!< Pointer to constraint writeback structure. Reports back joint breaking. If not required, set to NULL.
bool disablePreprocessing; //!< Disable joint pre-processing. Pre-processing can improve stability but under certain circumstances, e.g. when some invInertia rows are zero/almost zero, can cause instabilities.
bool improvedSlerp; //!< Use improved slerp model
bool driveLimitsAreForces; //!< Indicates whether drive limits are forces
bool extendedLimits; //!< Indicates whether extended limits are used
bool disableConstraint; //!< Disables constraint
PxVec3p body0WorldOffset; //!< Body0 world offset
PxVec3p cA2w; //!< Location of anchor point A in world space
PxVec3p cB2w; //!< Location of anchor point B in world space
};
struct PxTGSSolverContactDesc : public PxTGSSolverConstraintPrepDescBase
{
void* shapeInteraction; //!< Pointer to shape interaction. Used for force threshold reports in solver. Set to NULL if using immediate mode.
PxContactPoint* contacts; //!< The start of the contacts for this pair
PxU32 numContacts; //!< The total number of contacts this pair references.
bool hasMaxImpulse; //!< Defines whether this pair has max impulse clamping enabled
bool disableStrongFriction; //!< Defines whether this pair disables strong friction (sticky friction correlation)
bool hasForceThresholds; //!< Defines whether this pair requires force thresholds
PxReal restDistance; //!< A distance at which the solver should aim to hold the bodies separated. Default is 0
PxReal maxCCDSeparation; //!< A distance used to configure speculative CCD behavior. Default is PX_MAX_F32. Set internally in PhysX for bodies with eENABLE_SPECULATIVE_CCD on. Do not set directly!
PxU8* frictionPtr; //!< InOut: Friction patch correlation data. Set each frame by solver. Can be retained for improved behavior or discarded each frame.
PxU8 frictionCount; //!< The total number of friction patches in this pair
PxReal* contactForces; //!< Out: A buffer for the solver to write applied contact forces to.
PxU32 startFrictionPatchIndex; //!< Start index of friction patch in the correlation buffer. Set by friction correlation
PxU32 numFrictionPatches; //!< Total number of friction patches in this pair. Set by friction correlation
PxU32 startContactPatchIndex; //!< The start index of this pair's contact patches in the correlation buffer. For internal use only
PxU16 numContactPatches; //!< Total number of contact patches.
PxU16 axisConstraintCount; //!< Axis constraint count. Defines how many constraint rows this pair has produced. Useful for statistical purposes.
PxReal maxImpulse; //!< The maximum impulse the solver is allowed to introduce for this pair of bodies.
PxReal torsionalPatchRadius; //!< This defines the radius of the contact patch used to apply torsional friction.
PxReal minTorsionalPatchRadius; //!< This defines the minimum radius of the contact patch used to apply torsional friction.
PxReal offsetSlop; //!< Slop value used to snap contact line of action back in-line with the COM.
};
#if !PX_DOXYGEN
}
#endif
#if PX_VC
#pragma warning(pop)
#endif
#endif
|
NVIDIA-Omniverse/PhysX/physx/buildtools/cmake_generate_projects.py | import sys
import os
import glob
import os.path
import shutil
import subprocess
import xml.etree.ElementTree
def packmanExt():
if sys.platform == 'win32':
return 'cmd'
return 'sh'
def cmakeExt():
if sys.platform == 'win32':
return '.exe'
return ''
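# Decide whether a preset applies to the current host platform: presets whose
# names contain 'win', 'switch' or 'crosscompile' are only offered on Windows,
# everything else only on non-Windows hosts.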
def filterPreset(presetName):
winPresetFilter = ['win','switch','crosscompile']
if sys.platform == 'win32':
if any(presetName.find(elem) != -1 for elem in winPresetFilter):
return True
else:
if all(presetName.find(elem) == -1 for elem in winPresetFilter):
return True
return False
def noPresetProvided():
global input
print('Preset parameter required, available presets:')
presetfiles = []
for file in glob.glob("buildtools/presets/*.xml"):
presetfiles.append(file)
if len(presetfiles) == 0:
for file in glob.glob("buildtools/presets/public/*.xml"):
presetfiles.append(file)
counter = 0
presetList = []
for preset in presetfiles:
if filterPreset(preset):
presetXml = xml.etree.ElementTree.parse(preset).getroot()
if(preset.find('user') == -1):
print('(' + str(counter) + ') ' + presetXml.get('name') +
' <--- ' + presetXml.get('comment'))
presetList.append(presetXml.get('name'))
else:
print('(' + str(counter) + ') ' + presetXml.get('name') +
'.user <--- ' + presetXml.get('comment'))
presetList.append(presetXml.get('name') + '.user')
counter = counter + 1
# Fix Python 2.x.
try:
input = raw_input
except NameError:
pass
mode = int(input('Enter preset number: '))
return presetList[mode]
class CMakePreset:
presetName = ''
targetPlatform = ''
compiler = ''
generator = ''
cmakeSwitches = []
cmakeParams = []
def __init__(self, presetName):
xmlPath = "buildtools/presets/"+presetName+'.xml'
if os.path.isfile(xmlPath):
print('Using preset xml: '+xmlPath)
else:
xmlPath = "buildtools/presets/public/"+presetName+'.xml'
if os.path.isfile(xmlPath):
print('Using preset xml: '+xmlPath)
else:
print('Preset xml file: '+xmlPath+' not found')
exit()
# get the xml
presetNode = xml.etree.ElementTree.parse(xmlPath).getroot()
self.presetName = presetNode.attrib['name']
for platform in presetNode.findall('platform'):
self.targetPlatform = platform.attrib['targetPlatform']
self.compiler = platform.attrib['compiler']
self.generator = platform.get('generator')
print('Target platform: ' + self.targetPlatform +
' using compiler: ' + self.compiler)
if self.generator is not None:
print(' using generator: ' + self.generator)
for cmakeSwitch in presetNode.find('CMakeSwitches'):
cmSwitch = '-D' + \
cmakeSwitch.attrib['name'] + '=' + \
cmakeSwitch.attrib['value'].upper()
self.cmakeSwitches.append(cmSwitch)
for cmakeParam in presetNode.find('CMakeParams'):
if cmakeParam.attrib['name'] in ('CMAKE_INSTALL_PREFIX', 'PX_OUTPUT_LIB_DIR', 'PX_OUTPUT_EXE_DIR', 'PX_OUTPUT_DLL_DIR'):
cmParam = '-D' + cmakeParam.attrib['name'] + '=\"' + \
os.environ['PHYSX_ROOT_DIR'] + '/' + \
cmakeParam.attrib['value'] + '\"'
else:
cmParam = '-D' + \
cmakeParam.attrib['name'] + '=' + \
cmakeParam.attrib['value']
self.cmakeParams.append(cmParam)
pass
def isMultiConfigPlatform(self):
if self.targetPlatform == 'linux':
return False
elif self.targetPlatform == 'linuxAarch64':
return False
return True
def getCMakeSwitches(self):
outString = ''
# We need gpuProjectsFound flag to avoid issues when we have both
# PX_GENERATE_GPU_PROJECTS and PX_GENERATE_GPU_PROJECTS_ONLY switches
gpuProjectsFound = False # initialize flag
for cmakeSwitch in self.cmakeSwitches:
outString = outString + ' ' + cmakeSwitch
if not gpuProjectsFound and cmakeSwitch.find('PX_GENERATE_GPU_PROJECTS') != -1:
gpuProjectsFound = True # set flag to True when keyword found
if os.environ.get('PM_CUDA_PATH') is not None:
outString = outString + ' -DCUDAToolkit_ROOT_DIR=' + \
os.environ['PM_CUDA_PATH']
if self.compiler in ['vc15', 'vc16', 'vc17'] and self.generator != 'ninja':
outString = outString + ' -T cuda=' + os.environ['PM_CUDA_PATH']
# TODO: Need to do the same for gcc (aarch64) when we package it with Packman
elif self.compiler == 'clang':
if os.environ.get('PM_clang_PATH') is not None:
outString = outString + ' -DCMAKE_CUDA_HOST_COMPILER=' + \
os.environ['PM_clang_PATH'] + '/bin/clang++'
return outString
def getCMakeParams(self):
outString = ''
for cmakeParam in self.cmakeParams:
outString = outString + ' ' + cmakeParam # + ' --trace'
return outString
def getPlatformCMakeParams(self):
cmake_modules_root = os.environ['PHYSX_ROOT_DIR'] + '/source/compiler/cmake/modules'
outString = ' '
vs_versions = {
'vc15': '\"Visual Studio 15 2017\"',
'vc16': '\"Visual Studio 16 2019\"',
'vc17': '\"Visual Studio 17 2022\"'
}
# Visual studio
if self.compiler in vs_versions:
generator = '-G \"Ninja Multi-Config\"' if self.generator == 'ninja' else '-G ' + vs_versions[self.compiler]
outString += generator
# mac
elif self.compiler == 'xcode':
outString = outString + '-G Xcode'
# Linux
elif self.targetPlatform in ['linux', 'linuxAarch64']:
if self.generator is not None and self.generator == 'ninja':
outString = outString + '-G \"Ninja\"'
outString = outString + ' -DCMAKE_MAKE_PROGRAM=' + os.environ['PM_ninja_PATH'] + '/ninja'
else:
outString = outString + '-G \"Unix Makefiles\"'
if self.targetPlatform == 'win64':
if self.generator != 'ninja':
outString = outString + ' -Ax64'
outString = outString + ' -DTARGET_BUILD_PLATFORM=windows'
outString = outString + ' -DPX_OUTPUT_ARCH=x86'
return outString
elif self.targetPlatform == 'switch64':
outString = outString + ' -DTARGET_BUILD_PLATFORM=switch'
outString = outString + ' -DCMAKE_TOOLCHAIN_FILE=' + \
cmake_modules_root + '/switch/NX64Toolchain.txt'
outString = outString + ' -DCMAKE_GENERATOR_PLATFORM=NX64'
return outString
elif self.targetPlatform == 'linux':
outString = outString + ' -DTARGET_BUILD_PLATFORM=linux'
outString = outString + ' -DPX_OUTPUT_ARCH=x86'
if self.compiler == 'clang-crosscompile':
outString = outString + ' -DCMAKE_TOOLCHAIN_FILE=' + \
cmake_modules_root + '/linux/LinuxCrossToolchain.x86_64-unknown-linux-gnu.cmake'
outString = outString + ' -DCMAKE_MAKE_PROGRAM=' + os.environ.get('PM_MinGW_PATH') + '/bin/mingw32-make.exe'
elif self.compiler == 'clang':
if os.environ.get('PM_clang_PATH') is not None:
outString = outString + ' -DCMAKE_C_COMPILER=' + \
os.environ['PM_clang_PATH'] + '/bin/clang'
outString = outString + ' -DCMAKE_CXX_COMPILER=' + \
os.environ['PM_clang_PATH'] + '/bin/clang++'
else:
outString = outString + ' -DCMAKE_C_COMPILER=clang'
outString = outString + ' -DCMAKE_CXX_COMPILER=clang++'
return outString
elif self.targetPlatform == 'linuxAarch64':
outString = outString + ' -DTARGET_BUILD_PLATFORM=linux'
outString = outString + ' -DPX_OUTPUT_ARCH=arm'
if self.compiler == 'clang-crosscompile':
outString = outString + ' -DCMAKE_TOOLCHAIN_FILE=' + \
cmake_modules_root + '/linux/LinuxCrossToolchain.aarch64-unknown-linux-gnueabihf.cmake'
outString = outString + ' -DCMAKE_MAKE_PROGRAM=' + os.environ.get('PM_MinGW_PATH') + '/bin/mingw32-make.exe'
elif self.compiler == 'gcc':
# TODO: To change so it uses Packman's compiler. Then add it as
# host compiler for CUDA above.
outString = outString + ' -DCMAKE_TOOLCHAIN_FILE=\"' + \
cmake_modules_root + '/linux/LinuxAarch64.cmake\"'
return outString
elif self.targetPlatform == 'mac64':
outString = outString + ' -DTARGET_BUILD_PLATFORM=mac'
outString = outString + ' -DPX_OUTPUT_ARCH=x86'
return outString
return ''
def getCommonParams():
outString = '--no-warn-unused-cli'
outString = outString + ' -DCMAKE_PREFIX_PATH=\"' + os.environ['PM_PATHS'] + '\"'
outString = outString + ' -DPHYSX_ROOT_DIR=\"' + \
os.environ['PHYSX_ROOT_DIR'] + '\"'
outString = outString + ' -DPX_OUTPUT_LIB_DIR=\"' + \
os.environ['PHYSX_ROOT_DIR'] + '\"'
outString = outString + ' -DPX_OUTPUT_BIN_DIR=\"' + \
os.environ['PHYSX_ROOT_DIR'] + '\"'
if os.environ.get('GENERATE_SOURCE_DISTRO') == '1':
outString = outString + ' -DPX_GENERATE_SOURCE_DISTRO=1'
return outString
def cleanupCompilerDir(compilerDirName):
if os.path.exists(compilerDirName):
if sys.platform == 'win32':
os.system('rmdir /S /Q ' + compilerDirName)
else:
shutil.rmtree(compilerDirName, True)
if not os.path.exists(compilerDirName):
os.makedirs(compilerDirName)
def presetProvided(pName):
parsedPreset = CMakePreset(pName)
print('PM_PATHS: ' + os.environ['PM_PATHS'])
if os.environ.get('PM_cmake_PATH') is not None:
cmakeExec = os.environ['PM_cmake_PATH'] + '/bin/cmake' + cmakeExt()
else:
cmakeExec = 'cmake' + cmakeExt()
print('Cmake: ' + cmakeExec)
# gather cmake parameters
cmakeParams = parsedPreset.getPlatformCMakeParams()
cmakeParams = cmakeParams + ' ' + getCommonParams()
cmakeParams = cmakeParams + ' ' + parsedPreset.getCMakeSwitches()
cmakeParams = cmakeParams + ' ' + parsedPreset.getCMakeParams()
# print(cmakeParams)
if os.path.isfile(os.environ['PHYSX_ROOT_DIR'] + '/compiler/internal/CMakeLists.txt'):
cmakeMasterDir = 'internal'
else:
cmakeMasterDir = 'public'
if parsedPreset.isMultiConfigPlatform():
# cleanup and create output directory
outputDir = os.path.join('compiler', parsedPreset.presetName)
cleanupCompilerDir(outputDir)
# run the cmake script
#print('Cmake params:' + cmakeParams)
os.chdir(os.path.join(os.environ['PHYSX_ROOT_DIR'], outputDir))
os.system(cmakeExec + ' \"' +
os.environ['PHYSX_ROOT_DIR'] + '/compiler/' + cmakeMasterDir + '\"' + cmakeParams)
os.chdir(os.environ['PHYSX_ROOT_DIR'])
else:
configs = ['debug', 'checked', 'profile', 'release']
for config in configs:
# cleanup and create output directory
outputDir = os.path.join('compiler', parsedPreset.presetName + '-' + config)
cleanupCompilerDir(outputDir)
# run the cmake script
#print('Cmake params:' + cmakeParams)
os.chdir(os.path.join(os.environ['PHYSX_ROOT_DIR'], outputDir))
# print(cmakeExec + ' \"' + os.environ['PHYSX_ROOT_DIR'] + '/compiler/' + cmakeMasterDir + '\"' + cmakeParams + ' -DCMAKE_BUILD_TYPE=' + config)
os.system(cmakeExec + ' \"' + os.environ['PHYSX_ROOT_DIR'] + '/compiler/' +
cmakeMasterDir + '\"' + cmakeParams + ' -DCMAKE_BUILD_TYPE=' + config)
os.chdir(os.environ['PHYSX_ROOT_DIR'])
pass
def main():
if (sys.version_info[0] < 3) or (sys.version_info[0] == 3 and sys.version_info[1] < 5):
print("You are using Python {}. You must use Python 3.5 and up. Please read README.md for requirements.").format(sys.version)
exit()
physx_root_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
os.environ['PHYSX_ROOT_DIR'] = physx_root_dir.replace("\\", "/")
if len(sys.argv) != 2:
presetName = noPresetProvided()
if sys.platform == 'win32':
print('Running generate_projects.bat ' + presetName)
cmd = 'generate_projects.bat {}'.format(presetName)
result = subprocess.run(cmd, cwd=os.environ['PHYSX_ROOT_DIR'], check=True, universal_newlines=True)
# TODO: catch exception and add capture errors
else:
print('Running generate_projects.sh ' + presetName)
# TODO: once we have Python 3.7.2 for linux, add the text=True instead of universal_newlines
result = subprocess.run(['bash', './generate_projects.sh', presetName], cwd=os.environ['PHYSX_ROOT_DIR'], check=True, universal_newlines=True)
# TODO: catch exception and add capture errors
else:
presetName = sys.argv[1]
if filterPreset(presetName):
presetProvided(presetName)
else:
print('Preset not supported on this build platform.')
main()
|
NVIDIA-Omniverse/PhysX/physx/buildtools/presets/public/vc16win64.xml | <?xml version="1.0" encoding="utf-8"?>
<preset name="vc16win64" comment="VC16 Win64 PhysX general settings">
<platform targetPlatform="win64" compiler="vc16" />
<CMakeSwitches>
<cmakeSwitch name="PX_BUILDSNIPPETS" value="True" comment="Generate the snippets" />
<cmakeSwitch name="PX_BUILDPVDRUNTIME" value="True" comment="Generate the OmniPVD project" />
<cmakeSwitch name="PX_GENERATE_STATIC_LIBRARIES" value="False" comment="Generate static libraries" />
<cmakeSwitch name="NV_USE_STATIC_WINCRT" value="True" comment="Use the statically linked windows CRT" />
<cmakeSwitch name="NV_USE_DEBUG_WINCRT" value="True" comment="Use the debug version of the CRT" />
<cmakeSwitch name="PX_FLOAT_POINT_PRECISE_MATH" value="False" comment="Float point precise math" />
</CMakeSwitches>
<CMakeParams>
<cmakeParam name="CMAKE_INSTALL_PREFIX" value="install/vc16win64/PhysX" comment="Install path relative to PhysX SDK root" />
</CMakeParams>
</preset> |
NVIDIA-Omniverse/PhysX/physx/buildtools/presets/public/linux-aarch64.xml | <?xml version="1.0" encoding="utf-8"?>
<preset name="linux-aarch64" comment="Linux-aarch64 gcc PhysX SDK general settings">
<platform targetPlatform="linuxAarch64" compiler="gcc" />
<CMakeSwitches>
<cmakeSwitch name="PX_BUILDSNIPPETS" value="True" comment="Generate the snippets" />
<cmakeSwitch name="PX_BUILDPVDRUNTIME" value="True" comment="Generate the OmniPVD project" />
<cmakeSwitch name="PX_GENERATE_STATIC_LIBRARIES" value="True" comment="Generate static libs" />
</CMakeSwitches>
<CMakeParams>
<cmakeParam name="CMAKE_INSTALL_PREFIX" value="install/linux-aarch64/PhysX" comment="Install path relative to PhysX SDK root" />
</CMakeParams>
</preset> |
NVIDIA-Omniverse/PhysX/physx/buildtools/presets/public/vc17win64.xml | <?xml version="1.0" encoding="utf-8"?>
<preset name="vc17win64" comment="VC17 Win64 PhysX general settings">
<platform targetPlatform="win64" compiler="vc17" />
<CMakeSwitches>
<cmakeSwitch name="PX_BUILDSNIPPETS" value="True" comment="Generate the snippets" />
<cmakeSwitch name="PX_BUILDPVDRUNTIME" value="True" comment="Generate the OmniPVD project" />
<cmakeSwitch name="PX_GENERATE_STATIC_LIBRARIES" value="False" comment="Generate static libraries" />
<cmakeSwitch name="NV_USE_STATIC_WINCRT" value="True" comment="Use the statically linked windows CRT" />
<cmakeSwitch name="NV_USE_DEBUG_WINCRT" value="True" comment="Use the debug version of the CRT" />
<cmakeSwitch name="PX_FLOAT_POINT_PRECISE_MATH" value="False" comment="Float point precise math" />
</CMakeSwitches>
<CMakeParams>
<cmakeParam name="CMAKE_INSTALL_PREFIX" value="install/vc17win64/PhysX" comment="Install path relative to PhysX SDK root" />
</CMakeParams>
</preset> |
NVIDIA-Omniverse/PhysX/physx/buildtools/presets/public/linux.xml | <?xml version="1.0" encoding="utf-8"?>
<preset name="linux" comment="Linux clang PhysX SDK general settings">
<platform targetPlatform="linux" compiler="clang" />
<CMakeSwitches>
<cmakeSwitch name="PX_BUILDSNIPPETS" value="True" comment="Generate the snippets" />
<cmakeSwitch name="PX_BUILDPVDRUNTIME" value="True" comment="Generate the OmniPVD project" />
<cmakeSwitch name="PX_GENERATE_STATIC_LIBRARIES" value="True" comment="Generate static libs" />
</CMakeSwitches>
<CMakeParams>
<cmakeParam name="CMAKE_INSTALL_PREFIX" value="install/linux/PhysX" comment="Install path relative to PhysX SDK root" />
</CMakeParams>
</preset> |
NVIDIA-Omniverse/PhysX/physx/buildtools/presets/public/vc15win64.xml | <?xml version="1.0" encoding="utf-8"?>
<preset name="vc15win64" comment="VC15 Win64 PhysX general settings">
<platform targetPlatform="win64" compiler="vc15" />
<CMakeSwitches>
<cmakeSwitch name="PX_BUILDSNIPPETS" value="True" comment="Generate the snippets" />
<cmakeSwitch name="PX_BUILDPVDRUNTIME" value="True" comment="Generate the OmniPVD project" />
<cmakeSwitch name="PX_GENERATE_STATIC_LIBRARIES" value="False" comment="Generate static libraries" />
<cmakeSwitch name="NV_USE_STATIC_WINCRT" value="True" comment="Use the statically linked windows CRT" />
<cmakeSwitch name="NV_USE_DEBUG_WINCRT" value="True" comment="Use the debug version of the CRT" />
<cmakeSwitch name="PX_FLOAT_POINT_PRECISE_MATH" value="False" comment="Float point precise math" />
</CMakeSwitches>
<CMakeParams>
<cmakeParam name="CMAKE_INSTALL_PREFIX" value="install/vc15win64/PhysX" comment="Install path relative to PhysX SDK root" />
</CMakeParams>
</preset> |
NVIDIA-Omniverse/PhysX/physx/buildtools/templates/boilerplate_bsd.txt | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved. |
NVIDIA-Omniverse/PhysX/physx/buildtools/templates/PxIncludeTemplate.h | ${BOILERPLATE_CONTENT}
#ifndef PX_${HEADER_GUARD_NAME}
#define PX_${HEADER_GUARD_NAME}
${HEADER_CONTENT}
#endif // PX_${HEADER_GUARD_NAME}
|
NVIDIA-Omniverse/PhysX/physx/buildtools/packman/python.sh | #!/bin/bash
# Copyright 2019-2020 NVIDIA CORPORATION
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
PACKMAN_CMD="$(dirname "${BASH_SOURCE}")/packman"
if [ ! -f "$PACKMAN_CMD" ]; then
PACKMAN_CMD="${PACKMAN_CMD}.sh"
fi
source "$PACKMAN_CMD" init
export PYTHONPATH="${PM_MODULE_DIR}:${PYTHONPATH}"
if [ -z "${PYTHONNOUSERSITE:-}" ]; then
export PYTHONNOUSERSITE=1
fi
# For performance, default to unbuffered; however, allow overriding via
# PYTHONUNBUFFERED=0 since PYTHONUNBUFFERED on windows can truncate output
# when printing long strings
if [ -z "${PYTHONUNBUFFERED:-}" ]; then
export PYTHONUNBUFFERED=1
fi
# workaround for our python not shipping with certs
if [[ -z ${SSL_CERT_DIR:-} ]]; then
export SSL_CERT_DIR=/etc/ssl/certs/
fi
"${PM_PYTHON}" "$@"
|
NVIDIA-Omniverse/PhysX/physx/buildtools/packman/python.bat | :: Copyright 2019-2020 NVIDIA CORPORATION
::
:: Licensed under the Apache License, Version 2.0 (the "License");
:: you may not use this file except in compliance with the License.
:: You may obtain a copy of the License at
::
:: http://www.apache.org/licenses/LICENSE-2.0
::
:: Unless required by applicable law or agreed to in writing, software
:: distributed under the License is distributed on an "AS IS" BASIS,
:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
:: See the License for the specific language governing permissions and
:: limitations under the License.
@echo off
setlocal enableextensions
call "%~dp0\packman" init
set "PYTHONPATH=%PM_MODULE_DIR%;%PYTHONPATH%"
if not defined PYTHONNOUSERSITE (
set PYTHONNOUSERSITE=1
)
REM For performance, default to unbuffered; however, allow overriding via
REM PYTHONUNBUFFERED=0 since PYTHONUNBUFFERED on windows can truncate output
REM when printing long strings
if not defined PYTHONUNBUFFERED (
set PYTHONUNBUFFERED=1
)
"%PM_PYTHON%" %* |
NVIDIA-Omniverse/PhysX/physx/buildtools/packman/packman.cmd | :: RUN_PM_MODULE must always be at the same spot for packman update to work (batch reloads file during update!)
:: [xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx]
:: Reset errorlevel status (don't inherit from caller)
@call :ECHO_AND_RESET_ERROR
:: You can remove this section if you do your own manual configuration of the dev machines
call :CONFIGURE
if %errorlevel% neq 0 ( exit /b %errorlevel% )
:: Everything below is mandatory
if not defined PM_PYTHON goto :PYTHON_ENV_ERROR
if not defined PM_MODULE goto :MODULE_ENV_ERROR
set PM_VAR_PATH_ARG=
if "%1"=="pull" goto :SET_VAR_PATH
if "%1"=="install" goto :SET_VAR_PATH
:RUN_PM_MODULE
"%PM_PYTHON%" -S -s -u -E "%PM_MODULE%" %* %PM_VAR_PATH_ARG%
if %errorlevel% neq 0 ( exit /b %errorlevel% )
:: Marshall environment variables into the current environment if they have been generated and remove temporary file
if exist "%PM_VAR_PATH%" (
for /F "usebackq tokens=*" %%A in ("%PM_VAR_PATH%") do set "%%A"
)
if %errorlevel% neq 0 ( goto :VAR_ERROR )
if exist "%PM_VAR_PATH%" (
del /F "%PM_VAR_PATH%"
)
if %errorlevel% neq 0 ( goto :VAR_ERROR )
set PM_VAR_PATH=
goto :eof
:: Subroutines below
:PYTHON_ENV_ERROR
@echo User environment variable PM_PYTHON is not set! Please configure machine for packman or call configure.bat.
exit /b 1
:MODULE_ENV_ERROR
@echo User environment variable PM_MODULE is not set! Please configure machine for packman or call configure.bat.
exit /b 1
:VAR_ERROR
@echo Error while processing and setting environment variables!
exit /b 1
:: pad [xxxx]
:ECHO_AND_RESET_ERROR
@echo off
if /I "%PM_VERBOSITY%"=="debug" (
@echo on
)
exit /b 0
:SET_VAR_PATH
:: Generate temporary path for variable file
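:: The packman module writes NAME=value lines into this file; the for /F loop
:: after :RUN_PM_MODULE reads them back with *set* to marshal them into the
:: calling shell.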
for /f "delims=" %%a in ('%PM_PYTHON% -S -s -u -E -c "import tempfile;file = tempfile.NamedTemporaryFile(mode='w+t', delete=False);print(file.name)"') do (set PM_VAR_PATH=%%a)
set PM_VAR_PATH_ARG=--var-path="%PM_VAR_PATH%"
goto :RUN_PM_MODULE
:CONFIGURE
:: Must capture and set code page to work around issue #279, powershell invocation mutates console font
:: This issue only happens in Windows CMD shell when using 65001 code page. Some Git Bash implementations
:: don't support chcp so this workaround is a bit convoluted.
:: Test for chcp:
chcp > nul 2>&1
if %errorlevel% equ 0 (
for /f "tokens=2 delims=:" %%a in ('chcp') do (set PM_OLD_CODE_PAGE=%%a)
) else (
call :ECHO_AND_RESET_ERROR
)
:: trim leading space (this is safe even when PM_OLD_CODE_PAGE has not been set)
set PM_OLD_CODE_PAGE=%PM_OLD_CODE_PAGE:~1%
if "%PM_OLD_CODE_PAGE%" equ "65001" (
chcp 437 > nul
set PM_RESTORE_CODE_PAGE=1
)
call "%~dp0\bootstrap\configure.bat"
set PM_CONFIG_ERRORLEVEL=%errorlevel%
if defined PM_RESTORE_CODE_PAGE (
:: Restore code page
chcp %PM_OLD_CODE_PAGE% > nul
)
set PM_OLD_CODE_PAGE=
set PM_RESTORE_CODE_PAGE=
exit /b %PM_CONFIG_ERRORLEVEL%
|
NVIDIA-Omniverse/PhysX/physx/buildtools/packman/config.packman.xml | <config remotes="cloudfront">
<remote2 name="cloudfront">
<transport actions="download" protocol="https" packageLocation="d4i3qtqj3r0z5.cloudfront.net/${name}@${version}" />
</remote2>
</config>
|
NVIDIA-Omniverse/PhysX/physx/buildtools/packman/bootstrap/generate_temp_file_name.ps1 | <#
Copyright 2019 NVIDIA CORPORATION
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
#>
$out = [System.IO.Path]::GetTempFileName()
Write-Host $out
# SIG # Begin signature block
# MIIaVwYJKoZIhvcNAQcCoIIaSDCCGkQCAQExDzANBglghkgBZQMEAgEFADB5Bgor
# BgEEAYI3AgEEoGswaTA0BgorBgEEAYI3AgEeMCYCAwEAAAQQH8w7YFlLCE63JNLG
# KX7zUQIBAAIBAAIBAAIBAAIBADAxMA0GCWCGSAFlAwQCAQUABCAK+Ewup1N0/mdf
# 1l4R58rxyumHgZvTmEhrYTb2Zf0zd6CCCiIwggTTMIIDu6ADAgECAhBi50XpIWUh
# PJcfXEkK6hKlMA0GCSqGSIb3DQEBCwUAMIGEMQswCQYDVQQGEwJVUzEdMBsGA1UE
# ChMUU3ltYW50ZWMgQ29ycG9yYXRpb24xHzAdBgNVBAsTFlN5bWFudGVjIFRydXN0
# IE5ldHdvcmsxNTAzBgNVBAMTLFN5bWFudGVjIENsYXNzIDMgU0hBMjU2IENvZGUg
# U2lnbmluZyBDQSAtIEcyMB4XDTE4MDcwOTAwMDAwMFoXDTIxMDcwOTIzNTk1OVow
# gYMxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRQwEgYDVQQHDAtT
# YW50YSBDbGFyYTEbMBkGA1UECgwSTlZJRElBIENvcnBvcmF0aW9uMQ8wDQYDVQQL
# DAZJVC1NSVMxGzAZBgNVBAMMEk5WSURJQSBDb3Jwb3JhdGlvbjCCASIwDQYJKoZI
# hvcNAQEBBQADggEPADCCAQoCggEBALEZN63dA47T4i90jZ84CJ/aWUwVtLff8AyP
# YspFfIZGdZYiMgdb8A5tBh7653y0G/LZL6CVUkgejcpvBU/Dl/52a+gSWy2qJ2bH
# jMFMKCyQDhdpCAKMOUKSC9rfzm4cFeA9ct91LQCAait4LhLlZt/HF7aG+r0FgCZa
# HJjJvE7KNY9G4AZXxjSt8CXS8/8NQMANqjLX1r+F+Hl8PzQ1fVx0mMsbdtaIV4Pj
# 5flAeTUnz6+dCTx3vTUo8MYtkS2UBaQv7t7H2B7iwJDakEQKk1XHswJdeqG0osDU
# z6+NVks7uWE1N8UIhvzbw0FEX/U2kpfyWaB/J3gMl8rVR8idPj8CAwEAAaOCAT4w
# ggE6MAkGA1UdEwQCMAAwDgYDVR0PAQH/BAQDAgeAMBMGA1UdJQQMMAoGCCsGAQUF
# BwMDMGEGA1UdIARaMFgwVgYGZ4EMAQQBMEwwIwYIKwYBBQUHAgEWF2h0dHBzOi8v
# ZC5zeW1jYi5jb20vY3BzMCUGCCsGAQUFBwICMBkMF2h0dHBzOi8vZC5zeW1jYi5j
# b20vcnBhMB8GA1UdIwQYMBaAFNTABiJJ6zlL3ZPiXKG4R3YJcgNYMCsGA1UdHwQk
# MCIwIKAeoByGGmh0dHA6Ly9yYi5zeW1jYi5jb20vcmIuY3JsMFcGCCsGAQUFBwEB
# BEswSTAfBggrBgEFBQcwAYYTaHR0cDovL3JiLnN5bWNkLmNvbTAmBggrBgEFBQcw
# AoYaaHR0cDovL3JiLnN5bWNiLmNvbS9yYi5jcnQwDQYJKoZIhvcNAQELBQADggEB
# AIJKh5vKJdhHJtMzATmc1BmXIQ3RaJONOZ5jMHn7HOkYU1JP0OIzb4pXXkH8Xwfr
# K6bnd72IhcteyksvKsGpSvK0PBBwzodERTAu1Os2N+EaakxQwV/xtqDm1E3IhjHk
# fRshyKKzmFk2Ci323J4lHtpWUj5Hz61b8gd72jH7xnihGi+LORJ2uRNZ3YuqMNC3
# SBC8tAyoJqEoTJirULUCXW6wX4XUm5P2sx+htPw7szGblVKbQ+PFinNGnsSEZeKz
# D8jUb++1cvgTKH59Y6lm43nsJjkZU77tNqyq4ABwgQRk6lt8cS2PPwjZvTmvdnla
# ZhR0K4of+pQaUQHXVIBdji8wggVHMIIEL6ADAgECAhB8GzU1SufbdOdBXxFpymuo
# MA0GCSqGSIb3DQEBCwUAMIG9MQswCQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNp
# Z24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0IE5ldHdvcmsxOjA4BgNV
# BAsTMShjKSAyMDA4IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl
# IG9ubHkxODA2BgNVBAMTL1ZlcmlTaWduIFVuaXZlcnNhbCBSb290IENlcnRpZmlj
# YXRpb24gQXV0aG9yaXR5MB4XDTE0MDcyMjAwMDAwMFoXDTI0MDcyMTIzNTk1OVow
# gYQxCzAJBgNVBAYTAlVTMR0wGwYDVQQKExRTeW1hbnRlYyBDb3Jwb3JhdGlvbjEf
# MB0GA1UECxMWU3ltYW50ZWMgVHJ1c3QgTmV0d29yazE1MDMGA1UEAxMsU3ltYW50
# ZWMgQ2xhc3MgMyBTSEEyNTYgQ29kZSBTaWduaW5nIENBIC0gRzIwggEiMA0GCSqG
# SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDXlUPU3N9nrjn7UqS2JjEEcOm3jlsqujdp
# NZWPu8Aw54bYc7vf69F2P4pWjustS/BXGE6xjaUz0wt1I9VqeSfdo9P3Dodltd6t
# HPH1NbQiUa8iocFdS5B/wFlOq515qQLXHkmxO02H/sJ4q7/vUq6crwjZOeWaUT5p
# XzAQTnFjbFjh8CAzGw90vlvLEuHbjMSAlHK79kWansElC/ujHJ7YpglwcezAR0yP
# fcPeGc4+7gRyjhfT//CyBTIZTNOwHJ/+pXggQnBBsCaMbwDIOgARQXpBsKeKkQSg
# mXj0d7TzYCrmbFAEtxRg/w1R9KiLhP4h2lxeffUpeU+wRHRvbXL/AgMBAAGjggF4
# MIIBdDAuBggrBgEFBQcBAQQiMCAwHgYIKwYBBQUHMAGGEmh0dHA6Ly9zLnN5bWNk
# LmNvbTASBgNVHRMBAf8ECDAGAQH/AgEAMGYGA1UdIARfMF0wWwYLYIZIAYb4RQEH
# FwMwTDAjBggrBgEFBQcCARYXaHR0cHM6Ly9kLnN5bWNiLmNvbS9jcHMwJQYIKwYB
# BQUHAgIwGRoXaHR0cHM6Ly9kLnN5bWNiLmNvbS9ycGEwNgYDVR0fBC8wLTAroCmg
# J4YlaHR0cDovL3Muc3ltY2IuY29tL3VuaXZlcnNhbC1yb290LmNybDATBgNVHSUE
# DDAKBggrBgEFBQcDAzAOBgNVHQ8BAf8EBAMCAQYwKQYDVR0RBCIwIKQeMBwxGjAY
# BgNVBAMTEVN5bWFudGVjUEtJLTEtNzI0MB0GA1UdDgQWBBTUwAYiSes5S92T4lyh
# uEd2CXIDWDAfBgNVHSMEGDAWgBS2d/ppSEefUxLVwuoHMnYH0ZcHGTANBgkqhkiG
# 9w0BAQsFAAOCAQEAf+vKp+qLdkLrPo4gVDDjt7nc+kg+FscPRZUQzSeGo2bzAu1x
# +KrCVZeRcIP5Un5SaTzJ8eCURoAYu6HUpFam8x0AkdWG80iH4MvENGggXrTL+QXt
# nK9wUye56D5+UaBpcYvcUe2AOiUyn0SvbkMo0yF1u5fYi4uM/qkERgSF9xWcSxGN
# xCwX/tVuf5riVpLxlrOtLfn039qJmc6yOETA90d7yiW5+ipoM5tQct6on9TNLAs0
# vYsweEDgjY4nG5BvGr4IFYFd6y/iUedRHsl4KeceZb847wFKAQkkDhbEFHnBQTc0
# 0D2RUpSd4WjvCPDiaZxnbpALGpNx1CYCw8BaIzGCD4swgg+HAgEBMIGZMIGEMQsw
# CQYDVQQGEwJVUzEdMBsGA1UEChMUU3ltYW50ZWMgQ29ycG9yYXRpb24xHzAdBgNV
# BAsTFlN5bWFudGVjIFRydXN0IE5ldHdvcmsxNTAzBgNVBAMTLFN5bWFudGVjIENs
# YXNzIDMgU0hBMjU2IENvZGUgU2lnbmluZyBDQSAtIEcyAhBi50XpIWUhPJcfXEkK
# 6hKlMA0GCWCGSAFlAwQCAQUAoHwwEAYKKwYBBAGCNwIBDDECMAAwGQYJKoZIhvcN
# AQkDMQwGCisGAQQBgjcCAQQwHAYKKwYBBAGCNwIBCzEOMAwGCisGAQQBgjcCARUw
# LwYJKoZIhvcNAQkEMSIEIPW+EpFrZSdzrjFFo0UT+PzFeYn/GcWNyWFaU/JMrMfR
# MA0GCSqGSIb3DQEBAQUABIIBAA8fmU/RJcF9t60DZZAjf8FB3EZddOaHgI9z40nV
# CnfTGi0OEYU48Pe9jkQQV2fABpACfW74xmNv3QNgP2qP++mkpKBVv28EIAuINsFt
# YAITEljLN/VOVul8lvjxar5GSFFgpE5F6j4xcvI69LuCWbN8cteTVsBGg+eGmjfx
# QZxP252z3FqPN+mihtFegF2wx6Mg6/8jZjkO0xjBOwSdpTL4uyQfHvaPBKXuWxRx
# ioXw4ezGAwkuBoxWK8UG7Qu+7CSfQ3wMOjvyH2+qn30lWEsvRMdbGAp7kvfr3EGZ
# a3WN7zXZ+6KyZeLeEH7yCDzukAjptaY/+iLVjJsuzC6tCSqhgg1EMIINQAYKKwYB
# BAGCNwMDATGCDTAwgg0sBgkqhkiG9w0BBwKggg0dMIINGQIBAzEPMA0GCWCGSAFl
# AwQCAQUAMHcGCyqGSIb3DQEJEAEEoGgEZjBkAgEBBglghkgBhv1sBwEwMTANBglg
# hkgBZQMEAgEFAAQg14BnPazQkW9whhZu1d0bC3lqqScvxb3SSb1QT8e3Xg0CEFhw
# aMBZ2hExXhr79A9+bXEYDzIwMjEwNDA4MDkxMTA5WqCCCjcwggT+MIID5qADAgEC
# AhANQkrgvjqI/2BAIc4UAPDdMA0GCSqGSIb3DQEBCwUAMHIxCzAJBgNVBAYTAlVT
# MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j
# b20xMTAvBgNVBAMTKERpZ2lDZXJ0IFNIQTIgQXNzdXJlZCBJRCBUaW1lc3RhbXBp
# bmcgQ0EwHhcNMjEwMTAxMDAwMDAwWhcNMzEwMTA2MDAwMDAwWjBIMQswCQYDVQQG
# EwJVUzEXMBUGA1UEChMORGlnaUNlcnQsIEluYy4xIDAeBgNVBAMTF0RpZ2lDZXJ0
# IFRpbWVzdGFtcCAyMDIxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA
# wuZhhGfFivUNCKRFymNrUdc6EUK9CnV1TZS0DFC1JhD+HchvkWsMlucaXEjvROW/
# m2HNFZFiWrj/ZwucY/02aoH6KfjdK3CF3gIY83htvH35x20JPb5qdofpir34hF0e
# dsnkxnZ2OlPR0dNaNo/Go+EvGzq3YdZz7E5tM4p8XUUtS7FQ5kE6N1aG3JMjjfdQ
# Jehk5t3Tjy9XtYcg6w6OLNUj2vRNeEbjA4MxKUpcDDGKSoyIxfcwWvkUrxVfbENJ
# Cf0mI1P2jWPoGqtbsR0wwptpgrTb/FZUvB+hh6u+elsKIC9LCcmVp42y+tZji06l
# chzun3oBc/gZ1v4NSYS9AQIDAQABo4IBuDCCAbQwDgYDVR0PAQH/BAQDAgeAMAwG
# A1UdEwEB/wQCMAAwFgYDVR0lAQH/BAwwCgYIKwYBBQUHAwgwQQYDVR0gBDowODA2
# BglghkgBhv1sBwEwKTAnBggrBgEFBQcCARYbaHR0cDovL3d3dy5kaWdpY2VydC5j
# b20vQ1BTMB8GA1UdIwQYMBaAFPS24SAd/imu0uRhpbKiJbLIFzVuMB0GA1UdDgQW
# BBQ2RIaOpLqwZr68KC0dRDbd42p6vDBxBgNVHR8EajBoMDKgMKAuhixodHRwOi8v
# Y3JsMy5kaWdpY2VydC5jb20vc2hhMi1hc3N1cmVkLXRzLmNybDAyoDCgLoYsaHR0
# cDovL2NybDQuZGlnaWNlcnQuY29tL3NoYTItYXNzdXJlZC10cy5jcmwwgYUGCCsG
# AQUFBwEBBHkwdzAkBggrBgEFBQcwAYYYaHR0cDovL29jc3AuZGlnaWNlcnQuY29t
# ME8GCCsGAQUFBzAChkNodHRwOi8vY2FjZXJ0cy5kaWdpY2VydC5jb20vRGlnaUNl
# cnRTSEEyQXNzdXJlZElEVGltZXN0YW1waW5nQ0EuY3J0MA0GCSqGSIb3DQEBCwUA
# A4IBAQBIHNy16ZojvOca5yAOjmdG/UJyUXQKI0ejq5LSJcRwWb4UoOUngaVNFBUZ
# B3nw0QTDhtk7vf5EAmZN7WmkD/a4cM9i6PVRSnh5Nnont/PnUp+Tp+1DnnvntN1B
# Ion7h6JGA0789P63ZHdjXyNSaYOC+hpT7ZDMjaEXcw3082U5cEvznNZ6e9oMvD0y
# 0BvL9WH8dQgAdryBDvjA4VzPxBFy5xtkSdgimnUVQvUtMjiB2vRgorq0Uvtc4GEk
# JU+y38kpqHNDUdq9Y9YfW5v3LhtPEx33Sg1xfpe39D+E68Hjo0mh+s6nv1bPull2
# YYlffqe0jmd4+TaY4cso2luHpoovMIIFMTCCBBmgAwIBAgIQCqEl1tYyG35B5AXa
# NpfCFTANBgkqhkiG9w0BAQsFADBlMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGln
# aUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtE
# aWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwHhcNMTYwMTA3MTIwMDAwWhcNMzEw
# MTA3MTIwMDAwWjByMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5j
# MRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMTEwLwYDVQQDEyhEaWdpQ2VydCBT
# SEEyIEFzc3VyZWQgSUQgVGltZXN0YW1waW5nIENBMIIBIjANBgkqhkiG9w0BAQEF
# AAOCAQ8AMIIBCgKCAQEAvdAy7kvNj3/dqbqCmcU5VChXtiNKxA4HRTNREH3Q+X1N
# aH7ntqD0jbOI5Je/YyGQmL8TvFfTw+F+CNZqFAA49y4eO+7MpvYyWf5fZT/gm+vj
# RkcGGlV+Cyd+wKL1oODeIj8O/36V+/OjuiI+GKwR5PCZA207hXwJ0+5dyJoLVOOo
# CXFr4M8iEA91z3FyTgqt30A6XLdR4aF5FMZNJCMwXbzsPGBqrC8HzP3w6kfZiFBe
# /WZuVmEnKYmEUeaC50ZQ/ZQqLKfkdT66mA+Ef58xFNat1fJky3seBdCEGXIX8RcG
# 7z3N1k3vBkL9olMqT4UdxB08r8/arBD13ays6Vb/kwIDAQABo4IBzjCCAcowHQYD
# VR0OBBYEFPS24SAd/imu0uRhpbKiJbLIFzVuMB8GA1UdIwQYMBaAFEXroq/0ksuC
# MS1Ri6enIZ3zbcgPMBIGA1UdEwEB/wQIMAYBAf8CAQAwDgYDVR0PAQH/BAQDAgGG
# MBMGA1UdJQQMMAoGCCsGAQUFBwMIMHkGCCsGAQUFBwEBBG0wazAkBggrBgEFBQcw
# AYYYaHR0cDovL29jc3AuZGlnaWNlcnQuY29tMEMGCCsGAQUFBzAChjdodHRwOi8v
# Y2FjZXJ0cy5kaWdpY2VydC5jb20vRGlnaUNlcnRBc3N1cmVkSURSb290Q0EuY3J0
# MIGBBgNVHR8EejB4MDqgOKA2hjRodHRwOi8vY3JsNC5kaWdpY2VydC5jb20vRGln
# aUNlcnRBc3N1cmVkSURSb290Q0EuY3JsMDqgOKA2hjRodHRwOi8vY3JsMy5kaWdp
# Y2VydC5jb20vRGlnaUNlcnRBc3N1cmVkSURSb290Q0EuY3JsMFAGA1UdIARJMEcw
# OAYKYIZIAYb9bAACBDAqMCgGCCsGAQUFBwIBFhxodHRwczovL3d3dy5kaWdpY2Vy
# dC5jb20vQ1BTMAsGCWCGSAGG/WwHATANBgkqhkiG9w0BAQsFAAOCAQEAcZUS6VGH
# VmnN793afKpjerN4zwY3QITvS4S/ys8DAv3Fp8MOIEIsr3fzKx8MIVoqtwU0HWqu
# mfgnoma/Capg33akOpMP+LLR2HwZYuhegiUexLoceywh4tZbLBQ1QwRostt1AuBy
# x5jWPGTlH0gQGF+JOGFNYkYkh2OMkVIsrymJ5Xgf1gsUpYDXEkdws3XVk4WTfraS
# Z/tTYYmo9WuWwPRYaQ18yAGxuSh1t5ljhSKMYcp5lH5Z/IwP42+1ASa2bKXuh1Eh
# 5Fhgm7oMLSttosR+u8QlK0cCCHxJrhO24XxCQijGGFbPQTS2Zl22dHv1VjMiLyI2
# skuiSpXY9aaOUjGCAk0wggJJAgEBMIGGMHIxCzAJBgNVBAYTAlVTMRUwEwYDVQQK
# EwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5jb20xMTAvBgNV
# BAMTKERpZ2lDZXJ0IFNIQTIgQXNzdXJlZCBJRCBUaW1lc3RhbXBpbmcgQ0ECEA1C
# SuC+Ooj/YEAhzhQA8N0wDQYJYIZIAWUDBAIBBQCggZgwGgYJKoZIhvcNAQkDMQ0G
# CyqGSIb3DQEJEAEEMBwGCSqGSIb3DQEJBTEPFw0yMTA0MDgwOTExMDlaMCsGCyqG
# SIb3DQEJEAIMMRwwGjAYMBYEFOHXgqjhkb7va8oWkbWqtJSmJJvzMC8GCSqGSIb3
# DQEJBDEiBCCHEAmNNj2zWjWYRfEi4FgzZvrI16kv/U2b9b3oHw6UVDANBgkqhkiG
# 9w0BAQEFAASCAQCdefEKh6Qmwx7xGCkrYi/A+/Cla6LdnYJp38eMs3fqTTvjhyDw
# HffXrwdqWy5/fgW3o3qJXqa5o7hLxYIoWSULOCpJRGdt+w7XKPAbZqHrN9elAhWJ
# vpBTCEaj7dVxr1Ka4NsoPSYe0eidDBmmvGvp02J4Z1j8+ImQPKN6Hv/L8Ixaxe7V
# mH4VtXIiBK8xXdi4wzO+A+qLtHEJXz3Gw8Bp3BNtlDGIUkIhVTM3Q1xcSEqhOLqo
# PGdwCw9acxdXNWWPjOJkNH656Bvmkml+0p6MTGIeG4JCeRh1Wpqm1ZGSoEcXNaof
# wOgj48YzI+dNqBD9i7RSWCqJr2ygYKRTxnuU
# SIG # End signature block
|
NVIDIA-Omniverse/PhysX/physx/buildtools/packman/bootstrap/configure.bat | :: Copyright 2019-2023 NVIDIA CORPORATION
::
:: Licensed under the Apache License, Version 2.0 (the "License");
:: you may not use this file except in compliance with the License.
:: You may obtain a copy of the License at
::
:: http://www.apache.org/licenses/LICENSE-2.0
::
:: Unless required by applicable law or agreed to in writing, software
:: distributed under the License is distributed on an "AS IS" BASIS,
:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
:: See the License for the specific language governing permissions and
:: limitations under the License.
set PM_PACKMAN_VERSION=7.15.1
:: Specify where packman command is rooted
set PM_INSTALL_PATH=%~dp0..
:: The external root may already be configured and we should do minimal work in that case
if defined PM_PACKAGES_ROOT goto ENSURE_DIR
:: If the folder isn't set we assume that the best place for it is on the drive that we are currently
:: running from
set PM_DRIVE=%CD:~0,2%
set PM_PACKAGES_ROOT=%PM_DRIVE%\packman-repo
:: We use *setx* here so that the variable is persisted in the user environment
echo Setting user environment variable PM_PACKAGES_ROOT to %PM_PACKAGES_ROOT%
setx PM_PACKAGES_ROOT %PM_PACKAGES_ROOT%
if %errorlevel% neq 0 ( goto ERROR )
:: The above doesn't work properly from a build step in VisualStudio because a separate process is
:: spawned for it so it will be lost for subsequent compilation steps - VisualStudio must
:: be launched from a new process. We catch this odd-ball case here:
if defined PM_DISABLE_VS_WARNING goto ENSURE_DIR
if not defined VSLANG goto ENSURE_DIR
echo The above is a once-per-computer operation. Unfortunately VisualStudio cannot pick up the environment change
echo unless *VisualStudio is RELAUNCHED*.
echo If you are launching VisualStudio from the command line or a command line utility, make sure
echo you have a fresh launch environment (relaunch the command line or utility).
echo If you are using 'linkPath' and referring to packages via local folder links you can safely ignore this warning.
echo You can disable this warning by setting the environment variable PM_DISABLE_VS_WARNING.
echo.
:: Check for the directory that we need. Note that mkdir will create any directories
:: that may be needed in the path
:ENSURE_DIR
if not exist "%PM_PACKAGES_ROOT%" (
echo Creating packman packages cache at %PM_PACKAGES_ROOT%
mkdir "%PM_PACKAGES_ROOT%"
)
if %errorlevel% neq 0 ( goto ERROR_MKDIR_PACKAGES_ROOT )
:: The Python interpreter may already be externally configured
if defined PM_PYTHON_EXT (
set PM_PYTHON=%PM_PYTHON_EXT%
goto PACKMAN
)
set PM_PYTHON_VERSION=3.10.5-1-windows-x86_64
set PM_PYTHON_BASE_DIR=%PM_PACKAGES_ROOT%\python
set PM_PYTHON_DIR=%PM_PYTHON_BASE_DIR%\%PM_PYTHON_VERSION%
set PM_PYTHON=%PM_PYTHON_DIR%\python.exe
if exist "%PM_PYTHON%" goto PACKMAN
if not exist "%PM_PYTHON_BASE_DIR%" call :CREATE_PYTHON_BASE_DIR
set PM_PYTHON_PACKAGE=python@%PM_PYTHON_VERSION%.cab
for /f "delims=" %%a in ('powershell -ExecutionPolicy ByPass -NoLogo -NoProfile -File "%~dp0\generate_temp_file_name.ps1"') do set TEMP_FILE_NAME=%%a
set TARGET=%TEMP_FILE_NAME%.zip
call "%~dp0fetch_file_from_packman_bootstrap.cmd" %PM_PYTHON_PACKAGE% "%TARGET%"
if %errorlevel% neq 0 (
echo !!! Error fetching python from CDN !!!
goto ERROR
)
for /f "delims=" %%a in ('powershell -ExecutionPolicy ByPass -NoLogo -NoProfile -File "%~dp0\generate_temp_folder.ps1" -parentPath "%PM_PYTHON_BASE_DIR%"') do set TEMP_FOLDER_NAME=%%a
echo Unpacking Python interpreter ...
"%SystemRoot%\system32\expand.exe" -F:* "%TARGET%" "%TEMP_FOLDER_NAME%" 1> nul
del "%TARGET%"
:: Failure during extraction to temp folder name, need to clean up and abort
if %errorlevel% neq 0 (
echo !!! Error unpacking python !!!
call :CLEAN_UP_TEMP_FOLDER
goto ERROR
)
:: If python has now been installed by a concurrent process we need to clean up and then continue
if exist "%PM_PYTHON%" (
call :CLEAN_UP_TEMP_FOLDER
goto PACKMAN
) else (
if exist "%PM_PYTHON_DIR%" ( rd /s /q "%PM_PYTHON_DIR%" > nul )
)
:: Perform atomic move (allowing overwrite, /y)
move /y "%TEMP_FOLDER_NAME%" "%PM_PYTHON_DIR%" 1> nul
:: Verify that python.exe is now where we expect
if exist "%PM_PYTHON%" goto PACKMAN
:: Wait a second and try again (can help with access denied weirdness)
timeout /t 1 /nobreak 1> nul
move /y "%TEMP_FOLDER_NAME%" "%PM_PYTHON_DIR%" 1> nul
if %errorlevel% neq 0 (
echo !!! Error moving python %TEMP_FOLDER_NAME% -> %PM_PYTHON_DIR% !!!
call :CLEAN_UP_TEMP_FOLDER
goto ERROR
)
:PACKMAN
:: The packman module may already be externally configured
if defined PM_MODULE_DIR_EXT (
set PM_MODULE_DIR=%PM_MODULE_DIR_EXT%
) else (
set PM_MODULE_DIR=%PM_PACKAGES_ROOT%\packman-common\%PM_PACKMAN_VERSION%
)
set PM_MODULE=%PM_MODULE_DIR%\run.py
if exist "%PM_MODULE%" goto END
:: Clean out broken PM_MODULE_DIR if it exists
if exist "%PM_MODULE_DIR%" ( rd /s /q "%PM_MODULE_DIR%" > nul )
set PM_MODULE_PACKAGE=packman-common@%PM_PACKMAN_VERSION%.zip
for /f "delims=" %%a in ('powershell -ExecutionPolicy ByPass -NoLogo -NoProfile -File "%~dp0\generate_temp_file_name.ps1"') do set TEMP_FILE_NAME=%%a
set TARGET=%TEMP_FILE_NAME%
call "%~dp0fetch_file_from_packman_bootstrap.cmd" %PM_MODULE_PACKAGE% "%TARGET%"
if %errorlevel% neq 0 (
echo !!! Error fetching packman from CDN !!!
goto ERROR
)
echo Unpacking ...
"%PM_PYTHON%" -S -s -u -E "%~dp0\install_package.py" "%TARGET%" "%PM_MODULE_DIR%"
if %errorlevel% neq 0 (
echo !!! Error unpacking packman !!!
goto ERROR
)
del "%TARGET%"
goto END
:ERROR_MKDIR_PACKAGES_ROOT
echo Failed to automatically create packman packages repo at %PM_PACKAGES_ROOT%.
echo Please set a location explicitly that packman has permission to write to, by issuing:
echo.
echo setx PM_PACKAGES_ROOT {path-you-choose-for-storing-packman-packages-locally}
echo.
echo Then launch a new command console for the changes to take effect and run packman command again.
exit /B %errorlevel%
:ERROR
echo !!! Failure while configuring local machine :( !!!
exit /B %errorlevel%
:CLEAN_UP_TEMP_FOLDER
rd /S /Q "%TEMP_FOLDER_NAME%"
exit /B
:CREATE_PYTHON_BASE_DIR
:: We ignore errors and clean error state - if two processes create the directory one will fail which is fine
md "%PM_PYTHON_BASE_DIR%" > nul 2>&1
exit /B 0
:END
|
NVIDIA-Omniverse/PhysX/physx/buildtools/packman/bootstrap/fetch_file_from_packman_bootstrap.cmd | :: Copyright 2019 NVIDIA CORPORATION
::
:: Licensed under the Apache License, Version 2.0 (the "License");
:: you may not use this file except in compliance with the License.
:: You may obtain a copy of the License at
::
:: http://www.apache.org/licenses/LICENSE-2.0
::
:: Unless required by applicable law or agreed to in writing, software
:: distributed under the License is distributed on an "AS IS" BASIS,
:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
:: See the License for the specific language governing permissions and
:: limitations under the License.
:: You need to specify <package-name> <target-path> as input to this command
@setlocal
@set PACKAGE_NAME=%1
@set TARGET_PATH=%2
@echo Fetching %PACKAGE_NAME% ...
@powershell -ExecutionPolicy ByPass -NoLogo -NoProfile -File "%~dp0download_file_from_url.ps1" ^
-source "http://bootstrap.packman.nvidia.com/%PACKAGE_NAME%" -output %TARGET_PATH%
:: A bug in powershell prevents the errorlevel code from being set when using the -File execution option
:: We must therefore do our own failure analysis, basically make sure the file exists:
@if not exist %TARGET_PATH% goto ERROR_DOWNLOAD_FAILED
@endlocal
@exit /b 0
:ERROR_DOWNLOAD_FAILED
@echo Failed to download file from S3
@echo Most likely because the endpoint cannot be reached or the file %PACKAGE_NAME% doesn't exist
@endlocal
@exit /b 1 |
NVIDIA-Omniverse/PhysX/physx/buildtools/packman/bootstrap/download_file_from_url.ps1 | <#
Copyright 2019 NVIDIA CORPORATION
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
#>
param(
[Parameter(Mandatory=$true)][string]$source=$null,
[string]$output="out.exe"
)
$filename = $output
$triesLeft = 4
$delay = 2
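# The delay is squared after every failed attempt (2s, 4s, 16s between the
# four tries), giving an aggressive exponential backoff.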
do
{
$triesLeft -= 1
try
{
Write-Host "Downloading from bootstrap.packman.nvidia.com ..."
$wc = New-Object net.webclient
$wc.Downloadfile($source, $fileName)
exit 0
}
catch
{
Write-Host "Error downloading $source!"
Write-Host $_.Exception|format-list -force
if ($triesLeft)
{
Write-Host "Retrying in $delay seconds ..."
Start-Sleep -seconds $delay
}
$delay = $delay * $delay
}
} while ($triesLeft -gt 0)
# We only get here if the retries have been exhausted, remove any left-overs:
if (Test-Path $fileName)
{
Remove-Item $fileName
}
exit 1 |
NVIDIA-Omniverse/PhysX/physx/buildtools/packman/bootstrap/generate_temp_folder.ps1 | <#
Copyright 2019 NVIDIA CORPORATION
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
#>
param(
[Parameter(Mandatory=$true)][string]$parentPath=$null
)
[string] $name = [System.Guid]::NewGuid()
$out = Join-Path $parentPath $name
New-Item -ItemType Directory -Path ($out) | Out-Null
Write-Host $out
# SIG # Begin signature block
# MIIaVwYJKoZIhvcNAQcCoIIaSDCCGkQCAQExDzANBglghkgBZQMEAgEFADB5Bgor
# BgEEAYI3AgEEoGswaTA0BgorBgEEAYI3AgEeMCYCAwEAAAQQH8w7YFlLCE63JNLG
# KX7zUQIBAAIBAAIBAAIBAAIBADAxMA0GCWCGSAFlAwQCAQUABCB29nsqMEu+VmSF
# 7ckeVTPrEZ6hsXjOgPFlJm9ilgHUB6CCCiIwggTTMIIDu6ADAgECAhBi50XpIWUh
# PJcfXEkK6hKlMA0GCSqGSIb3DQEBCwUAMIGEMQswCQYDVQQGEwJVUzEdMBsGA1UE
# ChMUU3ltYW50ZWMgQ29ycG9yYXRpb24xHzAdBgNVBAsTFlN5bWFudGVjIFRydXN0
# IE5ldHdvcmsxNTAzBgNVBAMTLFN5bWFudGVjIENsYXNzIDMgU0hBMjU2IENvZGUg
# U2lnbmluZyBDQSAtIEcyMB4XDTE4MDcwOTAwMDAwMFoXDTIxMDcwOTIzNTk1OVow
# gYMxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRQwEgYDVQQHDAtT
# YW50YSBDbGFyYTEbMBkGA1UECgwSTlZJRElBIENvcnBvcmF0aW9uMQ8wDQYDVQQL
# DAZJVC1NSVMxGzAZBgNVBAMMEk5WSURJQSBDb3Jwb3JhdGlvbjCCASIwDQYJKoZI
# hvcNAQEBBQADggEPADCCAQoCggEBALEZN63dA47T4i90jZ84CJ/aWUwVtLff8AyP
# YspFfIZGdZYiMgdb8A5tBh7653y0G/LZL6CVUkgejcpvBU/Dl/52a+gSWy2qJ2bH
# jMFMKCyQDhdpCAKMOUKSC9rfzm4cFeA9ct91LQCAait4LhLlZt/HF7aG+r0FgCZa
# HJjJvE7KNY9G4AZXxjSt8CXS8/8NQMANqjLX1r+F+Hl8PzQ1fVx0mMsbdtaIV4Pj
# 5flAeTUnz6+dCTx3vTUo8MYtkS2UBaQv7t7H2B7iwJDakEQKk1XHswJdeqG0osDU
# z6+NVks7uWE1N8UIhvzbw0FEX/U2kpfyWaB/J3gMl8rVR8idPj8CAwEAAaOCAT4w
# ggE6MAkGA1UdEwQCMAAwDgYDVR0PAQH/BAQDAgeAMBMGA1UdJQQMMAoGCCsGAQUF
# BwMDMGEGA1UdIARaMFgwVgYGZ4EMAQQBMEwwIwYIKwYBBQUHAgEWF2h0dHBzOi8v
# ZC5zeW1jYi5jb20vY3BzMCUGCCsGAQUFBwICMBkMF2h0dHBzOi8vZC5zeW1jYi5j
# b20vcnBhMB8GA1UdIwQYMBaAFNTABiJJ6zlL3ZPiXKG4R3YJcgNYMCsGA1UdHwQk
# MCIwIKAeoByGGmh0dHA6Ly9yYi5zeW1jYi5jb20vcmIuY3JsMFcGCCsGAQUFBwEB
# BEswSTAfBggrBgEFBQcwAYYTaHR0cDovL3JiLnN5bWNkLmNvbTAmBggrBgEFBQcw
# AoYaaHR0cDovL3JiLnN5bWNiLmNvbS9yYi5jcnQwDQYJKoZIhvcNAQELBQADggEB
# AIJKh5vKJdhHJtMzATmc1BmXIQ3RaJONOZ5jMHn7HOkYU1JP0OIzb4pXXkH8Xwfr
# K6bnd72IhcteyksvKsGpSvK0PBBwzodERTAu1Os2N+EaakxQwV/xtqDm1E3IhjHk
# fRshyKKzmFk2Ci323J4lHtpWUj5Hz61b8gd72jH7xnihGi+LORJ2uRNZ3YuqMNC3
# SBC8tAyoJqEoTJirULUCXW6wX4XUm5P2sx+htPw7szGblVKbQ+PFinNGnsSEZeKz
# D8jUb++1cvgTKH59Y6lm43nsJjkZU77tNqyq4ABwgQRk6lt8cS2PPwjZvTmvdnla
# ZhR0K4of+pQaUQHXVIBdji8wggVHMIIEL6ADAgECAhB8GzU1SufbdOdBXxFpymuo
# MA0GCSqGSIb3DQEBCwUAMIG9MQswCQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNp
# Z24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0IE5ldHdvcmsxOjA4BgNV
# BAsTMShjKSAyMDA4IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl
# IG9ubHkxODA2BgNVBAMTL1ZlcmlTaWduIFVuaXZlcnNhbCBSb290IENlcnRpZmlj
# YXRpb24gQXV0aG9yaXR5MB4XDTE0MDcyMjAwMDAwMFoXDTI0MDcyMTIzNTk1OVow
# gYQxCzAJBgNVBAYTAlVTMR0wGwYDVQQKExRTeW1hbnRlYyBDb3Jwb3JhdGlvbjEf
# MB0GA1UECxMWU3ltYW50ZWMgVHJ1c3QgTmV0d29yazE1MDMGA1UEAxMsU3ltYW50
# ZWMgQ2xhc3MgMyBTSEEyNTYgQ29kZSBTaWduaW5nIENBIC0gRzIwggEiMA0GCSqG
# SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDXlUPU3N9nrjn7UqS2JjEEcOm3jlsqujdp
# NZWPu8Aw54bYc7vf69F2P4pWjustS/BXGE6xjaUz0wt1I9VqeSfdo9P3Dodltd6t
# HPH1NbQiUa8iocFdS5B/wFlOq515qQLXHkmxO02H/sJ4q7/vUq6crwjZOeWaUT5p
# XzAQTnFjbFjh8CAzGw90vlvLEuHbjMSAlHK79kWansElC/ujHJ7YpglwcezAR0yP
# fcPeGc4+7gRyjhfT//CyBTIZTNOwHJ/+pXggQnBBsCaMbwDIOgARQXpBsKeKkQSg
# mXj0d7TzYCrmbFAEtxRg/w1R9KiLhP4h2lxeffUpeU+wRHRvbXL/AgMBAAGjggF4
# MIIBdDAuBggrBgEFBQcBAQQiMCAwHgYIKwYBBQUHMAGGEmh0dHA6Ly9zLnN5bWNk
# LmNvbTASBgNVHRMBAf8ECDAGAQH/AgEAMGYGA1UdIARfMF0wWwYLYIZIAYb4RQEH
# FwMwTDAjBggrBgEFBQcCARYXaHR0cHM6Ly9kLnN5bWNiLmNvbS9jcHMwJQYIKwYB
# BQUHAgIwGRoXaHR0cHM6Ly9kLnN5bWNiLmNvbS9ycGEwNgYDVR0fBC8wLTAroCmg
# J4YlaHR0cDovL3Muc3ltY2IuY29tL3VuaXZlcnNhbC1yb290LmNybDATBgNVHSUE
# DDAKBggrBgEFBQcDAzAOBgNVHQ8BAf8EBAMCAQYwKQYDVR0RBCIwIKQeMBwxGjAY
# BgNVBAMTEVN5bWFudGVjUEtJLTEtNzI0MB0GA1UdDgQWBBTUwAYiSes5S92T4lyh
# uEd2CXIDWDAfBgNVHSMEGDAWgBS2d/ppSEefUxLVwuoHMnYH0ZcHGTANBgkqhkiG
# 9w0BAQsFAAOCAQEAf+vKp+qLdkLrPo4gVDDjt7nc+kg+FscPRZUQzSeGo2bzAu1x
# +KrCVZeRcIP5Un5SaTzJ8eCURoAYu6HUpFam8x0AkdWG80iH4MvENGggXrTL+QXt
# nK9wUye56D5+UaBpcYvcUe2AOiUyn0SvbkMo0yF1u5fYi4uM/qkERgSF9xWcSxGN
# xCwX/tVuf5riVpLxlrOtLfn039qJmc6yOETA90d7yiW5+ipoM5tQct6on9TNLAs0
# vYsweEDgjY4nG5BvGr4IFYFd6y/iUedRHsl4KeceZb847wFKAQkkDhbEFHnBQTc0
# 0D2RUpSd4WjvCPDiaZxnbpALGpNx1CYCw8BaIzGCD4swgg+HAgEBMIGZMIGEMQsw
# CQYDVQQGEwJVUzEdMBsGA1UEChMUU3ltYW50ZWMgQ29ycG9yYXRpb24xHzAdBgNV
# BAsTFlN5bWFudGVjIFRydXN0IE5ldHdvcmsxNTAzBgNVBAMTLFN5bWFudGVjIENs
# YXNzIDMgU0hBMjU2IENvZGUgU2lnbmluZyBDQSAtIEcyAhBi50XpIWUhPJcfXEkK
# 6hKlMA0GCWCGSAFlAwQCAQUAoHwwEAYKKwYBBAGCNwIBDDECMAAwGQYJKoZIhvcN
# AQkDMQwGCisGAQQBgjcCAQQwHAYKKwYBBAGCNwIBCzEOMAwGCisGAQQBgjcCARUw
# LwYJKoZIhvcNAQkEMSIEIG5YDmcpqLxn4SB0H6OnuVkZRPh6OJ77eGW/6Su/uuJg
# MA0GCSqGSIb3DQEBAQUABIIBAA3N2vqfA6WDgqz/7EoAKVIE5Hn7xpYDGhPvFAMV
# BslVpeqE3apTcYFCEcwLtzIEc/zmpULxsX8B0SUT2VXbJN3zzQ80b+gbgpq62Zk+
# dQLOtLSiPhGW7MXLahgES6Oc2dUFaQ+wDfcelkrQaOVZkM4wwAzSapxuf/13oSIk
# ZX2ewQEwTZrVYXELO02KQIKUR30s/oslGVg77ALnfK9qSS96Iwjd4MyT7PzCkHUi
# ilwyGJi5a4ofiULiPSwUQNynSBqxa+JQALkHP682b5xhjoDfyG8laR234FTPtYgs
# P/FaeviwENU5Pl+812NbbtRD+gKlWBZz+7FKykOT/CG8sZahgg1EMIINQAYKKwYB
# BAGCNwMDATGCDTAwgg0sBgkqhkiG9w0BBwKggg0dMIINGQIBAzEPMA0GCWCGSAFl
# AwQCAQUAMHcGCyqGSIb3DQEJEAEEoGgEZjBkAgEBBglghkgBhv1sBwEwMTANBglg
# hkgBZQMEAgEFAAQgJhABfkDIPbI+nWYnA30FLTyaPK+W3QieT21B/vK+CMICEDF0
# worcGsdd7OxpXLP60xgYDzIwMjEwNDA4MDkxMTA5WqCCCjcwggT+MIID5qADAgEC
# AhANQkrgvjqI/2BAIc4UAPDdMA0GCSqGSIb3DQEBCwUAMHIxCzAJBgNVBAYTAlVT
# MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j
# b20xMTAvBgNVBAMTKERpZ2lDZXJ0IFNIQTIgQXNzdXJlZCBJRCBUaW1lc3RhbXBp
# bmcgQ0EwHhcNMjEwMTAxMDAwMDAwWhcNMzEwMTA2MDAwMDAwWjBIMQswCQYDVQQG
# EwJVUzEXMBUGA1UEChMORGlnaUNlcnQsIEluYy4xIDAeBgNVBAMTF0RpZ2lDZXJ0
# IFRpbWVzdGFtcCAyMDIxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA
# wuZhhGfFivUNCKRFymNrUdc6EUK9CnV1TZS0DFC1JhD+HchvkWsMlucaXEjvROW/
# m2HNFZFiWrj/ZwucY/02aoH6KfjdK3CF3gIY83htvH35x20JPb5qdofpir34hF0e
# dsnkxnZ2OlPR0dNaNo/Go+EvGzq3YdZz7E5tM4p8XUUtS7FQ5kE6N1aG3JMjjfdQ
# Jehk5t3Tjy9XtYcg6w6OLNUj2vRNeEbjA4MxKUpcDDGKSoyIxfcwWvkUrxVfbENJ
# Cf0mI1P2jWPoGqtbsR0wwptpgrTb/FZUvB+hh6u+elsKIC9LCcmVp42y+tZji06l
# chzun3oBc/gZ1v4NSYS9AQIDAQABo4IBuDCCAbQwDgYDVR0PAQH/BAQDAgeAMAwG
# A1UdEwEB/wQCMAAwFgYDVR0lAQH/BAwwCgYIKwYBBQUHAwgwQQYDVR0gBDowODA2
# BglghkgBhv1sBwEwKTAnBggrBgEFBQcCARYbaHR0cDovL3d3dy5kaWdpY2VydC5j
# b20vQ1BTMB8GA1UdIwQYMBaAFPS24SAd/imu0uRhpbKiJbLIFzVuMB0GA1UdDgQW
# BBQ2RIaOpLqwZr68KC0dRDbd42p6vDBxBgNVHR8EajBoMDKgMKAuhixodHRwOi8v
# Y3JsMy5kaWdpY2VydC5jb20vc2hhMi1hc3N1cmVkLXRzLmNybDAyoDCgLoYsaHR0
# cDovL2NybDQuZGlnaWNlcnQuY29tL3NoYTItYXNzdXJlZC10cy5jcmwwgYUGCCsG
# AQUFBwEBBHkwdzAkBggrBgEFBQcwAYYYaHR0cDovL29jc3AuZGlnaWNlcnQuY29t
# ME8GCCsGAQUFBzAChkNodHRwOi8vY2FjZXJ0cy5kaWdpY2VydC5jb20vRGlnaUNl
# cnRTSEEyQXNzdXJlZElEVGltZXN0YW1waW5nQ0EuY3J0MA0GCSqGSIb3DQEBCwUA
# A4IBAQBIHNy16ZojvOca5yAOjmdG/UJyUXQKI0ejq5LSJcRwWb4UoOUngaVNFBUZ
# B3nw0QTDhtk7vf5EAmZN7WmkD/a4cM9i6PVRSnh5Nnont/PnUp+Tp+1DnnvntN1B
# Ion7h6JGA0789P63ZHdjXyNSaYOC+hpT7ZDMjaEXcw3082U5cEvznNZ6e9oMvD0y
# 0BvL9WH8dQgAdryBDvjA4VzPxBFy5xtkSdgimnUVQvUtMjiB2vRgorq0Uvtc4GEk
# JU+y38kpqHNDUdq9Y9YfW5v3LhtPEx33Sg1xfpe39D+E68Hjo0mh+s6nv1bPull2
# YYlffqe0jmd4+TaY4cso2luHpoovMIIFMTCCBBmgAwIBAgIQCqEl1tYyG35B5AXa
# NpfCFTANBgkqhkiG9w0BAQsFADBlMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGln
# aUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtE
# aWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwHhcNMTYwMTA3MTIwMDAwWhcNMzEw
# MTA3MTIwMDAwWjByMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5j
# MRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMTEwLwYDVQQDEyhEaWdpQ2VydCBT
# SEEyIEFzc3VyZWQgSUQgVGltZXN0YW1waW5nIENBMIIBIjANBgkqhkiG9w0BAQEF
# AAOCAQ8AMIIBCgKCAQEAvdAy7kvNj3/dqbqCmcU5VChXtiNKxA4HRTNREH3Q+X1N
# aH7ntqD0jbOI5Je/YyGQmL8TvFfTw+F+CNZqFAA49y4eO+7MpvYyWf5fZT/gm+vj
# RkcGGlV+Cyd+wKL1oODeIj8O/36V+/OjuiI+GKwR5PCZA207hXwJ0+5dyJoLVOOo
# CXFr4M8iEA91z3FyTgqt30A6XLdR4aF5FMZNJCMwXbzsPGBqrC8HzP3w6kfZiFBe
# /WZuVmEnKYmEUeaC50ZQ/ZQqLKfkdT66mA+Ef58xFNat1fJky3seBdCEGXIX8RcG
# 7z3N1k3vBkL9olMqT4UdxB08r8/arBD13ays6Vb/kwIDAQABo4IBzjCCAcowHQYD
# VR0OBBYEFPS24SAd/imu0uRhpbKiJbLIFzVuMB8GA1UdIwQYMBaAFEXroq/0ksuC
# MS1Ri6enIZ3zbcgPMBIGA1UdEwEB/wQIMAYBAf8CAQAwDgYDVR0PAQH/BAQDAgGG
# MBMGA1UdJQQMMAoGCCsGAQUFBwMIMHkGCCsGAQUFBwEBBG0wazAkBggrBgEFBQcw
# AYYYaHR0cDovL29jc3AuZGlnaWNlcnQuY29tMEMGCCsGAQUFBzAChjdodHRwOi8v
# Y2FjZXJ0cy5kaWdpY2VydC5jb20vRGlnaUNlcnRBc3N1cmVkSURSb290Q0EuY3J0
# MIGBBgNVHR8EejB4MDqgOKA2hjRodHRwOi8vY3JsNC5kaWdpY2VydC5jb20vRGln
# aUNlcnRBc3N1cmVkSURSb290Q0EuY3JsMDqgOKA2hjRodHRwOi8vY3JsMy5kaWdp
# Y2VydC5jb20vRGlnaUNlcnRBc3N1cmVkSURSb290Q0EuY3JsMFAGA1UdIARJMEcw
# OAYKYIZIAYb9bAACBDAqMCgGCCsGAQUFBwIBFhxodHRwczovL3d3dy5kaWdpY2Vy
# dC5jb20vQ1BTMAsGCWCGSAGG/WwHATANBgkqhkiG9w0BAQsFAAOCAQEAcZUS6VGH
# VmnN793afKpjerN4zwY3QITvS4S/ys8DAv3Fp8MOIEIsr3fzKx8MIVoqtwU0HWqu
# mfgnoma/Capg33akOpMP+LLR2HwZYuhegiUexLoceywh4tZbLBQ1QwRostt1AuBy
# x5jWPGTlH0gQGF+JOGFNYkYkh2OMkVIsrymJ5Xgf1gsUpYDXEkdws3XVk4WTfraS
# Z/tTYYmo9WuWwPRYaQ18yAGxuSh1t5ljhSKMYcp5lH5Z/IwP42+1ASa2bKXuh1Eh
# 5Fhgm7oMLSttosR+u8QlK0cCCHxJrhO24XxCQijGGFbPQTS2Zl22dHv1VjMiLyI2
# skuiSpXY9aaOUjGCAk0wggJJAgEBMIGGMHIxCzAJBgNVBAYTAlVTMRUwEwYDVQQK
# EwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5jb20xMTAvBgNV
# BAMTKERpZ2lDZXJ0IFNIQTIgQXNzdXJlZCBJRCBUaW1lc3RhbXBpbmcgQ0ECEA1C
# SuC+Ooj/YEAhzhQA8N0wDQYJYIZIAWUDBAIBBQCggZgwGgYJKoZIhvcNAQkDMQ0G
# CyqGSIb3DQEJEAEEMBwGCSqGSIb3DQEJBTEPFw0yMTA0MDgwOTExMDlaMCsGCyqG
# SIb3DQEJEAIMMRwwGjAYMBYEFOHXgqjhkb7va8oWkbWqtJSmJJvzMC8GCSqGSIb3
# DQEJBDEiBCDvFxQ6lYLr8vB+9czUl19rjCw1pWhhUXw/SqOmvIa/VDANBgkqhkiG
# 9w0BAQEFAASCAQB9ox2UrcUXQsBI4Uycnhl4AMpvhVXJME62tygFMppW1l7QftDy
# LvfPKRYm2YUioak/APxAS6geRKpeMkLvXuQS/Jlv0kY3BjxkeG0eVjvyjF4SvXbZ
# 3JCk9m7wLNE+xqOo0ICjYlIJJgRLudjWkC5Skpb1NpPS8DOaIYwRV+AWaSOUPd9P
# O5yVcnbl7OpK3EAEtwDrybCVBMPn2MGhAXybIHnth3+MFp1b6Blhz3WlReQyarjq
# 1f+zaFB79rg6JswXoOTJhwICBP3hO2Ua3dMAswbfl+QNXF+igKLJPYnaeSVhBbm6
# VCu2io27t4ixqvoD0RuPObNX/P3oVA38afiM
# SIG # End signature block
|
NVIDIA-Omniverse/PhysX/physx/buildtools/packman/bootstrap/install_package.py | # Copyright 2019 NVIDIA CORPORATION
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import zipfile
import tempfile
import sys
import os
import stat
import time
from typing import Any, Callable
RENAME_RETRY_COUNT = 100
RENAME_RETRY_DELAY = 0.1
logging.basicConfig(level=logging.WARNING, format="%(message)s")
logger = logging.getLogger("install_package")
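# Extracts a packman package zip into a temporary staging folder and atomically
# renames it into place, with retries so concurrent installs of the same
# package version are tolerated.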
def remove_directory_item(path):
if os.path.islink(path) or os.path.isfile(path):
try:
os.remove(path)
except PermissionError:
# make sure we have access and try again:
os.chmod(path, stat.S_IRWXU)
os.remove(path)
else:
# try first to delete the dir directly, because this works for folder junctions; otherwise we would follow the junctions and destroy their targets!
clean_out_folder = False
try:
# make sure we have access preemptively - this is necessary because recursing into a directory without permissions
# will only lead to heartache
os.chmod(path, stat.S_IRWXU)
os.rmdir(path)
except OSError:
clean_out_folder = True
if clean_out_folder:
# we should make sure the directory is empty
names = os.listdir(path)
for name in names:
fullname = os.path.join(path, name)
remove_directory_item(fullname)
# now try again to get rid of the folder - and don't catch if it raises:
os.rmdir(path)
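# Context manager owning a temporary subfolder under staging_path; the temp
# folder is removed in __exit__ if it was never promoted. A usage sketch
# (extract_to and the paths below are hypothetical):
#   with StagingDirectory("/packman-repo/chk") as staging:
#       extract_to(staging.get_temp_folder_path())
#       staging.promote_and_rename("1.2.3")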
class StagingDirectory:
def __init__(self, staging_path):
self.staging_path = staging_path
self.temp_folder_path = None
os.makedirs(staging_path, exist_ok=True)
def __enter__(self):
self.temp_folder_path = tempfile.mkdtemp(prefix="ver-", dir=self.staging_path)
return self
def get_temp_folder_path(self):
return self.temp_folder_path
# Renames the temp staging folder to folder_name; the parent path must already exist.
def promote_and_rename(self, folder_name):
abs_dst_folder_name = os.path.join(self.staging_path, folder_name)
os.rename(self.temp_folder_path, abs_dst_folder_name)
def __exit__(self, type, value, traceback):
# Remove temp staging folder if it's still there (something went wrong):
path = self.temp_folder_path
if os.path.isdir(path):
remove_directory_item(path)
def rename_folder(staging_dir: StagingDirectory, folder_name: str):
try:
staging_dir.promote_and_rename(folder_name)
except OSError as exc:
# if we failed to rename because the folder now exists we can assume that another packman process
# has managed to update the package before us - in all other cases we re-raise the exception
abs_dst_folder_name = os.path.join(staging_dir.staging_path, folder_name)
if os.path.exists(abs_dst_folder_name):
logger.warning(
f"Directory {abs_dst_folder_name} already present, package installation already completed"
)
else:
raise
def call_with_retry(
op_name: str, func: Callable, retry_count: int = 3, retry_delay: float = 20
) -> Any:
retries_left = retry_count
while True:
try:
return func()
except (OSError, IOError) as exc:
logger.warning(f"Failure while executing {op_name} [{str(exc)}]")
if retries_left:
retry_str = "retry" if retries_left == 1 else "retries"
logger.warning(
f"Retrying after {retry_delay} seconds"
f" ({retries_left} {retry_str} left) ..."
)
time.sleep(retry_delay)
else:
logger.error("Maximum retries exceeded, giving up")
raise
retries_left -= 1
def rename_folder_with_retry(staging_dir: StagingDirectory, folder_name):
dst_path = os.path.join(staging_dir.staging_path, folder_name)
call_with_retry(
f"rename {staging_dir.get_temp_folder_path()} -> {dst_path}",
lambda: rename_folder(staging_dir, folder_name),
RENAME_RETRY_COUNT,
RENAME_RETRY_DELAY,
)
def install_package(package_path, install_path):
staging_path, version = os.path.split(install_path)
with StagingDirectory(staging_path) as staging_dir:
output_folder = staging_dir.get_temp_folder_path()
with zipfile.ZipFile(package_path, allowZip64=True) as zip_file:
zip_file.extractall(output_folder)
# attempt the rename operation
rename_folder_with_retry(staging_dir, version)
print(f"Package successfully installed to {install_path}")
if __name__ == "__main__":
executable_paths = os.getenv("PATH")
paths_list = executable_paths.split(os.path.pathsep) if executable_paths else []
target_path_np = os.path.normpath(sys.argv[2])
target_path_np_nc = os.path.normcase(target_path_np)
for exec_path in paths_list:
if os.path.normcase(os.path.normpath(exec_path)) == target_path_np_nc:
raise RuntimeError(f"packman will not install to executable path '{exec_path}'")
install_package(sys.argv[1], target_path_np)
|
NVIDIA-Omniverse/PhysX/blast/PACKAGE-INFO.yaml | Package : blast-sdk
Maintainers : Bryan Galdrikian, Eric Arnold
Description : Blast destruction SDK
SWIPAT NvBug :
Repository : https://gitlab-master.nvidia.com/omniverse/blast-sdk
License Type : NVIDIA |
NVIDIA-Omniverse/PhysX/blast/repo.bat | @echo off
call "%~dp0tools\packman\python.bat" "%~dp0tools\repoman\repoman.py" %*
if %errorlevel% neq 0 ( goto Error )
:Success
exit /b 0
:Error
exit /b %errorlevel%
|
NVIDIA-Omniverse/PhysX/blast/build.bat | @echo off
call "%~dp0repo" build %*
|
NVIDIA-Omniverse/PhysX/blast/format_code.bat | @echo off
call "%~dp0repo" format %*
|
NVIDIA-Omniverse/PhysX/blast/build.sh | #!/bin/bash
set -e
SCRIPT_DIR=$(dirname "${BASH_SOURCE}")
source "$SCRIPT_DIR/repo.sh" build "$@" || exit $?
|
NVIDIA-Omniverse/PhysX/blast/format_code.sh | #!/bin/bash
set -e
SCRIPT_DIR=$(dirname "${BASH_SOURCE}")
source "$SCRIPT_DIR/repo.sh" format "$@" || exit $?
|
NVIDIA-Omniverse/PhysX/blast/VERSION.md | 5.0.4
|
NVIDIA-Omniverse/PhysX/blast/repo.sh | #!/bin/bash
set -e
SCRIPT_DIR=$(dirname "${BASH_SOURCE}")
cd "$SCRIPT_DIR"
exec "tools/packman/python.sh" tools/repoman/repoman.py "$@"
|
NVIDIA-Omniverse/PhysX/blast/repo.toml | ########################################################################################################################
# Repo tool base settings
########################################################################################################################
[repo]
# Repository Name. It is used for solution name and final package name
name = "blast-sdk"
########################################################################################################################
# Build tool setup
########################################################################################################################
[repo_build]
# List of packman projects to pull (in order)
fetch.packman_host_files_to_pull = [
"${root}/deps/host-deps.packman.xml",
]
fetch.packman_target_files_to_pull = [
"${root}/deps/target-deps.packman.xml",
]
vscode.python = "${root}/_build/target-deps/python36"
vscode.python_env.PYTHONPATH= [
"$$$${PYTHONPATH}",
"${env:PYTHONPATH}"
]
vscode.python_env.PATH= [
"$$$${PATH}",
"$root/_build/$platform/$config",
]
vscode.write_python_paths_in_settings_json = true
vscode.generate_python_env_file = false
#licensing.enabled = true
#licensing.packages = [
# "${root}/deps/target-deps.packman.xml",
# "${root}/deps/usd-deps.packman.xml",
#]
#licensing.fail_on_missing = true
# Disable pip license gathering (we don't have any)
fetch.pip.gather_licenses_path = ""
msbuild.sln_file = "blast-sdk.sln"
msbuild.vs_version = "vs2017"
[[repo_build.argument]]
name = "-py"
help = "Python version."
kwargs.choices = ["0", "27", "36", "37"]
kwargs.nargs = 1
extra_premake_args = ["--python-version={}"]
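# Usage sketch (assuming the standard repoman CLI): "repo.bat build -py 36"
# forwards --python-version=36 to premake via extra_premake_args.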
########################################################################################################################
# Code Format Tool
########################################################################################################################
[repo_format]
|
NVIDIA-Omniverse/PhysX/blast/premake5.lua | newoption {
trigger = "platform-host",
description = "(Optional) Specify host platform for cross-compilation"
}
-- Include omni.repo.build premake tools
local repo_build = require('omni/repo/build')
-- Path defines
local target_deps = "_build/target-deps"
-- Enable /sourcelink flag for VS
repo_build.enable_vstudio_sourcelink()
-- Remove /JMC parameter for visual studio
repo_build.remove_vstudio_jmc()
-- Wrapper function around path.getabsolute() which makes the drive letter lowercase on Windows.
-- Otherwise the drive letter's case can vary with the environment and cause the solution to reload.
function get_abs_path(p)
p = path.getabsolute(p)
if os.target() == "windows" then
p = p:gsub("^%a:", function(c) return c:lower() end)
end
return p
end
function copy_to_file(filePath, newPath)
local filePathAbs = get_abs_path(filePath)
local dir = newPath:match("(.*[\\/])")
if os.target() == "windows" then
if dir ~= "" then
--dir = dir:gsub('/', '\\')
postbuildcommands { "{MKDIR} \""..dir.."\"" }
end
-- Using {COPY} on Windows adds an IF EXIST with an extra backslash which doesn't work
filePathAbs = filePathAbs:gsub('/', '\\')
newPath = newPath:gsub('/', '\\')
postbuildcommands { "copy /Y \""..filePathAbs.."\" \""..newPath.."\"" }
else
if dir ~= "" then
postbuildcommands { "$(SILENT) {MKDIR} "..dir }
end
postbuildcommands { "$(SILENT) {COPY} "..filePathAbs.." "..newPath }
end
end
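-- Usage sketch (hypothetical paths), called from inside a project block since
-- the copy is registered as a postbuildcommand:
--   copy_to_file(targetDir.."/NvBlast.dll", sdkTargetDir.."/redist/NvBlast.dll")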
function os.capture(cmd, raw)
local f = assert(io.popen(cmd, 'r'))
local s = assert(f:read('*a'))
f:close()
if raw then return s end
s = string.gsub(s, '^%s+', '')
s = string.gsub(s, '%s+$', '')
s = string.gsub(s, '[\n\r]+', ' ')
return s
end
premake.override(premake.vstudio.vc2010, "projectReferences", function(base, prj)
local refs = premake.project.getdependencies(prj, 'linkOnly')
if #refs > 0 then
premake.push('<ItemGroup>')
for _, ref in ipairs(refs) do
local relpath = premake.vstudio.path(prj, premake.vstudio.projectfile(ref))
premake.push('<ProjectReference Include=\"%s\">', relpath)
premake.callArray(premake.vstudio.vc2010.elements.projectReferences, prj, ref)
premake.vstudio.vc2010.element("UseLibraryDependencyInputs", nil, "true")
premake.pop('</ProjectReference>')
end
premake.pop('</ItemGroup>')
end
end)
local hostDepsDir = "_build/host-deps"
local targetDepsDir = "_build/target-deps"
local capnp_gen_path = "source/sdk/extensions/serialization/generated"
local workspace_name = "blast-sdk"
local root = repo_build.get_abs_path(".")
-- Copy headers and licenses
repo_build.prebuild_copy {
{ "include", "_build/%{platform}/%{config}/"..workspace_name.."/include" },
{ "source/sdk/common", "_build/%{platform}/%{config}/"..workspace_name.."/source/sdk/common" },
{ "source/shared/NsFoundation", "_build/%{platform}/%{config}/"..workspace_name.."/source/shared/NsFoundation" },
{ "PACKAGE-LICENSES", "_build/%{platform}/%{config}/"..workspace_name.."/PACKAGE-LICENSES" }
}
-- Preprocess to generate Cap'n Proto files
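-- Each .capn schema is compiled by the capnp tool into C++ sources under
-- source/sdk/extensions/serialization/generated before the build proper runs.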
function capn_proto_precompile_step(dirpath, capnp_files)
local capnp_src = get_abs_path("_build/host-deps/CapnProto/src")
local abs_dir_path = get_abs_path(dirpath)
if os.target() == "windows" then
local capnp_bin = get_abs_path("_build/host-deps/CapnProto/tools/win32"):gsub('/', '\\')
local abs_capnp_gen_path = get_abs_path(capnp_gen_path):gsub('/', '\\')
prebuildcommands { "if not exist "..abs_capnp_gen_path.."\\ mkdir "..abs_capnp_gen_path } -- make the generated source folder
-- capnp compile
for _, filename in pairs(capnp_files) do
prebuildcommands { "if exist "..abs_capnp_gen_path.."\\"..filename..".cpp del /Q "..abs_capnp_gen_path.."\\"..filename..".cpp" }
local command = capnp_bin.."\\capnp.exe compile -o "..capnp_bin.."\\capnpc-c++.exe:"..abs_capnp_gen_path.." -I "..capnp_src.." --src-prefix "..abs_dir_path.." "..abs_dir_path.."/"..filename
prebuildcommands { command }
-- prebuildcommands { "ren "..abs_capnp_gen_path.."\\"..filename..".c++ "..filename..".cpp" }
end
elseif os.target() == "linux" then
local capnp_bin = get_abs_path("_build/host-deps/CapnProto/tools/ubuntu64")
local abs_capnp_gen_path = get_abs_path(capnp_gen_path)
prebuildcommands { "mkdir -p "..abs_capnp_gen_path } -- make the generated source folder
-- capnp compile
for _, filename in pairs(capnp_files) do
local command = capnp_bin.."/capnp compile -o "..capnp_bin.."/capnpc-c++:"..abs_capnp_gen_path.." -I "..capnp_src.." --src-prefix "..abs_dir_path.." "..abs_dir_path.."/"..filename
prebuildcommands { command }
-- prebuildcommands { "mv "..abs_capnp_gen_path.."/"..filename..".c++ "..abs_capnp_gen_path.."/"..filename..".cpp" }
end
end
end
capn_proto_precompile_step("source/sdk/extensions/serialization", {
"NvBlastExtLlSerialization-capn",
"NvBlastExtTkSerialization-capn",
})
-- Custom rule for .c++ files
if os.target() == "linux" then
rule "c++"
fileExtension { ".c++" }
buildoutputs { "$(OBJDIR)/%{file.objname}.o" }
buildmessage '$(notdir $<)'
buildcommands {'$(CXX) %{premake.modules.gmake2.cpp.fileFlags(cfg, file)} $(FORCE_INCLUDE) -o "$@" -MF "$(@:%.o=%.d)" -c "$<"'}
end
-- premake5.lua
workspace (workspace_name)
configurations { "debug", "release" }
startproject "NvBlast"
local targetName = _ACTION
local workspaceDir = "_compiler/"..targetName
-- common dir name to store platform specific files
local platform = "%{cfg.system}-%{cfg.platform}"
local targetDependencyPlatform = "%{cfg.system}-%{cfg.platform}";
local hostDependencyPlatform = _OPTIONS["platform-host"] or targetDependencyPlatform;
local sdkTargetDir = "_build/"..platform.."/%{cfg.buildcfg}/%{wks.name}"
local targetDir = sdkTargetDir.."/bin"
-- Define anything related to the VS or SDK version here, because it will most likely change in the future.
local msvcInclude = hostDepsDir.."/msvc/VC/Tools/MSVC/14.16.27023/include"
local msvcLibs = hostDepsDir.."/msvc/VC/Tools/MSVC/14.16.27023/lib/onecore/x64"
local sdkInclude = { hostDepsDir.."/winsdk/include/winrt", hostDepsDir.."/winsdk/include/um", hostDepsDir.."/winsdk/include/ucrt", hostDepsDir.."/winsdk/include/shared" }
local sdkLibs = { hostDepsDir.."/winsdk/lib/ucrt/x64", hostDepsDir.."/winsdk/lib/um/x64" }
location (workspaceDir)
targetdir (targetDir)
-- symbolspath ("_build/"..targetName.."/symbols/%{cfg_buildcfg}/%{prj.name}.pdb")
objdir ("_build/tmp/%{cfg.system}/%{prj.name}")
exceptionhandling "On"
rtti "Off"
staticruntime "Off"
flags { "FatalCompileWarnings", "MultiProcessorCompile", "NoPCH", "UndefinedIdentifiers", "NoIncrementalLink" }
cppdialect "C++14"
includedirs { "include" }
characterset( "ASCII" )
defines { "LOG_COMPONENT=\"%{prj.name}\"" }
sysincludedirs { targetDepsDir }
filter { "system:windows" }
platforms { "x86_64" }
symbols "Full"
-- add .editorconfig to all projects so that VS 2017 automatically picks it up
files {".editorconfig"}
editandcontinue "Off"
bindirs { hostDepsDir.."/msvc/VC/Tools/MSVC/14.16.27023/bin/HostX64/x64", hostDepsDir.."/msvc/MSBuild/15.0/bin", hostDepsDir.."/winsdk/bin/x64" }
systemversion "10.0.17763.0"
-- this is for the include and libs from the SDK.
syslibdirs { msvcLibs, sdkLibs }
sysincludedirs { msvcInclude, sdkInclude }
-- all of our source strings and executable strings are utf8
buildoptions {"/utf-8", "/bigobj"}
buildoptions {"/permissive-"}
buildoptions { "/WX" } -- warnings as errors
warnings "Extra"
defines { "_CRT_NONSTDC_NO_DEPRECATE" }
defines { "BOOST_USE_WINDOWS_H=1" }
filter { "system:linux" }
platforms { "x86_64", "aarch64" }
defaultplatform "x86_64"
filter { "system:linux", "platforms:x86_64" }
defines { "_GLIBCXX_USE_CXX11_ABI=0" }
architecture "x86_64"
filter { "system:linux", "platforms:aarch64" }
defines { "_GLIBCXX_USE_CXX11_ABI=1" }
architecture "ARM"
local hostDependencyPlatform = _OPTIONS["platform-host"]
print(hostDependencyPlatform)
-- If cross-compiling, set the toolset explicitly
if (hostDependencyPlatform and hostDependencyPlatform == "linux-x86_64") then
local toolchain_path = "_build/host-deps/gcc-x86_64"
local toolchain = dofile(toolchain_path .. "/toolchain.lua")
use_gcc_local_toolchain(toolchain_path)
toolset("gcc-local_9_2_0_arch64")
end
filter { "system:linux" }
-- defines { "__STDC_FORMAT_MACROS" }
symbols "On"
buildoptions { "-pthread -fvisibility=hidden -fnon-call-exceptions -D_FILE_OFFSET_BITS=64 -fabi-version=8" }
-- enforces RPATH instead of RUNPATH, needed for ubuntu version > 16.04
linkoptions { "-pthread",
"-Wl,--no-undefined",
"-Wl,--disable-new-dtags",
--"-Wl,--version-script=../../../omniclient.so.ld",
"-Wl,-rpath,'$$ORIGIN' -Wl,--export-dynamic" }
-- add library origin directory to dlopen() search path
enablewarnings { "all", "vla" }
disablewarnings {
"unused-variable",
"switch",
"unused-but-set-variable",
"unused-result",
"deprecated",
"deprecated-declarations",
"unknown-pragmas",
"multichar",
"parentheses",
"nonnull-compare"
}
links { "stdc++fs" }
if repo_build.ccache_path() then
gccprefix (repo_build.ccache_path().." ")
end
filter {}
filter { "system:linux", "configurations:debug" }
buildoptions { "-ggdb", "-g3" }
filter { "system:linux", "configurations:release" }
buildoptions { "-ggdb", "-g2" }
filter { "configurations:debug" }
optimize "Off"
defines { "_DEBUG", "CARB_DEBUG=1" }
filter { "configurations:release" }
defines { "NDEBUG", "CARB_DEBUG=0" }
filter { "configurations:release", "system:windows" }
optimize "Speed"
-- Linux/GCC has some issues on thread exit when the "Speed" optimizations are enabled.
-- We'll leave those off on Linux for the moment.
filter { "configurations:release", "system:linux" }
optimize "On"
filter {}
function blast_sdklib_bare_setup(name)
kind "SharedLib"
location (workspaceDir.."/%{prj.name}")
filter { "system:windows" }
-- defines { "ISOLATION_AWARE_ENABLED=1" }
filter { "system:linux" }
buildoptions { "-fPIC" }
links { "rt" }
filter{}
includedirs {
"source/sdk/common",
"include/"..name,
"source/sdk/"..name,
}
end
function blast_sdklib_common_files()
files {
"source/sdk/common/*.cpp",
}
vpaths {
["common/*"] = "source/sdk/common",
}
end
function blast_sdklib_standard_setup(name)
blast_sdklib_bare_setup(name)
blast_sdklib_common_files()
files {
"source/sdk/"..name.."/*.cpp",
}
vpaths {
["include/*"] = "include/"..name,
["source/*"] = "source/sdk/"..name.."/",
}
end
function link_dependents(names)
libdirs { targetDir }
for _, name in pairs(names) do
dependson {name}
links(name)
end
end
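-- For reference, a new extension would be declared the same way as the existing
-- projects below; "NvBlastExtFoo"/"extensions/foo" are hypothetical placeholders:
--
--     project "NvBlastExtFoo"
--         link_dependents({"NvBlast", "NvBlastGlobals"})
--         blast_sdklib_standard_setup("extensions/foo")
--
-- i.e. a shared library built from source/sdk/extensions/foo/*.cpp plus the common
-- sources, linking against and declaring build order on the listed projects.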
function add_files(rootpath, filenames)
for _, filename in pairs(filenames) do
local file = rootpath.."/"..filename
files { file }
end
end
function add_capn_proto_source()
add_files("_build/host-deps/CapnProto/src/capnp",
{
"arena.c++",
"blob.c++",
"layout.c++",
"message.c++",
"serialize.c++",
}
)
add_files("_build/host-deps/CapnProto/src/kj",
{
"array.c++",
"common.c++",
"debug.c++",
"exception.c++",
"io.c++",
"mutex.c++",
"string.c++",
"units.c++",
}
)
-- cap'n proto source produces a lot of warnings
filter { "system:windows" }
disablewarnings {
"4018", -- 'token' : signed/unsigned mismatch
"4100", -- unreferenced formal parameter
"4189", -- 'identifier' : local variable is initialized but not referenced
"4244", -- conversion from 'type1' to 'type2', possible loss of data
"4245", -- conversion from 'type1' to 'type2', signed/unsigned mismatch
"4267", -- conversion from 'size_t' to 'type', possible loss of data
"4456", -- declaration of 'identifier' hides previous local declaration
"4541", -- 'identifier' used on polymorphic type 'type' with /GR-; unpredictable behavior may result
"4702", -- unreachable code
"4714", -- function 'function' marked as __forceinline not inlined
}
filter { "system:linux"}
disablewarnings {
"undef",
"sign-compare"
}
filter {}
end
group "sdk"
project "NvBlast"
blast_sdklib_standard_setup("lowlevel")
includedirs {
"include/shared/NvFoundation",
}
project "NvBlastGlobals"
blast_sdklib_standard_setup("globals")
includedirs {
"include/lowlevel",
"source/shared/NsFoundation/include",
"include/shared/NvFoundation",
}
project "NvBlastExtShaders"
link_dependents({"NvBlast", "NvBlastGlobals"})
blast_sdklib_standard_setup("extensions/shaders")
includedirs {
"include/lowlevel",
"include/globals",
"include/shared/NvFoundation",
"source/shared/NsFoundation/include",
}
filter { "system:windows" }
disablewarnings {
"4267", -- conversion from 'size_t' to 'type', possible loss of data
}
filter { "system:linux"}
disablewarnings {
"strict-aliasing"
}
filter {}
project "NvBlastExtAssetUtils"
link_dependents({"NvBlast", "NvBlastGlobals"})
blast_sdklib_standard_setup("extensions/assetutils")
includedirs {
"include/lowlevel",
"include/globals",
"include/shared/NvFoundation",
}
project "NvBlastExtAuthoring"
link_dependents({"NvBlast", "NvBlastGlobals"})
blast_sdklib_standard_setup("extensions/authoring")
includedirs {
"include/lowlevel",
"include/globals",
"include/extensions/assetutils",
"include/extensions/authoringCommon",
"source/sdk/extensions/authoring",
"source/sdk/extensions/authoringCommon",
"source/sdk/extensions/authoring/VHACD/inc",
"source/sdk/extensions/authoring/VHACD/public",
"include/shared/NvFoundation",
"source/shared/NsFoundation/include",
target_deps.."/BoostMultiprecision",
}
files {
"source/sdk/extensions/authoringCommon/*.cpp",
"source/sdk/extensions/authoring/VHACD/src/*.cpp",
}
vpaths {
["VHACD/*"] = "source/sdk/extensions/authoring/VHACD/",
["authoringCommon/include/*"] = "include/extensions/authoringCommon/",
["authoringCommon/source/*"] = "source/sdk/extensions/authoringCommon/",
}
filter { "system:windows" }
disablewarnings {
"4244", -- conversion from 'type1' to 'type2', possible loss of data
"4267", -- conversion from 'size_t' to 'type', possible loss of data
}
filter { "system:linux"}
disablewarnings {
"misleading-indentation",
"undef",
"strict-aliasing",
"maybe-uninitialized"
}
filter {}
project "NvBlastTk"
link_dependents({"NvBlast", "NvBlastGlobals"})
blast_sdklib_standard_setup("toolkit")
includedirs {
"include/lowlevel",
"include/globals",
"source/sdk/globals",
"include/shared/NvFoundation",
"source/shared/NsFoundation/include",
"source/shared/NsFileBuffer/include",
"source/shared/NvTask/include",
}
project "NvBlastExtStress"
filter { "system:linux"}
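		-- assumption: the shared stress solver relies on AVX2/FMA intrinsics, hence the Haswell baseline below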
buildoptions { "-march=haswell" }
filter {}
link_dependents({"NvBlast", "NvBlastGlobals"})
blast_sdklib_standard_setup("extensions/stress")
includedirs {
"include/lowlevel",
"include/globals",
"include/shared/NvFoundation",
"source/shared/NsFoundation/include",
"source/shared/stress_solver",
}
files {
"source/shared/stress_solver/stress.cpp",
}
filter { "system:windows" }
disablewarnings {
"4324", -- structure was padded due to alignment specifier
"4505", -- unreferenced local function has been removed
}
filter { "system:linux"}
disablewarnings {
"maybe-uninitialized",
"padded",
"ignored-attributes",
"unused-function"
}
filter {}
project "NvBlastExtSerialization"
filter { "system:linux"}
rules { "c++" }
filter {}
link_dependents({"NvBlast", "NvBlastGlobals"})
blast_sdklib_bare_setup("extensions/serialization")
defines { "KJ_HEADER_WARNINGS=0"}
includedirs {
"source/sdk/extensions/serialization/DTO",
"include/lowlevel",
"source/sdk/lowlevel",
"include/globals",
"_build/host-deps/CapnProto/src",
capnp_gen_path,
"include/shared/NvFoundation",
"source/shared/NsFoundation/include",
"source/shared/NsFileBuffer/include",
"source/shared/NvTask/include",
}
blast_sdklib_common_files()
add_files("source/sdk/extensions/serialization",
{
"NvBlastExtSerialization.cpp",
"NvBlastExtLlSerialization.cpp",
"NvBlastExtOutputStream.cpp",
"NvBlastExtInputStream.cpp",
}
)
add_files("source/sdk/extensions/serialization/DTO",
{
"ActorDTO.cpp",
"AssetDTO.cpp",
"FamilyDTO.cpp",
"FamilyGraphDTO.cpp",
"NvBlastChunkDTO.cpp",
"NvBlastBondDTO.cpp",
"NvBlastIDDTO.cpp",
}
)
add_capn_proto_source()
add_files(capnp_gen_path,
{ "NvBlastExtLlSerialization-capn.c++" }
)
vpaths {
["include/*"] = "include/extensions/serialization/",
["source/*"] = "source/sdk/extensions/serialization/",
}
filter {}
project "NvBlastExtTkSerialization"
filter { "system:linux"}
rules { "c++" }
filter {}
dependson {"NvBlastExtSerialization"}
link_dependents({"NvBlast", "NvBlastGlobals", "NvBlastTk"})
blast_sdklib_bare_setup("extensions/serialization")
defines { "KJ_HEADER_WARNINGS=0"}
includedirs {
"source/sdk/extensions/serialization/DTO",
"include/lowlevel",
"include/toolkit",
"source/sdk/lowlevel",
"include/globals",
"_build/host-deps/CapnProto/src",
capnp_gen_path,
"include/shared/NvFoundation",
"source/shared/NsFoundation/include",
"source/shared/NsFileBuffer/include",
"source/shared/NvTask/include",
}
blast_sdklib_common_files()
add_files("source/sdk/extensions/serialization",
{
"NvBlastExtTkSerialization.cpp",
"NvBlastExtTkSerializerRAW.cpp",
"NvBlastExtOutputStream.cpp",
"NvBlastExtInputStream.cpp",
}
)
add_files("source/sdk/extensions/serialization/DTO",
{
"AssetDTO.cpp",
"TkAssetDTO.cpp",
"NvVec3DTO.cpp",
"NvBlastChunkDTO.cpp",
"NvBlastBondDTO.cpp",
"NvBlastIDDTO.cpp",
"TkAssetJointDescDTO.cpp",
}
)
add_capn_proto_source()
add_files(capnp_gen_path,
{
"NvBlastExtLlSerialization-capn.c++",
"NvBlastExtTkSerialization-capn.c++",
}
)
vpaths {
["include/*"] = "include/extensions/serialization/",
["source/*"] = "source/sdk/extensions/serialization/",
}
group "tests"
project "UnitTests"
kind "ConsoleApp"
location (workspaceDir.."/%{prj.name}")
link_dependents({"NvBlast", "NvBlastGlobals", "NvBlastExtAssetUtils", "NvBlastExtShaders", "NvBlastTk", "NvBlastExtSerialization", "NvBlastExtTkSerialization"})
filter { "system:windows" }
-- defines { "ISOLATION_AWARE_ENABLED=1" }
filter { "system:linux" }
buildoptions { "-fPIC" }
links { "rt" }
filter{}
blast_sdklib_common_files()
add_files("source/test/src/unit", {
"AssetTests.cpp",
"ActorTests.cpp",
"APITests.cpp",
"CoreTests.cpp",
"FamilyGraphTests.cpp",
"MultithreadingTests.cpp",
"TkCompositeTests.cpp",
"TkTests.cpp",
})
add_files("source/test/src/utils", {
"TestAssets.cpp",
})
add_files("source/sdk/lowlevel", {
"NvBlastActor.cpp",
"NvBlastFamilyGraph.cpp",
"NvBlastActorSerializationBlock.cpp",
"NvBlastAsset.cpp",
"NvBlastFamily.cpp",
})
add_files("source/sdk/toolkit", {
"NvBlastTkTaskManager.cpp",
})
add_files("source/shared/utils", {
"AssetGenerator.cpp",
})
includedirs {
"include/globals",
"include/lowlevel",
"include/toolkit",
"include/extensions/assetutils",
"include/extensions/shaders",
"include/extensions/serialization",
"source/sdk/common",
"source/sdk/globals",
"source/sdk/lowlevel",
"source/sdk/extensions/serialization",
"source/test/src",
"source/test/src/unit",
"source/test/src/utils",
"source/shared/filebuf/include",
"source/shared/utils",
"include/shared/NvFoundation",
"source/shared/NsFoundation/include",
"source/shared/NsFileBuffer/include",
"source/shared/NvTask/include",
target_deps.."/googletest/include",
}
filter { "system:windows", "configurations:debug" }
libdirs { target_deps.."/googletest/lib/vc14win64-cmake/Debug" }
filter { "system:windows", "configurations:release" }
libdirs { target_deps.."/googletest/lib/vc14win64-cmake/Release" }
filter { "system:linux" }
libdirs { target_deps.."/googletest/lib/gcc-4.8" }
filter{}
links { "gtest_main", "gtest" }
filter { "system:windows" }
-- links { "PhysXFoundation_64", "PhysXTask_static_64" }
disablewarnings {
"4002", -- too many actual parameters for macro 'identifier'
"4100", -- unreferenced formal parameter
"4127", -- conditional expression is constant
"4189", -- 'identifier' : local variable is initialized but not referenced
"4244", -- conversion from 'type1' to 'type2', possible loss of data
"4456", -- declaration of 'identifier' hides previous local declaration
"4996", -- code uses a function, class member, variable, or typedef that's marked deprecated
}
filter { "system:linux"}
-- links { "PhysXFoundation_static_64" }
disablewarnings {
"undef",
"sign-compare"
}
filter {} |
NVIDIA-Omniverse/PhysX/blast/README.md | # Blast SDK Repo
Online documentation may be found here: [Blast SDK Documentation](https://nvidia-omniverse.github.io/PhysX/blast/index.html).
## Building the SDK
### Windows
1. run `build.bat`
2. built sdk location: `_build\windows-x86_64\release\blast-sdk` (release), `_build\windows-x86_64\debug\blast-sdk` (debug)
### Linux
0. initialize (once): run `./setup.sh`
1. run `./build.sh`
2. built sdk location: `_build/linux-x86_64/release/blast-sdk` (release), `_build/linux-x86_64/debug/blast-sdk` (debug)
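To sanity-check a build, you can compile a small program against the packaged SDK. The snippet below is only a sketch: it assumes the release layout above exposes `include` and `bin` folders and that the low-level library keeps its project name `NvBlast`; `smoke_test.cpp` stands in for your own source file.
```bash
SDK=_build/linux-x86_64/release/blast-sdk
g++ -std=c++14 smoke_test.cpp \
    -I "$SDK/include" \
    -L "$SDK/bin" -lNvBlast \
    -Wl,-rpath,"$SDK/bin" \
    -o smoke_test
```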
|
NVIDIA-Omniverse/PhysX/blast/setup.sh | #!/usr/bin/env bash
set -e
set -u
# Tested on bare installs of:
# - Ubuntu 18.04 / bionic
# - Ubuntu 16.04 / xenial
# - CentOS7
install_hostdeps_ubuntu() {
# Historically we take care of installing these deps for devs. We really
# shouldn't be doing this given that installing some of these deps will
# potentially uninstall other things (competing python versions for
# example)
echo "Warning: about to run potentially destructive apt-get commands."
echo " waiting 5 seconds..."
sleep 5
sudo apt-get update
sudo apt-get install -y python2.7 curl
}
install_hostdeps_centos() {
# libatomic needed by streamsdk at runtime
sudo yum install -y libatomic
}
do_usermod_and_end() {
# $USER can be unset, $(whoami) works more reliably.
sudo usermod -aG docker $(whoami)
echo "You need to log out and back in for your environment to pick up 'docker' group membership."
sleep 3
echo "Attempting to force group membership reload for this shell. You may be prompted for your account password."
set -x
exec su --login $(whoami)
}
install_docker_ubuntu() {
sudo apt-get update
sudo apt-get install -y apt-transport-https ca-certificates software-properties-common
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu ${VERSION_CODENAME} stable"
sudo apt-get update
sudo apt-get install -y docker-ce
do_usermod_and_end
}
install_docker_centos() {
sudo yum install -y yum-utils device-mapper-persistent-data lvm2
sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
sudo yum install -y docker-ce
sudo systemctl start docker
sudo systemctl enable docker
do_usermod_and_end
}
main() {
sudo -V >& /dev/null && HAVE_SUDO=1 || HAVE_SUDO=0
if [[ "$HAVE_SUDO" == "0" ]]; then
echo "Install 'sudo' before running this script."
exit 1
fi
    local DOCKER=$(which docker 2> /dev/null)
if [[ -f /etc/os-release ]]; then
. /etc/os-release
if [[ "x$NAME" == "xUbuntu" ]]; then
install_hostdeps_ubuntu
[[ "x$DOCKER" = "x" ]] && install_docker_ubuntu
elif [[ "x$NAME" == "xCentOS Linux" ]]; then
install_hostdeps_centos
[[ "x$DOCKER" = "x" ]] && install_docker_centos
fi
else
echo "Unable to determine distribution. Can't read /etc/os-release" | tee /dev/stderr
exit 1
fi
}
# Skip trying to install system packages on TeamCity agents. That's bad.
[[ "x${TEAMCITY_BUILDCONF_NAME:-}" = "x" ]] && main || echo "TeamCity! Skipping setup."
|
NVIDIA-Omniverse/PhysX/blast/deps/target-deps.packman.xml | <project toolsVersion="5.0">
<dependency name="python27" linkPath="../_build/target-deps/python27">
<package name="python" version="2.7.14-windows-x64-1" platforms="windows-x86_64"/>
</dependency>
<dependency name="python36" linkPath="../_build/target-deps/python36">
<package name="python" version="3.6.7-windows-x86_64" platforms="windows-x86_64"/>
<package name="python" version="3.6.5-linux-x64" platforms="linux-x86_64"/>
<package name="python" version="3.6.8-34.a6e9b99d-linux-aarch64" platforms="linux-aarch64"/>
</dependency>
<dependency name="python37" linkPath="../_build/target-deps/python37">
<package name="python" version="3.7.9-windows-x86_64" platforms="windows-x86_64" />
<package name="python" version="3.7.9-173.e9ee4ea0-${platform}" platforms="linux-x86_64 linux-aarch64" />
</dependency>
<dependency name="doxygen" linkPath="_dependencies/doxygen">
<package name="doxygen" version="1.8.5-windows-x86_64" platforms="windows-x86_64" />
</dependency>
<dependency name="BoostMultiprecision" linkPath="../_build/target-deps/BoostMultiprecision">
<package name="BoostMultiprecision" version="1.64.0.1" platforms="windows-x86_64 linux-x86_64"/>
</dependency>
<dependency name="googletest" linkPath="../_build/target-deps/googletest">
<package name="googletest-win" version="1.4.0.2" platforms="windows-x86_64"/>
<package name="googletest-linux-x86_64" version="1.8.0.1" platforms="linux-x86_64"/>
</dependency>
</project>
|
NVIDIA-Omniverse/PhysX/blast/deps/repo-deps.packman.xml | <project toolsVersion="5.0">
<dependency name="repo_man" linkPath="../_repo/deps/repo_man">
<package name="repo_man" version="1.48.1" />
</dependency>
<dependency name="repo_build" linkPath="../_repo/deps/repo_build" tags="non-redist">
<package name="repo_build" version="0.55.3" />
</dependency>
</project>
|
NVIDIA-Omniverse/PhysX/blast/deps/host-deps.packman.xml | <project toolsVersion="6.11">
<dependency name="premake" linkPath="../_build/host-deps/premake">
<package name="premake" version="5.0.9-nv-main-68e9a88a-${platform}" />
</dependency>
<dependency name="msvc" linkPath="../_build/host-deps/msvc">
<package name="msvc" version="2017-15.9.17-1" platforms="windows-x86_64" />
</dependency>
<dependency name="winsdk" linkPath="../_build/host-deps/winsdk">
<package name="winsdk" version="10.17763" platforms="windows-x86_64"/>
</dependency>
<dependency name="llvm" linkPath="../_build/host-deps/llvm">
<package name="llvm" version="6.0.0-linux-x86_64" platforms="linux-x86_64 linux-aarch64"/>
</dependency>
<dependency name="gcc_x64" linkPath="../_build/host-deps/gcc-x86_64">
<package name="gcc" version="9.2.0-binutils-2.30-x86_64-pc-linux-gnu-2" platforms="linux-x86_64"/>
</dependency>
<dependency name="gcc_aarch64" linkPath="../_build/host-deps/gcc-aarch64">
<package name="gcc" version="9.2.0-aarch64-pc-linux-gnu" platforms="linux-aarch64"/>
</dependency>
<dependency name="mirror" linkPath="../_build/host-deps/mirror">
<package name="mirror" version="0.1.110-a2df2ebd-windows-x86_64" platforms="windows-x86_64" />
<package name="mirror" version="0.1.100-81448125-linux-x86_64" platforms="linux-x86_64" />
<package name="mirror" version="0.1.100-dev-linux-aarch64" platforms="linux-aarch64" />
</dependency>
<dependency name="linbuild" linkPath="../_build/host-deps/linbuild">
<package name="linbuild" version="1.10.112-044606b-aarch64" platforms="linux-aarch64" />
<package name="linbuild" version="1.10.112-044606b-x86_64" platforms="linux-x86_64" />
</dependency>
<!-- <dependency name="omnitrace_tools" linkPath="../_build/host-deps/omni-trace-tools">
<package name="omnitrace-tools" version="0.5.62a10f54-$platform-release-dev"/>
</dependency> -->
<dependency name="CapnProto" linkPath="../_build/host-deps/CapnProto">
<package name="CapnProto" version="0.6.1.4" platforms="windows-x86_64 linux-x86_64" />
</dependency>
</project>
|
NVIDIA-Omniverse/PhysX/blast/tools/repoman/repoman.py | import os
import sys
import io
import contextlib
import packmanapi
REPO_ROOT = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../..")
REPO_DEPS_FILE = os.path.join(REPO_ROOT, "deps/repo-deps.packman.xml")
def bootstrap():
"""
Bootstrap all omni.repo modules.
    Pull them with packman from repo-deps.packman.xml and add them all to the Python sys.path to enable importing.
"""
#with contextlib.redirect_stdout(io.StringIO()):
deps = packmanapi.pull(REPO_DEPS_FILE)
for dep_path in deps.values():
if dep_path not in sys.path:
sys.path.append(dep_path)
if __name__ == "__main__":
bootstrap()
import omni.repo.man
omni.repo.man.main(REPO_ROOT)
|
NVIDIA-Omniverse/PhysX/blast/tools/packman/python.sh | #!/bin/bash
# Copyright 2019-2020 NVIDIA CORPORATION
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
PACKMAN_CMD="$(dirname "${BASH_SOURCE}")/packman"
if [ ! -f "$PACKMAN_CMD" ]; then
PACKMAN_CMD="${PACKMAN_CMD}.sh"
fi
source "$PACKMAN_CMD" init
export PYTHONPATH="${PM_MODULE_DIR}:${PYTHONPATH}"
if [ -z "${PYTHONNOUSERSITE:-}" ]; then
export PYTHONNOUSERSITE=1
fi
# For performance, default to unbuffered; however, allow overriding via
# PYTHONUNBUFFERED=0 since PYTHONUNBUFFERED on windows can truncate output
# when printing long strings
if [ -z "${PYTHONUNBUFFERED:-}" ]; then
export PYTHONUNBUFFERED=1
fi
# workaround for our python not shipping with certs
if [[ -z ${SSL_CERT_DIR:-} ]]; then
export SSL_CERT_DIR=/etc/ssl/certs/
fi
"${PM_PYTHON}" "$@"
|
NVIDIA-Omniverse/PhysX/blast/tools/packman/python.bat | :: Copyright 2019-2020 NVIDIA CORPORATION
::
:: Licensed under the Apache License, Version 2.0 (the "License");
:: you may not use this file except in compliance with the License.
:: You may obtain a copy of the License at
::
:: http://www.apache.org/licenses/LICENSE-2.0
::
:: Unless required by applicable law or agreed to in writing, software
:: distributed under the License is distributed on an "AS IS" BASIS,
:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
:: See the License for the specific language governing permissions and
:: limitations under the License.
@echo off
setlocal enableextensions
call "%~dp0\packman" init
set "PYTHONPATH=%PM_MODULE_DIR%;%PYTHONPATH%"
if not defined PYTHONNOUSERSITE (
set PYTHONNOUSERSITE=1
)
REM For performance, default to unbuffered; however, allow overriding via
REM PYTHONUNBUFFERED=0 since PYTHONUNBUFFERED on windows can truncate output
REM when printing long strings
if not defined PYTHONUNBUFFERED (
set PYTHONUNBUFFERED=1
)
"%PM_PYTHON%" %* |
NVIDIA-Omniverse/PhysX/blast/tools/packman/packmanconf.py | # Use this file to bootstrap packman into your Python environment (3.10.x). Simply
# add the folder where packmanconf.py is located to sys.path and then execute:
#
# >>> import packmanconf
# >>> packmanconf.init()
#
# It will use the configured remote(s) and the version of packman in the same folder,
# giving you full access to the packman API via the following module
#
# >>> import packmanapi
# >>> dir(packmanapi)
import os
import platform
import sys
def init():
"""Call this function to initialize the packman configuration.
Calls to the packman API will work after successfully calling this function.
Note:
This function only needs to be called once during the execution of your
program. Calling it repeatedly is harmless but wasteful.
Compatibility with your Python interpreter is checked and upon failure
the function will report what is required.
Example:
>>> import packmanconf
>>> packmanconf.init()
>>> import packmanapi
>>> packmanapi.set_verbosity_level(packmanapi.VERBOSITY_HIGH)
"""
major = sys.version_info[0]
minor = sys.version_info[1]
if major != 3 or minor != 10:
raise RuntimeError(
f"This version of packman requires Python 3.10.x, but {major}.{minor} was provided"
)
conf_dir = os.path.dirname(os.path.abspath(__file__))
os.environ["PM_INSTALL_PATH"] = conf_dir
packages_root = get_packages_root(conf_dir)
version = get_version(conf_dir)
module_dir = get_module_dir(conf_dir, packages_root, version)
sys.path.insert(1, module_dir)
def get_packages_root(conf_dir: str) -> str:
root = os.getenv("PM_PACKAGES_ROOT")
if not root:
platform_name = platform.system()
if platform_name == "Windows":
drive, _ = os.path.splitdrive(conf_dir)
root = os.path.join(drive, "packman-repo")
elif platform_name == "Darwin":
# macOS
root = os.path.join(
os.path.expanduser("~"), "Library/Application Support/packman-cache"
)
        elif platform_name == "Linux":
            try:
                # honor XDG_CACHE_HOME when set, otherwise fall back to ~/.cache
                cache_root = os.environ["XDG_CACHE_HOME"]
            except KeyError:
                cache_root = os.path.join(os.path.expanduser("~"), ".cache")
            root = os.path.join(cache_root, "packman")
else:
raise RuntimeError(f"Unsupported platform '{platform_name}'")
# make sure the path exists:
os.makedirs(root, exist_ok=True)
return root
def get_module_dir(conf_dir, packages_root: str, version: str) -> str:
module_dir = os.path.join(packages_root, "packman-common", version)
if not os.path.exists(module_dir):
import tempfile
tf = tempfile.NamedTemporaryFile(delete=False)
target_name = tf.name
tf.close()
url = f"http://bootstrap.packman.nvidia.com/packman-common@{version}.zip"
print(f"Downloading '{url}' ...")
import urllib.request
urllib.request.urlretrieve(url, target_name)
from importlib.machinery import SourceFileLoader
# import module from path provided
script_path = os.path.join(conf_dir, "bootstrap", "install_package.py")
ip = SourceFileLoader("install_package", script_path).load_module()
print("Unpacking ...")
ip.install_package(target_name, module_dir)
os.unlink(tf.name)
return module_dir
def get_version(conf_dir: str):
path = os.path.join(conf_dir, "packman")
if not os.path.exists(path): # in dev repo fallback
path += ".sh"
with open(path, "rt", encoding="utf8") as launch_file:
for line in launch_file.readlines():
if line.startswith("PM_PACKMAN_VERSION"):
_, value = line.split("=")
return value.strip()
raise RuntimeError(f"Unable to find 'PM_PACKMAN_VERSION' in '{path}'")
|
NVIDIA-Omniverse/PhysX/blast/tools/packman/packman.cmd | :: RUN_PM_MODULE must always be at the same spot for packman update to work (batch reloads file during update!)
:: [xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx]
:: Reset errorlevel status (don't inherit from caller)
@call :ECHO_AND_RESET_ERROR
:: You can remove this section if you do your own manual configuration of the dev machines
call :CONFIGURE
if %errorlevel% neq 0 ( exit /b %errorlevel% )
:: Everything below is mandatory
if not defined PM_PYTHON goto :PYTHON_ENV_ERROR
if not defined PM_MODULE goto :MODULE_ENV_ERROR
set PM_VAR_PATH_ARG=
if "%1"=="pull" goto :SET_VAR_PATH
if "%1"=="install" goto :SET_VAR_PATH
:RUN_PM_MODULE
"%PM_PYTHON%" -S -s -u -E "%PM_MODULE%" %* %PM_VAR_PATH_ARG%
if %errorlevel% neq 0 ( exit /b %errorlevel% )
:: Marshall environment variables into the current environment if they have been generated and remove temporary file
if exist "%PM_VAR_PATH%" (
for /F "usebackq tokens=*" %%A in ("%PM_VAR_PATH%") do set "%%A"
)
if %errorlevel% neq 0 ( goto :VAR_ERROR )
if exist "%PM_VAR_PATH%" (
del /F "%PM_VAR_PATH%"
)
if %errorlevel% neq 0 ( goto :VAR_ERROR )
set PM_VAR_PATH=
goto :eof
:: Subroutines below
:PYTHON_ENV_ERROR
@echo User environment variable PM_PYTHON is not set! Please configure machine for packman or call configure.bat.
exit /b 1
:MODULE_ENV_ERROR
@echo User environment variable PM_MODULE is not set! Please configure machine for packman or call configure.bat.
exit /b 1
:VAR_ERROR
@echo Error while processing and setting environment variables!
exit /b 1
:: pad [xxxx]
:ECHO_AND_RESET_ERROR
@echo off
if /I "%PM_VERBOSITY%"=="debug" (
@echo on
)
exit /b 0
:SET_VAR_PATH
:: Generate temporary path for variable file
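:: (a child process cannot modify its parent's environment, so packman writes name=value pairs to this file, which RUN_PM_MODULE reads back into the current environment above)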
for /f "delims=" %%a in ('%PM_PYTHON% -S -s -u -E -c "import tempfile;file = tempfile.NamedTemporaryFile(mode='w+t', delete=False);print(file.name)"') do (set PM_VAR_PATH=%%a)
set PM_VAR_PATH_ARG=--var-path="%PM_VAR_PATH%"
goto :RUN_PM_MODULE
:CONFIGURE
:: Must capture and set code page to work around issue #279, powershell invocation mutates console font
:: This issue only happens in Windows CMD shell when using 65001 code page. Some Git Bash implementations
:: don't support chcp so this workaround is a bit convoluted.
:: Test for chcp:
chcp > nul 2>&1
if %errorlevel% equ 0 (
for /f "tokens=2 delims=:" %%a in ('chcp') do (set PM_OLD_CODE_PAGE=%%a)
) else (
call :ECHO_AND_RESET_ERROR
)
:: trim leading space (this is safe even when PM_OLD_CODE_PAGE has not been set)
set PM_OLD_CODE_PAGE=%PM_OLD_CODE_PAGE:~1%
if "%PM_OLD_CODE_PAGE%" equ "65001" (
chcp 437 > nul
set PM_RESTORE_CODE_PAGE=1
)
call "%~dp0\bootstrap\configure.bat"
set PM_CONFIG_ERRORLEVEL=%errorlevel%
if defined PM_RESTORE_CODE_PAGE (
:: Restore code page
chcp %PM_OLD_CODE_PAGE% > nul
)
set PM_OLD_CODE_PAGE=
set PM_RESTORE_CODE_PAGE=
exit /b %PM_CONFIG_ERRORLEVEL%
|
NVIDIA-Omniverse/PhysX/blast/tools/packman/config.packman.xml | <config remotes="cloudfront">
<remote2 name="cloudfront">
<transport actions="download" protocol="http" packageLocation="d4i3qtqj3r0z5.cloudfront.net/${name}@${version}" />
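        <!-- packman expands ${name} and ${version} per package; e.g. a dependency on repo_man 1.48.1 resolves to d4i3qtqj3r0z5.cloudfront.net/repo_man@1.48.1 -->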
<transport actions="upload" protocol="s3" packageLocation="packages-for-cloudfront" />
<transport actions="list" protocol="http" packageLocation="omnipackages.nvidia.com/api/v1/list/cloudfront" />
</remote2>
</config>
|
NVIDIA-Omniverse/PhysX/blast/tools/packman/bootstrap/generate_temp_file_name.ps1 | <#
Copyright 2019 NVIDIA CORPORATION
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
#>
$out = [System.IO.Path]::GetTempFileName()
Write-Host $out
# SIG # Begin signature block
# MIIaVwYJKoZIhvcNAQcCoIIaSDCCGkQCAQExDzANBglghkgBZQMEAgEFADB5Bgor
# BgEEAYI3AgEEoGswaTA0BgorBgEEAYI3AgEeMCYCAwEAAAQQH8w7YFlLCE63JNLG
# KX7zUQIBAAIBAAIBAAIBAAIBADAxMA0GCWCGSAFlAwQCAQUABCAK+Ewup1N0/mdf
# 1l4R58rxyumHgZvTmEhrYTb2Zf0zd6CCCiIwggTTMIIDu6ADAgECAhBi50XpIWUh
# PJcfXEkK6hKlMA0GCSqGSIb3DQEBCwUAMIGEMQswCQYDVQQGEwJVUzEdMBsGA1UE
# ChMUU3ltYW50ZWMgQ29ycG9yYXRpb24xHzAdBgNVBAsTFlN5bWFudGVjIFRydXN0
# IE5ldHdvcmsxNTAzBgNVBAMTLFN5bWFudGVjIENsYXNzIDMgU0hBMjU2IENvZGUg
# U2lnbmluZyBDQSAtIEcyMB4XDTE4MDcwOTAwMDAwMFoXDTIxMDcwOTIzNTk1OVow
# gYMxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRQwEgYDVQQHDAtT
# YW50YSBDbGFyYTEbMBkGA1UECgwSTlZJRElBIENvcnBvcmF0aW9uMQ8wDQYDVQQL
# DAZJVC1NSVMxGzAZBgNVBAMMEk5WSURJQSBDb3Jwb3JhdGlvbjCCASIwDQYJKoZI
# hvcNAQEBBQADggEPADCCAQoCggEBALEZN63dA47T4i90jZ84CJ/aWUwVtLff8AyP
# YspFfIZGdZYiMgdb8A5tBh7653y0G/LZL6CVUkgejcpvBU/Dl/52a+gSWy2qJ2bH
# jMFMKCyQDhdpCAKMOUKSC9rfzm4cFeA9ct91LQCAait4LhLlZt/HF7aG+r0FgCZa
# HJjJvE7KNY9G4AZXxjSt8CXS8/8NQMANqjLX1r+F+Hl8PzQ1fVx0mMsbdtaIV4Pj
# 5flAeTUnz6+dCTx3vTUo8MYtkS2UBaQv7t7H2B7iwJDakEQKk1XHswJdeqG0osDU
# z6+NVks7uWE1N8UIhvzbw0FEX/U2kpfyWaB/J3gMl8rVR8idPj8CAwEAAaOCAT4w
# ggE6MAkGA1UdEwQCMAAwDgYDVR0PAQH/BAQDAgeAMBMGA1UdJQQMMAoGCCsGAQUF
# BwMDMGEGA1UdIARaMFgwVgYGZ4EMAQQBMEwwIwYIKwYBBQUHAgEWF2h0dHBzOi8v
# ZC5zeW1jYi5jb20vY3BzMCUGCCsGAQUFBwICMBkMF2h0dHBzOi8vZC5zeW1jYi5j
# b20vcnBhMB8GA1UdIwQYMBaAFNTABiJJ6zlL3ZPiXKG4R3YJcgNYMCsGA1UdHwQk
# MCIwIKAeoByGGmh0dHA6Ly9yYi5zeW1jYi5jb20vcmIuY3JsMFcGCCsGAQUFBwEB
# BEswSTAfBggrBgEFBQcwAYYTaHR0cDovL3JiLnN5bWNkLmNvbTAmBggrBgEFBQcw
# AoYaaHR0cDovL3JiLnN5bWNiLmNvbS9yYi5jcnQwDQYJKoZIhvcNAQELBQADggEB
# AIJKh5vKJdhHJtMzATmc1BmXIQ3RaJONOZ5jMHn7HOkYU1JP0OIzb4pXXkH8Xwfr
# K6bnd72IhcteyksvKsGpSvK0PBBwzodERTAu1Os2N+EaakxQwV/xtqDm1E3IhjHk
# fRshyKKzmFk2Ci323J4lHtpWUj5Hz61b8gd72jH7xnihGi+LORJ2uRNZ3YuqMNC3
# SBC8tAyoJqEoTJirULUCXW6wX4XUm5P2sx+htPw7szGblVKbQ+PFinNGnsSEZeKz
# D8jUb++1cvgTKH59Y6lm43nsJjkZU77tNqyq4ABwgQRk6lt8cS2PPwjZvTmvdnla
# ZhR0K4of+pQaUQHXVIBdji8wggVHMIIEL6ADAgECAhB8GzU1SufbdOdBXxFpymuo
# MA0GCSqGSIb3DQEBCwUAMIG9MQswCQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNp
# Z24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0IE5ldHdvcmsxOjA4BgNV
# BAsTMShjKSAyMDA4IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl
# IG9ubHkxODA2BgNVBAMTL1ZlcmlTaWduIFVuaXZlcnNhbCBSb290IENlcnRpZmlj
# YXRpb24gQXV0aG9yaXR5MB4XDTE0MDcyMjAwMDAwMFoXDTI0MDcyMTIzNTk1OVow
# gYQxCzAJBgNVBAYTAlVTMR0wGwYDVQQKExRTeW1hbnRlYyBDb3Jwb3JhdGlvbjEf
# MB0GA1UECxMWU3ltYW50ZWMgVHJ1c3QgTmV0d29yazE1MDMGA1UEAxMsU3ltYW50
# ZWMgQ2xhc3MgMyBTSEEyNTYgQ29kZSBTaWduaW5nIENBIC0gRzIwggEiMA0GCSqG
# SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDXlUPU3N9nrjn7UqS2JjEEcOm3jlsqujdp
# NZWPu8Aw54bYc7vf69F2P4pWjustS/BXGE6xjaUz0wt1I9VqeSfdo9P3Dodltd6t
# HPH1NbQiUa8iocFdS5B/wFlOq515qQLXHkmxO02H/sJ4q7/vUq6crwjZOeWaUT5p
# XzAQTnFjbFjh8CAzGw90vlvLEuHbjMSAlHK79kWansElC/ujHJ7YpglwcezAR0yP
# fcPeGc4+7gRyjhfT//CyBTIZTNOwHJ/+pXggQnBBsCaMbwDIOgARQXpBsKeKkQSg
# mXj0d7TzYCrmbFAEtxRg/w1R9KiLhP4h2lxeffUpeU+wRHRvbXL/AgMBAAGjggF4
# MIIBdDAuBggrBgEFBQcBAQQiMCAwHgYIKwYBBQUHMAGGEmh0dHA6Ly9zLnN5bWNk
# LmNvbTASBgNVHRMBAf8ECDAGAQH/AgEAMGYGA1UdIARfMF0wWwYLYIZIAYb4RQEH
# FwMwTDAjBggrBgEFBQcCARYXaHR0cHM6Ly9kLnN5bWNiLmNvbS9jcHMwJQYIKwYB
# BQUHAgIwGRoXaHR0cHM6Ly9kLnN5bWNiLmNvbS9ycGEwNgYDVR0fBC8wLTAroCmg
# J4YlaHR0cDovL3Muc3ltY2IuY29tL3VuaXZlcnNhbC1yb290LmNybDATBgNVHSUE
# DDAKBggrBgEFBQcDAzAOBgNVHQ8BAf8EBAMCAQYwKQYDVR0RBCIwIKQeMBwxGjAY
# BgNVBAMTEVN5bWFudGVjUEtJLTEtNzI0MB0GA1UdDgQWBBTUwAYiSes5S92T4lyh
# uEd2CXIDWDAfBgNVHSMEGDAWgBS2d/ppSEefUxLVwuoHMnYH0ZcHGTANBgkqhkiG
# 9w0BAQsFAAOCAQEAf+vKp+qLdkLrPo4gVDDjt7nc+kg+FscPRZUQzSeGo2bzAu1x
# +KrCVZeRcIP5Un5SaTzJ8eCURoAYu6HUpFam8x0AkdWG80iH4MvENGggXrTL+QXt
# nK9wUye56D5+UaBpcYvcUe2AOiUyn0SvbkMo0yF1u5fYi4uM/qkERgSF9xWcSxGN
# xCwX/tVuf5riVpLxlrOtLfn039qJmc6yOETA90d7yiW5+ipoM5tQct6on9TNLAs0
# vYsweEDgjY4nG5BvGr4IFYFd6y/iUedRHsl4KeceZb847wFKAQkkDhbEFHnBQTc0
# 0D2RUpSd4WjvCPDiaZxnbpALGpNx1CYCw8BaIzGCD4swgg+HAgEBMIGZMIGEMQsw
# CQYDVQQGEwJVUzEdMBsGA1UEChMUU3ltYW50ZWMgQ29ycG9yYXRpb24xHzAdBgNV
# BAsTFlN5bWFudGVjIFRydXN0IE5ldHdvcmsxNTAzBgNVBAMTLFN5bWFudGVjIENs
# YXNzIDMgU0hBMjU2IENvZGUgU2lnbmluZyBDQSAtIEcyAhBi50XpIWUhPJcfXEkK
# 6hKlMA0GCWCGSAFlAwQCAQUAoHwwEAYKKwYBBAGCNwIBDDECMAAwGQYJKoZIhvcN
# AQkDMQwGCisGAQQBgjcCAQQwHAYKKwYBBAGCNwIBCzEOMAwGCisGAQQBgjcCARUw
# LwYJKoZIhvcNAQkEMSIEIPW+EpFrZSdzrjFFo0UT+PzFeYn/GcWNyWFaU/JMrMfR
# MA0GCSqGSIb3DQEBAQUABIIBAA8fmU/RJcF9t60DZZAjf8FB3EZddOaHgI9z40nV
# CnfTGi0OEYU48Pe9jkQQV2fABpACfW74xmNv3QNgP2qP++mkpKBVv28EIAuINsFt
# YAITEljLN/VOVul8lvjxar5GSFFgpE5F6j4xcvI69LuCWbN8cteTVsBGg+eGmjfx
# QZxP252z3FqPN+mihtFegF2wx6Mg6/8jZjkO0xjBOwSdpTL4uyQfHvaPBKXuWxRx
# ioXw4ezGAwkuBoxWK8UG7Qu+7CSfQ3wMOjvyH2+qn30lWEsvRMdbGAp7kvfr3EGZ
# a3WN7zXZ+6KyZeLeEH7yCDzukAjptaY/+iLVjJsuzC6tCSqhgg1EMIINQAYKKwYB
# BAGCNwMDATGCDTAwgg0sBgkqhkiG9w0BBwKggg0dMIINGQIBAzEPMA0GCWCGSAFl
# AwQCAQUAMHcGCyqGSIb3DQEJEAEEoGgEZjBkAgEBBglghkgBhv1sBwEwMTANBglg
# hkgBZQMEAgEFAAQg14BnPazQkW9whhZu1d0bC3lqqScvxb3SSb1QT8e3Xg0CEFhw
# aMBZ2hExXhr79A9+bXEYDzIwMjEwNDA4MDkxMTA5WqCCCjcwggT+MIID5qADAgEC
# AhANQkrgvjqI/2BAIc4UAPDdMA0GCSqGSIb3DQEBCwUAMHIxCzAJBgNVBAYTAlVT
# MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j
# b20xMTAvBgNVBAMTKERpZ2lDZXJ0IFNIQTIgQXNzdXJlZCBJRCBUaW1lc3RhbXBp
# bmcgQ0EwHhcNMjEwMTAxMDAwMDAwWhcNMzEwMTA2MDAwMDAwWjBIMQswCQYDVQQG
# EwJVUzEXMBUGA1UEChMORGlnaUNlcnQsIEluYy4xIDAeBgNVBAMTF0RpZ2lDZXJ0
# IFRpbWVzdGFtcCAyMDIxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA
# wuZhhGfFivUNCKRFymNrUdc6EUK9CnV1TZS0DFC1JhD+HchvkWsMlucaXEjvROW/
# m2HNFZFiWrj/ZwucY/02aoH6KfjdK3CF3gIY83htvH35x20JPb5qdofpir34hF0e
# dsnkxnZ2OlPR0dNaNo/Go+EvGzq3YdZz7E5tM4p8XUUtS7FQ5kE6N1aG3JMjjfdQ
# Jehk5t3Tjy9XtYcg6w6OLNUj2vRNeEbjA4MxKUpcDDGKSoyIxfcwWvkUrxVfbENJ
# Cf0mI1P2jWPoGqtbsR0wwptpgrTb/FZUvB+hh6u+elsKIC9LCcmVp42y+tZji06l
# chzun3oBc/gZ1v4NSYS9AQIDAQABo4IBuDCCAbQwDgYDVR0PAQH/BAQDAgeAMAwG
# A1UdEwEB/wQCMAAwFgYDVR0lAQH/BAwwCgYIKwYBBQUHAwgwQQYDVR0gBDowODA2
# BglghkgBhv1sBwEwKTAnBggrBgEFBQcCARYbaHR0cDovL3d3dy5kaWdpY2VydC5j
# b20vQ1BTMB8GA1UdIwQYMBaAFPS24SAd/imu0uRhpbKiJbLIFzVuMB0GA1UdDgQW
# BBQ2RIaOpLqwZr68KC0dRDbd42p6vDBxBgNVHR8EajBoMDKgMKAuhixodHRwOi8v
# Y3JsMy5kaWdpY2VydC5jb20vc2hhMi1hc3N1cmVkLXRzLmNybDAyoDCgLoYsaHR0
# cDovL2NybDQuZGlnaWNlcnQuY29tL3NoYTItYXNzdXJlZC10cy5jcmwwgYUGCCsG
# AQUFBwEBBHkwdzAkBggrBgEFBQcwAYYYaHR0cDovL29jc3AuZGlnaWNlcnQuY29t
# ME8GCCsGAQUFBzAChkNodHRwOi8vY2FjZXJ0cy5kaWdpY2VydC5jb20vRGlnaUNl
# cnRTSEEyQXNzdXJlZElEVGltZXN0YW1waW5nQ0EuY3J0MA0GCSqGSIb3DQEBCwUA
# A4IBAQBIHNy16ZojvOca5yAOjmdG/UJyUXQKI0ejq5LSJcRwWb4UoOUngaVNFBUZ
# B3nw0QTDhtk7vf5EAmZN7WmkD/a4cM9i6PVRSnh5Nnont/PnUp+Tp+1DnnvntN1B
# Ion7h6JGA0789P63ZHdjXyNSaYOC+hpT7ZDMjaEXcw3082U5cEvznNZ6e9oMvD0y
# 0BvL9WH8dQgAdryBDvjA4VzPxBFy5xtkSdgimnUVQvUtMjiB2vRgorq0Uvtc4GEk
# JU+y38kpqHNDUdq9Y9YfW5v3LhtPEx33Sg1xfpe39D+E68Hjo0mh+s6nv1bPull2
# YYlffqe0jmd4+TaY4cso2luHpoovMIIFMTCCBBmgAwIBAgIQCqEl1tYyG35B5AXa
# NpfCFTANBgkqhkiG9w0BAQsFADBlMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGln
# aUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtE
# aWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwHhcNMTYwMTA3MTIwMDAwWhcNMzEw
# MTA3MTIwMDAwWjByMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5j
# MRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMTEwLwYDVQQDEyhEaWdpQ2VydCBT
# SEEyIEFzc3VyZWQgSUQgVGltZXN0YW1waW5nIENBMIIBIjANBgkqhkiG9w0BAQEF
# AAOCAQ8AMIIBCgKCAQEAvdAy7kvNj3/dqbqCmcU5VChXtiNKxA4HRTNREH3Q+X1N
# aH7ntqD0jbOI5Je/YyGQmL8TvFfTw+F+CNZqFAA49y4eO+7MpvYyWf5fZT/gm+vj
# RkcGGlV+Cyd+wKL1oODeIj8O/36V+/OjuiI+GKwR5PCZA207hXwJ0+5dyJoLVOOo
# CXFr4M8iEA91z3FyTgqt30A6XLdR4aF5FMZNJCMwXbzsPGBqrC8HzP3w6kfZiFBe
# /WZuVmEnKYmEUeaC50ZQ/ZQqLKfkdT66mA+Ef58xFNat1fJky3seBdCEGXIX8RcG
# 7z3N1k3vBkL9olMqT4UdxB08r8/arBD13ays6Vb/kwIDAQABo4IBzjCCAcowHQYD
# VR0OBBYEFPS24SAd/imu0uRhpbKiJbLIFzVuMB8GA1UdIwQYMBaAFEXroq/0ksuC
# MS1Ri6enIZ3zbcgPMBIGA1UdEwEB/wQIMAYBAf8CAQAwDgYDVR0PAQH/BAQDAgGG
# MBMGA1UdJQQMMAoGCCsGAQUFBwMIMHkGCCsGAQUFBwEBBG0wazAkBggrBgEFBQcw
# AYYYaHR0cDovL29jc3AuZGlnaWNlcnQuY29tMEMGCCsGAQUFBzAChjdodHRwOi8v
# Y2FjZXJ0cy5kaWdpY2VydC5jb20vRGlnaUNlcnRBc3N1cmVkSURSb290Q0EuY3J0
# MIGBBgNVHR8EejB4MDqgOKA2hjRodHRwOi8vY3JsNC5kaWdpY2VydC5jb20vRGln
# aUNlcnRBc3N1cmVkSURSb290Q0EuY3JsMDqgOKA2hjRodHRwOi8vY3JsMy5kaWdp
# Y2VydC5jb20vRGlnaUNlcnRBc3N1cmVkSURSb290Q0EuY3JsMFAGA1UdIARJMEcw
# OAYKYIZIAYb9bAACBDAqMCgGCCsGAQUFBwIBFhxodHRwczovL3d3dy5kaWdpY2Vy
# dC5jb20vQ1BTMAsGCWCGSAGG/WwHATANBgkqhkiG9w0BAQsFAAOCAQEAcZUS6VGH
# VmnN793afKpjerN4zwY3QITvS4S/ys8DAv3Fp8MOIEIsr3fzKx8MIVoqtwU0HWqu
# mfgnoma/Capg33akOpMP+LLR2HwZYuhegiUexLoceywh4tZbLBQ1QwRostt1AuBy
# x5jWPGTlH0gQGF+JOGFNYkYkh2OMkVIsrymJ5Xgf1gsUpYDXEkdws3XVk4WTfraS
# Z/tTYYmo9WuWwPRYaQ18yAGxuSh1t5ljhSKMYcp5lH5Z/IwP42+1ASa2bKXuh1Eh
# 5Fhgm7oMLSttosR+u8QlK0cCCHxJrhO24XxCQijGGFbPQTS2Zl22dHv1VjMiLyI2
# skuiSpXY9aaOUjGCAk0wggJJAgEBMIGGMHIxCzAJBgNVBAYTAlVTMRUwEwYDVQQK
# EwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5jb20xMTAvBgNV
# BAMTKERpZ2lDZXJ0IFNIQTIgQXNzdXJlZCBJRCBUaW1lc3RhbXBpbmcgQ0ECEA1C
# SuC+Ooj/YEAhzhQA8N0wDQYJYIZIAWUDBAIBBQCggZgwGgYJKoZIhvcNAQkDMQ0G
# CyqGSIb3DQEJEAEEMBwGCSqGSIb3DQEJBTEPFw0yMTA0MDgwOTExMDlaMCsGCyqG
# SIb3DQEJEAIMMRwwGjAYMBYEFOHXgqjhkb7va8oWkbWqtJSmJJvzMC8GCSqGSIb3
# DQEJBDEiBCCHEAmNNj2zWjWYRfEi4FgzZvrI16kv/U2b9b3oHw6UVDANBgkqhkiG
# 9w0BAQEFAASCAQCdefEKh6Qmwx7xGCkrYi/A+/Cla6LdnYJp38eMs3fqTTvjhyDw
# HffXrwdqWy5/fgW3o3qJXqa5o7hLxYIoWSULOCpJRGdt+w7XKPAbZqHrN9elAhWJ
# vpBTCEaj7dVxr1Ka4NsoPSYe0eidDBmmvGvp02J4Z1j8+ImQPKN6Hv/L8Ixaxe7V
# mH4VtXIiBK8xXdi4wzO+A+qLtHEJXz3Gw8Bp3BNtlDGIUkIhVTM3Q1xcSEqhOLqo
# PGdwCw9acxdXNWWPjOJkNH656Bvmkml+0p6MTGIeG4JCeRh1Wpqm1ZGSoEcXNaof
# wOgj48YzI+dNqBD9i7RSWCqJr2ygYKRTxnuU
# SIG # End signature block
|
NVIDIA-Omniverse/PhysX/blast/tools/packman/bootstrap/configure.bat | :: Copyright 2019-2023 NVIDIA CORPORATION
::
:: Licensed under the Apache License, Version 2.0 (the "License");
:: you may not use this file except in compliance with the License.
:: You may obtain a copy of the License at
::
:: http://www.apache.org/licenses/LICENSE-2.0
::
:: Unless required by applicable law or agreed to in writing, software
:: distributed under the License is distributed on an "AS IS" BASIS,
:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
:: See the License for the specific language governing permissions and
:: limitations under the License.
set PM_PACKMAN_VERSION=7.10.1
:: Specify where packman command is rooted
set PM_INSTALL_PATH=%~dp0..
:: The external root may already be configured and we should do minimal work in that case
if defined PM_PACKAGES_ROOT goto ENSURE_DIR
:: If the folder isn't set we assume that the best place for it is on the drive that we are currently
:: running from
set PM_DRIVE=%CD:~0,2%
set PM_PACKAGES_ROOT=%PM_DRIVE%\packman-repo
:: We use *setx* here so that the variable is persisted in the user environment
echo Setting user environment variable PM_PACKAGES_ROOT to %PM_PACKAGES_ROOT%
setx PM_PACKAGES_ROOT %PM_PACKAGES_ROOT%
if %errorlevel% neq 0 ( goto ERROR )
:: The above doesn't work properly from a build step in VisualStudio because a separate process is
:: spawned for it so it will be lost for subsequent compilation steps - VisualStudio must
:: be launched from a new process. We catch this odd-ball case here:
if defined PM_DISABLE_VS_WARNING goto ENSURE_DIR
if not defined VSLANG goto ENSURE_DIR
echo The above is a once-per-computer operation. Unfortunately VisualStudio cannot pick up the environment change
echo unless *VisualStudio is RELAUNCHED*.
echo If you are launching VisualStudio from the command line or a command line utility, make sure
echo you have a fresh launch environment (relaunch the command line or utility).
echo If you are using 'linkPath' and referring to packages via local folder links you can safely ignore this warning.
echo You can disable this warning by setting the environment variable PM_DISABLE_VS_WARNING.
echo.
:: Check for the directory that we need. Note that mkdir will create any directories
:: that may be needed in the path
:ENSURE_DIR
if not exist "%PM_PACKAGES_ROOT%" (
echo Creating packman packages cache at %PM_PACKAGES_ROOT%
mkdir "%PM_PACKAGES_ROOT%"
)
if %errorlevel% neq 0 ( goto ERROR_MKDIR_PACKAGES_ROOT )
:: The Python interpreter may already be externally configured
if defined PM_PYTHON_EXT (
set PM_PYTHON=%PM_PYTHON_EXT%
goto PACKMAN
)
set PM_PYTHON_VERSION=3.10.5-1-windows-x86_64
set PM_PYTHON_BASE_DIR=%PM_PACKAGES_ROOT%\python
set PM_PYTHON_DIR=%PM_PYTHON_BASE_DIR%\%PM_PYTHON_VERSION%
set PM_PYTHON=%PM_PYTHON_DIR%\python.exe
if exist "%PM_PYTHON%" goto PACKMAN
if not exist "%PM_PYTHON_BASE_DIR%" call :CREATE_PYTHON_BASE_DIR
set PM_PYTHON_PACKAGE=python@%PM_PYTHON_VERSION%.cab
for /f "delims=" %%a in ('powershell -ExecutionPolicy ByPass -NoLogo -NoProfile -File "%~dp0\generate_temp_file_name.ps1"') do set TEMP_FILE_NAME=%%a
set TARGET=%TEMP_FILE_NAME%.zip
call "%~dp0fetch_file_from_packman_bootstrap.cmd" %PM_PYTHON_PACKAGE% "%TARGET%"
if %errorlevel% neq 0 (
echo !!! Error fetching python from CDN !!!
goto ERROR
)
for /f "delims=" %%a in ('powershell -ExecutionPolicy ByPass -NoLogo -NoProfile -File "%~dp0\generate_temp_folder.ps1" -parentPath "%PM_PYTHON_BASE_DIR%"') do set TEMP_FOLDER_NAME=%%a
echo Unpacking Python interpreter ...
"%SystemRoot%\system32\expand.exe" -F:* "%TARGET%" "%TEMP_FOLDER_NAME%" 1> nul
del "%TARGET%"
:: Failure during extraction to temp folder name, need to clean up and abort
if %errorlevel% neq 0 (
echo !!! Error unpacking python !!!
call :CLEAN_UP_TEMP_FOLDER
goto ERROR
)
:: If python has now been installed by a concurrent process we need to clean up and then continue
if exist "%PM_PYTHON%" (
call :CLEAN_UP_TEMP_FOLDER
goto PACKMAN
) else (
if exist "%PM_PYTHON_DIR%" ( rd /s /q "%PM_PYTHON_DIR%" > nul )
)
:: Perform atomic move (allowing overwrite, /y)
move /y "%TEMP_FOLDER_NAME%" "%PM_PYTHON_DIR%" 1> nul
:: Verify that python.exe is now where we expect
if exist "%PM_PYTHON%" goto PACKMAN
:: Wait a second and try again (can help with access denied weirdness)
timeout /t 1 /nobreak 1> nul
move /y "%TEMP_FOLDER_NAME%" "%PM_PYTHON_DIR%" 1> nul
if %errorlevel% neq 0 (
echo !!! Error moving python %TEMP_FOLDER_NAME% -> %PM_PYTHON_DIR% !!!
call :CLEAN_UP_TEMP_FOLDER
goto ERROR
)
:PACKMAN
:: The packman module may already be externally configured
if defined PM_MODULE_DIR_EXT (
set PM_MODULE_DIR=%PM_MODULE_DIR_EXT%
) else (
set PM_MODULE_DIR=%PM_PACKAGES_ROOT%\packman-common\%PM_PACKMAN_VERSION%
)
set PM_MODULE=%PM_MODULE_DIR%\run.py
if exist "%PM_MODULE%" goto END
:: Clean out broken PM_MODULE_DIR if it exists
if exist "%PM_MODULE_DIR%" ( rd /s /q "%PM_MODULE_DIR%" > nul )
set PM_MODULE_PACKAGE=packman-common@%PM_PACKMAN_VERSION%.zip
for /f "delims=" %%a in ('powershell -ExecutionPolicy ByPass -NoLogo -NoProfile -File "%~dp0\generate_temp_file_name.ps1"') do set TEMP_FILE_NAME=%%a
set TARGET=%TEMP_FILE_NAME%
call "%~dp0fetch_file_from_packman_bootstrap.cmd" %PM_MODULE_PACKAGE% "%TARGET%"
if %errorlevel% neq 0 (
echo !!! Error fetching packman from CDN !!!
goto ERROR
)
echo Unpacking ...
"%PM_PYTHON%" -S -s -u -E "%~dp0\install_package.py" "%TARGET%" "%PM_MODULE_DIR%"
if %errorlevel% neq 0 (
echo !!! Error unpacking packman !!!
goto ERROR
)
del "%TARGET%"
goto END
:ERROR_MKDIR_PACKAGES_ROOT
echo Failed to automatically create packman packages repo at %PM_PACKAGES_ROOT%.
echo Please set a location explicitly that packman has permission to write to, by issuing:
echo.
echo setx PM_PACKAGES_ROOT {path-you-choose-for-storing-packman-packages-locally}
echo.
echo Then launch a new command console for the changes to take effect and run packman command again.
exit /B %errorlevel%
:ERROR
echo !!! Failure while configuring local machine :( !!!
exit /B %errorlevel%
:CLEAN_UP_TEMP_FOLDER
rd /S /Q "%TEMP_FOLDER_NAME%"
exit /B
:CREATE_PYTHON_BASE_DIR
:: We ignore errors and clean error state - if two processes create the directory one will fail which is fine
md "%PM_PYTHON_BASE_DIR%" > nul 2>&1
exit /B 0
:END
|
NVIDIA-Omniverse/PhysX/blast/tools/packman/bootstrap/fetch_file_from_packman_bootstrap.cmd | :: Copyright 2019 NVIDIA CORPORATION
::
:: Licensed under the Apache License, Version 2.0 (the "License");
:: you may not use this file except in compliance with the License.
:: You may obtain a copy of the License at
::
:: http://www.apache.org/licenses/LICENSE-2.0
::
:: Unless required by applicable law or agreed to in writing, software
:: distributed under the License is distributed on an "AS IS" BASIS,
:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
:: See the License for the specific language governing permissions and
:: limitations under the License.
:: You need to specify <package-name> <target-path> as input to this command
@setlocal
@set PACKAGE_NAME=%1
@set TARGET_PATH=%2
@echo Fetching %PACKAGE_NAME% ...
@powershell -ExecutionPolicy ByPass -NoLogo -NoProfile -File "%~dp0download_file_from_url.ps1" ^
-source "http://bootstrap.packman.nvidia.com/%PACKAGE_NAME%" -output %TARGET_PATH%
:: A bug in powershell prevents the errorlevel code from being set when using the -File execution option
:: We must therefore do our own failure analysis, basically make sure the file exists:
@if not exist %TARGET_PATH% goto ERROR_DOWNLOAD_FAILED
@endlocal
@exit /b 0
:ERROR_DOWNLOAD_FAILED
@echo Failed to download file from S3
@echo Most likely because endpoint cannot be reached or file %PACKAGE_NAME% doesn't exist
@endlocal
@exit /b 1 |
NVIDIA-Omniverse/PhysX/blast/tools/packman/bootstrap/download_file_from_url.ps1 | <#
Copyright 2019 NVIDIA CORPORATION
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
#>
param(
[Parameter(Mandatory=$true)][string]$source=$null,
[string]$output="out.exe"
)
$filename = $output
$triesLeft = 4
$delay = 2
do
{
$triesLeft -= 1
try
{
Write-Host "Downloading from bootstrap.packman.nvidia.com ..."
$wc = New-Object net.webclient
$wc.Downloadfile($source, $fileName)
exit 0
}
catch
{
Write-Host "Error downloading $source!"
Write-Host $_.Exception|format-list -force
if ($triesLeft)
{
Write-Host "Retrying in $delay seconds ..."
Start-Sleep -seconds $delay
}
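        # squaring the delay gives a steep backoff between attempts: 2s, then 4s, then 16s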
$delay = $delay * $delay
}
} while ($triesLeft -gt 0)
# We only get here if the retries have been exhausted, remove any left-overs:
if (Test-Path $fileName)
{
Remove-Item $fileName
}
exit 1 |
NVIDIA-Omniverse/PhysX/blast/tools/packman/bootstrap/generate_temp_folder.ps1 | <#
Copyright 2019 NVIDIA CORPORATION
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
#>
param(
[Parameter(Mandatory=$true)][string]$parentPath=$null
)
[string] $name = [System.Guid]::NewGuid()
$out = Join-Path $parentPath $name
New-Item -ItemType Directory -Path ($out) | Out-Null
Write-Host $out
# SIG # Begin signature block
# MIIaVwYJKoZIhvcNAQcCoIIaSDCCGkQCAQExDzANBglghkgBZQMEAgEFADB5Bgor
# BgEEAYI3AgEEoGswaTA0BgorBgEEAYI3AgEeMCYCAwEAAAQQH8w7YFlLCE63JNLG
# KX7zUQIBAAIBAAIBAAIBAAIBADAxMA0GCWCGSAFlAwQCAQUABCB29nsqMEu+VmSF
# 7ckeVTPrEZ6hsXjOgPFlJm9ilgHUB6CCCiIwggTTMIIDu6ADAgECAhBi50XpIWUh
# PJcfXEkK6hKlMA0GCSqGSIb3DQEBCwUAMIGEMQswCQYDVQQGEwJVUzEdMBsGA1UE
# ChMUU3ltYW50ZWMgQ29ycG9yYXRpb24xHzAdBgNVBAsTFlN5bWFudGVjIFRydXN0
# IE5ldHdvcmsxNTAzBgNVBAMTLFN5bWFudGVjIENsYXNzIDMgU0hBMjU2IENvZGUg
# U2lnbmluZyBDQSAtIEcyMB4XDTE4MDcwOTAwMDAwMFoXDTIxMDcwOTIzNTk1OVow
# gYMxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRQwEgYDVQQHDAtT
# YW50YSBDbGFyYTEbMBkGA1UECgwSTlZJRElBIENvcnBvcmF0aW9uMQ8wDQYDVQQL
# DAZJVC1NSVMxGzAZBgNVBAMMEk5WSURJQSBDb3Jwb3JhdGlvbjCCASIwDQYJKoZI
# hvcNAQEBBQADggEPADCCAQoCggEBALEZN63dA47T4i90jZ84CJ/aWUwVtLff8AyP
# YspFfIZGdZYiMgdb8A5tBh7653y0G/LZL6CVUkgejcpvBU/Dl/52a+gSWy2qJ2bH
# jMFMKCyQDhdpCAKMOUKSC9rfzm4cFeA9ct91LQCAait4LhLlZt/HF7aG+r0FgCZa
# HJjJvE7KNY9G4AZXxjSt8CXS8/8NQMANqjLX1r+F+Hl8PzQ1fVx0mMsbdtaIV4Pj
# 5flAeTUnz6+dCTx3vTUo8MYtkS2UBaQv7t7H2B7iwJDakEQKk1XHswJdeqG0osDU
# z6+NVks7uWE1N8UIhvzbw0FEX/U2kpfyWaB/J3gMl8rVR8idPj8CAwEAAaOCAT4w
# ggE6MAkGA1UdEwQCMAAwDgYDVR0PAQH/BAQDAgeAMBMGA1UdJQQMMAoGCCsGAQUF
# BwMDMGEGA1UdIARaMFgwVgYGZ4EMAQQBMEwwIwYIKwYBBQUHAgEWF2h0dHBzOi8v
# ZC5zeW1jYi5jb20vY3BzMCUGCCsGAQUFBwICMBkMF2h0dHBzOi8vZC5zeW1jYi5j
# b20vcnBhMB8GA1UdIwQYMBaAFNTABiJJ6zlL3ZPiXKG4R3YJcgNYMCsGA1UdHwQk
# MCIwIKAeoByGGmh0dHA6Ly9yYi5zeW1jYi5jb20vcmIuY3JsMFcGCCsGAQUFBwEB
# BEswSTAfBggrBgEFBQcwAYYTaHR0cDovL3JiLnN5bWNkLmNvbTAmBggrBgEFBQcw
# AoYaaHR0cDovL3JiLnN5bWNiLmNvbS9yYi5jcnQwDQYJKoZIhvcNAQELBQADggEB
# AIJKh5vKJdhHJtMzATmc1BmXIQ3RaJONOZ5jMHn7HOkYU1JP0OIzb4pXXkH8Xwfr
# K6bnd72IhcteyksvKsGpSvK0PBBwzodERTAu1Os2N+EaakxQwV/xtqDm1E3IhjHk
# fRshyKKzmFk2Ci323J4lHtpWUj5Hz61b8gd72jH7xnihGi+LORJ2uRNZ3YuqMNC3
# SBC8tAyoJqEoTJirULUCXW6wX4XUm5P2sx+htPw7szGblVKbQ+PFinNGnsSEZeKz
# D8jUb++1cvgTKH59Y6lm43nsJjkZU77tNqyq4ABwgQRk6lt8cS2PPwjZvTmvdnla
# ZhR0K4of+pQaUQHXVIBdji8wggVHMIIEL6ADAgECAhB8GzU1SufbdOdBXxFpymuo
# MA0GCSqGSIb3DQEBCwUAMIG9MQswCQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNp
# Z24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0IE5ldHdvcmsxOjA4BgNV
# BAsTMShjKSAyMDA4IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl
# IG9ubHkxODA2BgNVBAMTL1ZlcmlTaWduIFVuaXZlcnNhbCBSb290IENlcnRpZmlj
# YXRpb24gQXV0aG9yaXR5MB4XDTE0MDcyMjAwMDAwMFoXDTI0MDcyMTIzNTk1OVow
# gYQxCzAJBgNVBAYTAlVTMR0wGwYDVQQKExRTeW1hbnRlYyBDb3Jwb3JhdGlvbjEf
# MB0GA1UECxMWU3ltYW50ZWMgVHJ1c3QgTmV0d29yazE1MDMGA1UEAxMsU3ltYW50
# ZWMgQ2xhc3MgMyBTSEEyNTYgQ29kZSBTaWduaW5nIENBIC0gRzIwggEiMA0GCSqG
# SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDXlUPU3N9nrjn7UqS2JjEEcOm3jlsqujdp
# NZWPu8Aw54bYc7vf69F2P4pWjustS/BXGE6xjaUz0wt1I9VqeSfdo9P3Dodltd6t
# HPH1NbQiUa8iocFdS5B/wFlOq515qQLXHkmxO02H/sJ4q7/vUq6crwjZOeWaUT5p
# XzAQTnFjbFjh8CAzGw90vlvLEuHbjMSAlHK79kWansElC/ujHJ7YpglwcezAR0yP
# fcPeGc4+7gRyjhfT//CyBTIZTNOwHJ/+pXggQnBBsCaMbwDIOgARQXpBsKeKkQSg
# mXj0d7TzYCrmbFAEtxRg/w1R9KiLhP4h2lxeffUpeU+wRHRvbXL/AgMBAAGjggF4
# MIIBdDAuBggrBgEFBQcBAQQiMCAwHgYIKwYBBQUHMAGGEmh0dHA6Ly9zLnN5bWNk
# LmNvbTASBgNVHRMBAf8ECDAGAQH/AgEAMGYGA1UdIARfMF0wWwYLYIZIAYb4RQEH
# FwMwTDAjBggrBgEFBQcCARYXaHR0cHM6Ly9kLnN5bWNiLmNvbS9jcHMwJQYIKwYB
# BQUHAgIwGRoXaHR0cHM6Ly9kLnN5bWNiLmNvbS9ycGEwNgYDVR0fBC8wLTAroCmg
# J4YlaHR0cDovL3Muc3ltY2IuY29tL3VuaXZlcnNhbC1yb290LmNybDATBgNVHSUE
# DDAKBggrBgEFBQcDAzAOBgNVHQ8BAf8EBAMCAQYwKQYDVR0RBCIwIKQeMBwxGjAY
# BgNVBAMTEVN5bWFudGVjUEtJLTEtNzI0MB0GA1UdDgQWBBTUwAYiSes5S92T4lyh
# uEd2CXIDWDAfBgNVHSMEGDAWgBS2d/ppSEefUxLVwuoHMnYH0ZcHGTANBgkqhkiG
# 9w0BAQsFAAOCAQEAf+vKp+qLdkLrPo4gVDDjt7nc+kg+FscPRZUQzSeGo2bzAu1x
# (base64-encoded Authenticode certificate/signature content omitted)
# SIG # End signature block
|
NVIDIA-Omniverse/PhysX/blast/tools/packman/bootstrap/install_package.py | # Copyright 2019 NVIDIA CORPORATION
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import zipfile
import tempfile
import sys
import os
import stat
import time
from typing import Any, Callable
RENAME_RETRY_COUNT = 100
RENAME_RETRY_DELAY = 0.1
logging.basicConfig(level=logging.WARNING, format="%(message)s")
logger = logging.getLogger("install_package")
def remove_directory_item(path):
if os.path.islink(path) or os.path.isfile(path):
try:
os.remove(path)
except PermissionError:
# make sure we have access and try again:
os.chmod(path, stat.S_IRWXU)
os.remove(path)
else:
# try first to delete the dir directly because this works for folder junctions; otherwise we would follow the junctions and cause destruction!
clean_out_folder = False
try:
# make sure we have access preemptively - this is necessary because recursing into a directory without permissions
# will only lead to heartache
os.chmod(path, stat.S_IRWXU)
os.rmdir(path)
except OSError:
clean_out_folder = True
if clean_out_folder:
# we should make sure the directory is empty
names = os.listdir(path)
for name in names:
fullname = os.path.join(path, name)
remove_directory_item(fullname)
# now try to again get rid of the folder - and not catch if it raises:
os.rmdir(path)
class StagingDirectory:
def __init__(self, staging_path):
self.staging_path = staging_path
self.temp_folder_path = None
os.makedirs(staging_path, exist_ok=True)
def __enter__(self):
self.temp_folder_path = tempfile.mkdtemp(prefix="ver-", dir=self.staging_path)
return self
def get_temp_folder_path(self):
return self.temp_folder_path
# Renames the temp staging folder to folder_name. The parent path must already exist!
def promote_and_rename(self, folder_name):
abs_dst_folder_name = os.path.join(self.staging_path, folder_name)
os.rename(self.temp_folder_path, abs_dst_folder_name)
def __exit__(self, type, value, traceback):
# Remove temp staging folder if it's still there (something went wrong):
path = self.temp_folder_path
if os.path.isdir(path):
remove_directory_item(path)
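# Illustrative usage of StagingDirectory (a sketch, not part of this script;
# the staging path, extraction step, and version name below are hypothetical):
#
# with StagingDirectory("/var/packman-repo/chk/mypkg") as staging:
#     extract_archive_somehow(staging.get_temp_folder_path())
#     staging.promote_and_rename("1.0")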
def rename_folder(staging_dir: StagingDirectory, folder_name: str):
try:
staging_dir.promote_and_rename(folder_name)
except OSError as exc:
# if we failed to rename because the folder now exists we can assume that another packman process
# has managed to update the package before us - in all other cases we re-raise the exception
abs_dst_folder_name = os.path.join(staging_dir.staging_path, folder_name)
if os.path.exists(abs_dst_folder_name):
logger.warning(
f"Directory {abs_dst_folder_name} already present, package installation already completed"
)
else:
raise
def call_with_retry(
op_name: str, func: Callable, retry_count: int = 3, retry_delay: float = 20
) -> Any:
retries_left = retry_count
while True:
try:
return func()
except (OSError, IOError) as exc:
logger.warning(f"Failure while executing {op_name} [{str(exc)}]")
if retries_left:
retry_str = "retry" if retries_left == 1 else "retries"
logger.warning(
f"Retrying after {retry_delay} seconds"
f" ({retries_left} {retry_str} left) ..."
)
time.sleep(retry_delay)
else:
logger.error("Maximum retries exceeded, giving up")
raise
retries_left -= 1
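# Illustrative use of call_with_retry (a sketch; temp_dir is hypothetical):
# retry a flaky filesystem operation with the default 3 retries / 20 s delay:
#
# call_with_retry("remove temp dir", lambda: remove_directory_item(temp_dir))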
def rename_folder_with_retry(staging_dir: StagingDirectory, folder_name):
dst_path = os.path.join(staging_dir.staging_path, folder_name)
call_with_retry(
f"rename {staging_dir.get_temp_folder_path()} -> {dst_path}",
lambda: rename_folder(staging_dir, folder_name),
RENAME_RETRY_COUNT,
RENAME_RETRY_DELAY,
)
def install_package(package_path, install_path):
staging_path, version = os.path.split(install_path)
with StagingDirectory(staging_path) as staging_dir:
output_folder = staging_dir.get_temp_folder_path()
with zipfile.ZipFile(package_path, allowZip64=True) as zip_file:
zip_file.extractall(output_folder)
# attempt the rename operation
rename_folder_with_retry(staging_dir, version)
print(f"Package successfully installed to {install_path}")
if __name__ == "__main__":
executable_paths = os.getenv("PATH")
paths_list = executable_paths.split(os.path.pathsep) if executable_paths else []
target_path_np = os.path.normpath(sys.argv[2])
target_path_np_nc = os.path.normcase(target_path_np)
for exec_path in paths_list:
if os.path.normcase(os.path.normpath(exec_path)) == target_path_np_nc:
raise RuntimeError(f"packman will not install to executable path '{exec_path}'")
install_package(sys.argv[1], target_path_np)
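# Invocation sketch (arguments are hypothetical):
# python install_package.py downloaded-package.zip /var/packman-repo/chk/mypkg/1.0
# argv[1] is the zip archive to extract; argv[2] is the final install path. The
# PATH check above refuses to install into any directory on the executable path.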
|
NVIDIA-Omniverse/PhysX/blast/PACKAGE-LICENSES/vhacd-LICENSE.md | Copyright (c) 2011 Khaled Mamou (kmamou at gmail dot com)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. The names of the contributors may not be used to endorse or promote
products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
|
NVIDIA-Omniverse/PhysX/blast/PACKAGE-LICENSES/boost-LICENSE.md | Boost Software License - Version 1.0 - August 17th, 2003
Permission is hereby granted, free of charge, to any person or organization
obtaining a copy of the software and accompanying documentation covered by
this license (the "Software") to use, reproduce, display, distribute,
execute, and transmit the Software, and to prepare derivative works of the
Software, and to permit third-parties to whom the Software is furnished to
do so, all subject to the following:
The copyright notices in the Software and this entire statement, including
the above license grant, this restriction and the following disclaimer,
must be included in all copies of the Software, in whole or in part, and
all derivative works of the Software, unless such copies or derivative
works are solely in the form of machine-executable object code generated by
a source language processor.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
|
NVIDIA-Omniverse/PhysX/blast/PACKAGE-LICENSES/blast-sdk-LICENSE.md | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
|
NVIDIA-Omniverse/PhysX/blast/source/test/src/TkBaseTest.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef TKBASETEST_H
#define TKBASETEST_H
#include "NvBlastTk.h"
#include "NvBlastTkActor.h"
#include "NvTaskManager.h"
#include "NvBlastTkGroupTaskManager.h"
#include "NvCpuDispatcher.h"
#include "NsGlobals.h"
#include "BlastBaseTest.h"
#include "NvBlastExtDamageShaders.h"
#include "NvBlastIndexFns.h"
#include "TestProfiler.h"
#include "NvTask.h"
#include <thread>
#include <algorithm>
#include <queue>
#include <mutex>
#include <condition_variable>
#include <atomic>
#include <set>    // std::set used by ExpectArrayMatch and TestFamilyTracker
#include <vector> // std::vector used throughout
using namespace Nv::Blast;
using namespace nvidia;
using namespace nvidia::task;
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Helpers
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
NV_INLINE void ExpectArrayMatch(TkObject** arr0, size_t size0, TkObject** arr1, size_t size1)
{
EXPECT_TRUE(size0 == size1);
std::set<TkObject*> set0(arr0, arr0 + size0);
std::set<TkObject*> set1(arr1, arr1 + size1);
EXPECT_TRUE(set0 == set1);
}
class TestCpuDispatcher : public NvCpuDispatcher
{
struct SharedContext
{
std::queue<NvBaseTask*> workQueue;
std::condition_variable cv;
std::mutex mutex;
std::atomic<bool> quit;
};
void submitTask(NvBaseTask& task) override
{
if (m_threads.size() > 0)
{
std::unique_lock<std::mutex> lk(m_context.mutex);
m_context.workQueue.push(&task);
lk.unlock();
m_context.cv.notify_one();
}
else
{
TEST_ZONE_BEGIN(task.getName());
task.run();
TEST_ZONE_END(task.getName());
task.release();
}
}
uint32_t getWorkerCount() const override { return (uint32_t)m_threads.size(); }
static void execute(SharedContext& context)
{
while (!context.quit)
{
std::unique_lock<std::mutex> lk(context.mutex);
if (!context.workQueue.empty())
{
NvBaseTask& task = *context.workQueue.front();
context.workQueue.pop();
lk.unlock();
TEST_ZONE_BEGIN(task.getName());
task.run();
TEST_ZONE_END(task.getName());
task.release();
}
else
{
// shared variables must be modified under the mutex in order
// to correctly publish the modification to the waiting thread
context.cv.wait(lk, [&]{ return !context.workQueue.empty() || context.quit; });
}
}
}
SharedContext m_context;
std::vector<std::thread> m_threads;
public:
TestCpuDispatcher(uint32_t numWorkers)
{
m_context.quit = false;
for (uint32_t i = 0; i < numWorkers; ++i)
{
m_threads.push_back(std::thread(execute, std::ref(m_context)));
}
}
void release()
{
std::unique_lock<std::mutex> lk(m_context.mutex);
m_context.quit = true;
lk.unlock();
m_context.cv.notify_all();
for (std::thread& t : m_threads)
{
t.join();
}
delete this;
}
};
struct CSParams
{
CSParams(uint32_t axis_, float coord_) : axis(axis_), coord(coord_) {}
uint32_t axis;
float coord;
};
static void CubeSlicer(NvBlastFractureBuffers* outbuf, const NvBlastGraphShaderActor* actor, const void* params)
{
uint32_t bondFractureCount = 0;
uint32_t bondFractureCountMax = outbuf->bondFractureCount;
const CSParams& p = *reinterpret_cast<const CSParams*> (reinterpret_cast<const NvBlastExtProgramParams*>(params)->damageDesc);
uint32_t currentNodeIndex = actor->firstGraphNodeIndex;
while (!Nv::Blast::isInvalidIndex(currentNodeIndex))
{
for (uint32_t adj = actor->adjacencyPartition[currentNodeIndex]; adj < actor->adjacencyPartition[currentNodeIndex + 1]; ++adj)
{
if (currentNodeIndex < actor->adjacentNodeIndices[adj])
{
if (actor->assetBonds[actor->adjacentBondIndices[adj]].centroid[p.axis] == p.coord && bondFractureCount < bondFractureCountMax)
{
NvBlastBondFractureData& data = outbuf->bondFractures[bondFractureCount++];
data.userdata = 0;
data.nodeIndex0 = currentNodeIndex;
data.nodeIndex1 = actor->adjacentNodeIndices[adj];
data.health = 1.0f;
}
}
}
currentNodeIndex = actor->graphNodeIndexLinks[currentNodeIndex];
}
outbuf->bondFractureCount = bondFractureCount;
outbuf->chunkFractureCount = 0;
//printf("slicer outcount %d\n", bondFractureCount);
}
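// Illustrative use of CubeSlicer (a sketch): fracture every bond whose centroid
// lies on the plane y == 0, using the TkBaseTest helper defined further below:
// CSParams cs(1, 0.0f); // axis 1 = y, plane coordinate 0
// NvBlastExtProgramParams csParams = { &cs, nullptr };
// actor->damage(getCubeSlicerProgram(), &csParams);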
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// TkBaseTest Class
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template<int FailLevel, int Verbosity>
class TkBaseTest : public BlastBaseTest<FailLevel, Verbosity>
{
public:
TkBaseTest() : m_cpuDispatcher(), m_taskman(nullptr)
{
}
virtual void SetUp() override
{
NvBlastInternalProfilerSetDetail(Nv::Blast::InternalProfilerDetail::LOW);
NvBlastInternalProfilerSetPlatformEnabled(true);
m_cpuDispatcher = new TestCpuDispatcher(4);
m_taskman = NvTaskManager::createTaskManager(*NvBlastGlobalGetErrorCallback(), m_cpuDispatcher);
m_groupTM = TkGroupTaskManager::create(*m_taskman);
}
virtual void TearDown() override
{
m_groupTM->release();
m_cpuDispatcher->release();
if (m_taskman) m_taskman->release();
}
void createFramework()
{
TkFramework* framework = NvBlastTkFrameworkCreate();
EXPECT_TRUE(framework != nullptr);
EXPECT_EQ(framework, NvBlastTkFrameworkGet());
}
void releaseFramework()
{
TkFramework* framework = NvBlastTkFrameworkGet();
framework->release();
EXPECT_TRUE(NvBlastTkFrameworkGet() == nullptr);
}
void createTestAssets(bool addInternalJoints = false)
{
const uint8_t cube1BondDescFlags_internalJoints[12] =
{
TkAssetDesc::NoFlags,
TkAssetDesc::NoFlags,
TkAssetDesc::NoFlags,
TkAssetDesc::NoFlags,
TkAssetDesc::NoFlags,
TkAssetDesc::NoFlags,
TkAssetDesc::NoFlags,
TkAssetDesc::NoFlags,
TkAssetDesc::BondJointed,
TkAssetDesc::BondJointed,
TkAssetDesc::BondJointed,
TkAssetDesc::BondJointed
};
const uint32_t assetDescCount = sizeof(g_assetDescs) / sizeof(g_assetDescs[0]);
TkFramework* framework = NvBlastTkFrameworkGet();
for (uint32_t i = 0; i < assetDescCount; ++i)
{
TkAssetDesc desc;
reinterpret_cast<NvBlastAssetDesc&>(desc) = g_assetDescs[i];
desc.bondFlags = addInternalJoints ? cube1BondDescFlags_internalJoints : nullptr;
testAssets.push_back(framework->createAsset(desc));
EXPECT_TRUE(testAssets[i] != nullptr);
}
}
TkAsset* createCubeAsset(size_t maxDepth, size_t width, int32_t supportDepth = -1, bool addInternalJoints = false)
{
TkFramework* framework = NvBlastTkFrameworkGet();
GeneratorAsset cube;
TkAssetDesc assetDesc;
generateCube(cube, assetDesc, maxDepth, width, supportDepth);
std::vector<uint8_t> bondFlags(assetDesc.bondCount);
std::fill(bondFlags.begin(), bondFlags.end(), addInternalJoints ? 1 : 0);
assetDesc.bondFlags = bondFlags.data();
TkAsset* cubeAsset = framework->createAsset(assetDesc);
testAssets.push_back(cubeAsset);
return cubeAsset;
}
void releaseTestAssets()
{
for (uint32_t i = 0; i < testAssets.size(); ++i)
{
testAssets[i]->release();
}
testAssets.clear();
}
NvBlastExtRadialDamageDesc getRadialDamageDesc(float x, float y, float z, float minRadius = 10.0f, float maxRadius = 10.0f, float damage = 1.0f)
{
NvBlastExtRadialDamageDesc desc;
desc.position[0] = x;
desc.position[1] = y;
desc.position[2] = z;
desc.minRadius = minRadius;
desc.maxRadius = maxRadius;
desc.damage = damage;
return desc;
}
NvBlastExtShearDamageDesc getShearDamageDesc(float x, float y, float z, float shearX = 1.0f, float shearY = 0.0f, float shearZ = 0.0f, float minRadius = 10.0f, float maxRadius = 10.0f, float damage = 1.0f)
{
NvBlastExtShearDamageDesc desc;
desc.position[0] = x;
desc.position[1] = y;
desc.position[2] = z;
desc.normal[0] = shearX;
desc.normal[1] = shearY;
desc.normal[2] = shearZ;
desc.minRadius = minRadius;
desc.maxRadius = maxRadius;
desc.damage = damage;
return desc;
}
static const NvBlastDamageProgram& getCubeSlicerProgram()
{
static NvBlastDamageProgram program = { CubeSlicer, nullptr };
return program;
}
static const NvBlastDamageProgram& getFalloffProgram()
{
static NvBlastDamageProgram program = { NvBlastExtFalloffGraphShader, NvBlastExtFalloffSubgraphShader };
return program;
}
static const NvBlastDamageProgram& getShearProgram()
{
static NvBlastDamageProgram program = { NvBlastExtShearGraphShader, NvBlastExtShearSubgraphShader };
return program;
}
static const NvBlastExtMaterial* getDefaultMaterial()
{
static NvBlastExtMaterial material;
return &material;
}
TkFamily* familySerialization(TkFamily* family);
std::vector<TkAsset*> testAssets;
TestCpuDispatcher* m_cpuDispatcher;
NvTaskManager* m_taskman;
TkGroupTaskManager* m_groupTM;
};
#define TkNvErrorMask (NvErrorCode::eINVALID_PARAMETER | NvErrorCode::eINVALID_OPERATION | NvErrorCode::eOUT_OF_MEMORY | NvErrorCode::eINTERNAL_ERROR | NvErrorCode::eABORT)
#define TkNvWarningMask (NvErrorCode::eDEBUG_WARNING | NvErrorCode::ePERF_WARNING)
typedef TkBaseTest<NvBlastMessage::Error, 1> TkTestAllowWarnings;
typedef TkBaseTest<NvBlastMessage::Warning, 1> TkTestStrict;
class TestFamilyTracker : public TkEventListener
{
public:
TestFamilyTracker() {}
typedef std::pair<TkFamily*, uint32_t> Actor;
virtual void receive(const TkEvent* events, uint32_t eventCount) override
{
TEST_ZONE_BEGIN("TestFamilyTracker");
for (size_t i = 0; i < eventCount; ++i)
{
const TkEvent& e = events[i];
switch (e.type)
{
case (TkEvent::Split):
{
const TkSplitEvent* splitEvent = e.getPayload<TkSplitEvent>();
EXPECT_EQ((size_t)1, actors.erase(Actor(splitEvent->parentData.family, splitEvent->parentData.index)));
for (size_t j = 0; j < splitEvent->numChildren; ++j) // 'j' avoids shadowing the outer event loop index
{
TkActor* a = splitEvent->children[j];
EXPECT_TRUE(actors.insert(Actor(&a->getFamily(), a->getIndex())).second);
}
break;
}
case (TkEvent::FractureCommand):
{
const TkFractureCommands* fracEvent = e.getPayload<TkFractureCommands>();
EXPECT_TRUE(!isInvalidIndex(fracEvent->tkActorData.index));
#if 0
printf("chunks broken: %d\n", fracEvent->buffers.chunkFractureCount);
printf("bonds broken: %d\n", fracEvent->buffers.bondFractureCount);
for (uint32_t t = 0; t < fracEvent->buffers.bondFractureCount; t++)
{
//printf("%x ", fracEvent->buffers.bondFractures[t].userdata);
}
//printf("\n");
#endif
break;
}
case (TkEvent::FractureEvent):
{
const TkFractureEvents* fracEvent = e.getPayload<TkFractureEvents>();
EXPECT_TRUE(!isInvalidIndex(fracEvent->tkActorData.index));
break;
}
case (TkEvent::JointUpdate):
{
const TkJointUpdateEvent* jointEvent = e.getPayload<TkJointUpdateEvent>();
TkJoint* joint = jointEvent->joint;
EXPECT_TRUE(joint != nullptr);
switch (jointEvent->subtype)
{
case TkJointUpdateEvent::External:
EXPECT_TRUE(joints.end() == joints.find(joint)); // We should not have this joint yet
joints.insert(joint);
break;
case TkJointUpdateEvent::Changed:
break;
case TkJointUpdateEvent::Unreferenced:
EXPECT_EQ(1, joints.erase(joint));
joint->release();
break;
}
break;
}
default:
break;
}
}
TEST_ZONE_END("TestFamilyTracker");
}
void insertActor(const TkActor* actor)
{
actors.insert(TestFamilyTracker::Actor(&actor->getFamily(), actor->getIndex()));
}
void eraseActor(const TkActor* actor)
{
actors.erase(TestFamilyTracker::Actor(&actor->getFamily(), actor->getIndex()));
}
std::set<Actor> actors;
std::set<TkJoint*> joints;
};
#endif // #ifndef TKBASETEST_H
|
NVIDIA-Omniverse/PhysX/blast/source/test/src/BlastBaseTest.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef BLASTBASETEST_H
#define BLASTBASETEST_H
#include "NvBlastTkFramework.h"
#include "gtest/gtest.h"
#include "NvBlast.h"
#include "TestAssets.h"
#include "NvBlastGlobals.h"
#include <iostream> // std::cout used by messageLog and reportError
#include <string>   // std::string used by reportError
#include <cstring>  // memset used by alignedZeroedAlloc
template<int FailLevel, int Verbosity>
class BlastBaseTest : public testing::Test, public nvidia::NvErrorCallback
{
public:
BlastBaseTest()
{
NvBlastGlobalSetErrorCallback(this);
}
// A zeroing alloc with the same signature as malloc
static void* alignedZeroedAlloc(size_t size)
{
return memset(NVBLAST_ALLOC(size), 0, size);
}
static void alignedFree(void* mem)
{
NVBLAST_FREE(mem);
}
// Message log for blast functions
static void messageLog(int type, const char* msg, const char* file, int line)
{
if (FailLevel >= type)
{
switch (type)
{
case NvBlastMessage::Error: EXPECT_TRUE(false) << "NvBlast Error message in " << file << "(" << line << "): " << msg << "\n"; break;
case NvBlastMessage::Warning: EXPECT_TRUE(false) << "NvBlast Warning message in " << file << "(" << line << "): " << msg << "\n"; break;
case NvBlastMessage::Info: EXPECT_TRUE(false) << "NvBlast Info message in " << file << "(" << line << "): " << msg << "\n"; break;
case NvBlastMessage::Debug: EXPECT_TRUE(false) << "NvBlast Debug message in " << file << "(" << line << "): " << msg << "\n"; break;
}
}
else
if (Verbosity > 0)
{
switch (type)
{
case NvBlastMessage::Error: std::cout << "NvBlast Error message in " << file << "(" << line << "): " << msg << "\n"; break;
case NvBlastMessage::Warning: std::cout << "NvBlast Warning message in " << file << "(" << line << "): " << msg << "\n"; break;
case NvBlastMessage::Info: std::cout << "NvBlast Info message in " << file << "(" << line << "): " << msg << "\n"; break;
case NvBlastMessage::Debug: std::cout << "NvBlast Debug message in " << file << "(" << line << "): " << msg << "\n"; break;
}
}
}
// nvidia::NvErrorCallback interface
virtual void reportError(nvidia::NvErrorCode::Enum code, const char* message, const char* file, int line) override
{
uint32_t failMask = 0;
switch (FailLevel)
{
case NvBlastMessage::Debug:
case NvBlastMessage::Info: failMask |= nvidia::NvErrorCode::eDEBUG_INFO; // fall through
case NvBlastMessage::Warning: failMask |= nvidia::NvErrorCode::eDEBUG_WARNING; // fall through
case NvBlastMessage::Error: failMask |= nvidia::NvErrorCode::eABORT | nvidia::NvErrorCode::eINTERNAL_ERROR | nvidia::NvErrorCode::eOUT_OF_MEMORY | nvidia::NvErrorCode::eINVALID_OPERATION | nvidia::NvErrorCode::eINVALID_PARAMETER;
default: break;
}
if (!(failMask & code) && Verbosity <= 0)
{
return;
}
std::string output = "NvBlast Test ";
switch (code)
{
case nvidia::NvErrorCode::eNO_ERROR: break;
case nvidia::NvErrorCode::eDEBUG_INFO: output += "Debug Info"; break;
case nvidia::NvErrorCode::eDEBUG_WARNING: output += "Debug Warning"; break;
case nvidia::NvErrorCode::eINVALID_PARAMETER: output += "Invalid Parameter"; break;
case nvidia::NvErrorCode::eINVALID_OPERATION: output += "Invalid Operation"; break;
case nvidia::NvErrorCode::eOUT_OF_MEMORY: output += "Out of Memory"; break;
case nvidia::NvErrorCode::eINTERNAL_ERROR: output += "Internal Error"; break;
case nvidia::NvErrorCode::eABORT: output += "Abort"; break;
case nvidia::NvErrorCode::ePERF_WARNING: output += "Perf Warning"; break;
default: FAIL();
}
output += std::string(" message in ") + file + "(" + std::to_string(line) + "): " + message + "\n";
if (failMask & code)
{
EXPECT_TRUE(false) << output;
}
else
{
std::cout << output;
}
}
};
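// Typical specialization (a sketch, mirroring the typedefs used by the tests):
// treat warnings and above as failures, with verbosity 1 to echo non-failing messages:
// typedef BlastBaseTest<NvBlastMessage::Warning, 1> BlastTestStrict;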
#endif // #ifndef BLASTBASETEST_H
|
NVIDIA-Omniverse/PhysX/blast/source/test/src/perf/BlastBasePerfTest.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef BLASTBASEPERFTEST_H
#define BLASTBASEPERFTEST_H
#include "BlastBaseTest.h"
#include <fstream>
#include <algorithm>
#include <map>
#include <vector>   // DataSet::m_data
#include <string>   // data set names
#include <limits>   // std::numeric_limits in Stats::reset
#include <cmath>    // pow/sqrt in DataSet::calculateStats
#include <iostream> // std::cout in DataCollection::test and PerfTestEngine
template<typename T>
class DataCollection
{
public:
struct Stats
{
double m_mean;
double m_sdev;
double m_min;
double m_max;
Stats()
{
reset();
}
void reset()
{
m_mean = 0.0;
m_sdev = 0.0;
m_min = std::numeric_limits<double>::max();
m_max = -std::numeric_limits<double>::max();
}
};
struct DataSet
{
std::vector<T> m_data;
Stats m_stats;
void calculateStats()
{
m_stats.reset();
if (m_data.size() > 0)
{
if (m_data.size() > 1) // Remove top half of values to eliminate outliers
{
std::sort(m_data.begin(), m_data.end());
m_data.resize(m_data.size() / 2);
}
for (size_t i = 0; i < m_data.size(); ++i)
{
m_stats.m_mean += m_data[i];
m_stats.m_min = std::min(m_stats.m_min, (double)m_data[i]);
m_stats.m_max = std::max(m_stats.m_max, (double)m_data[i]);
}
m_stats.m_mean /= m_data.size();
if (m_data.size() > 1)
{
for (size_t i = 0; i < m_data.size(); ++i)
{
m_stats.m_sdev += pow(m_data[i] - m_stats.m_mean, 2);
}
m_stats.m_sdev = sqrt(m_stats.m_sdev / (m_data.size() - 1));
}
}
}
};
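// Note: with more than one sample, calculateStats() above discards the slower
// half of the measurements before computing mean/s.d./min/max, so the reported
// stats describe the faster, less noisy half of the runs.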
DataSet& getDataSet(const std::string& name)
{
auto entry = m_lookup.find(name);
if (entry != m_lookup.end())
{
return m_dataSets[entry->second];
}
m_lookup[name] = m_dataSets.size();
m_dataSets.push_back(DataSet());
return m_dataSets.back();
}
bool dataSetExists(const std::string& name) const
{
return m_lookup.find(name) != m_lookup.end();
}
void calculateStats()
{
for (size_t i = 0; i < m_dataSets.size(); ++i)
{
m_dataSets[i].calculateStats();
}
}
void test(DataCollection<int64_t>& calibration, double relativeThreshold = 0.10, double tickThreshold = 100.0)
{
for (auto entry = m_lookup.begin(); entry != m_lookup.end(); ++entry)
{
const std::string& name = entry->first;
DataCollection<int64_t>::DataSet& data = m_dataSets[entry->second];
data.calculateStats();
if (!calibration.dataSetExists(name))
{
FAIL() << "PerfTest is not calibrated!" << std::endl << "Missing DataSet: " << name << std::endl;
}
const DataCollection<int64_t>::DataSet& cal = calibration.getDataSet(name);
const double calMin = cal.m_stats.m_min;
if (data.m_stats.m_min > (1.0 + relativeThreshold) * calMin && data.m_stats.m_min - calMin > tickThreshold)
{
std::cout << name << ":" << std::endl;
std::cout << "PERF - : Timing (" << data.m_stats.m_min << ") exceeds recorded min (" << calMin << ") by more than allowed relative threshold (" << relativeThreshold*100 << "%) and absolute threshold (" << tickThreshold << " ticks)." << std::endl;
EXPECT_FALSE(data.m_stats.m_min > (1.0 + relativeThreshold) * calMin && data.m_stats.m_min - calMin > tickThreshold)
<< name << ":" << std::endl
<< "PERF - : Timing (" << data.m_stats.m_min << ") exceeds recorded min (" << calMin << ") by more than allowed relative threshold (" << relativeThreshold * 100 << "%) and absolute threshold (" << tickThreshold << " ticks)." << std::endl;
}
else
if (data.m_stats.m_min < (1.0 - relativeThreshold) * calMin && data.m_stats.m_min - calMin < -tickThreshold)
{
std::cout << name << ":" << std::endl;
std::cout << "PERF + : Timing (" << data.m_stats.m_min << ") is less than the recorded min (" << calMin << ") by more than the relative threshold (" << relativeThreshold * 100 << "%) and absolute threshold (" << tickThreshold << " ticks)." << std::endl;
}
}
}
size_t size() const
{
return m_dataSets.size();
}
void clear()
{
m_lookup.clear();
m_dataSets.clear();
}
template<class S>
friend std::istream& operator >> (std::istream& stream, DataCollection<S>& c);
template<class S>
friend std::ostream& operator << (std::ostream& stream, const DataCollection<S>& c);
private:
std::map<std::string, size_t> m_lookup;
std::vector< DataSet > m_dataSets;
};
template<typename T>
std::istream& operator >> (std::istream& stream, DataCollection<T>& c)
{
std::string name;
while (!stream.eof())
{
std::getline(stream >> std::ws, name);
typename DataCollection<T>::DataSet& dataSet = c.getDataSet(name);
stream >> dataSet.m_stats.m_mean >> dataSet.m_stats.m_sdev >> dataSet.m_stats.m_min >> dataSet.m_stats.m_max >> std::ws;
}
return stream;
}
template<typename T>
std::ostream& operator << (std::ostream& stream, const DataCollection<T>& c)
{
for (auto entry = c.m_lookup.begin(); entry != c.m_lookup.end(); ++entry)
{
const std::string& name = entry->first;
stream << name.c_str() << std::endl;
const typename DataCollection<T>::DataSet& data = c.m_dataSets[entry->second];
stream << data.m_stats.m_mean << " " << data.m_stats.m_sdev << " " << data.m_stats.m_min << " " << data.m_stats.m_max << std::endl;
}
return stream;
}
static const char* getPlatformSuffix()
{
#if NV_WIN32
return "win32";
#elif NV_WIN64
return "win64";
#elif NV_LINUX
#if NV_X64
return "linux64";
#else
return "linux32";
#endif
#else
return "gen";
#endif
}
static const char* getPlatformRoot()
{
#if NV_LINUX
return "../../";
#else
return "../../../";
#endif
}
static std::string defaultRelativeDataPath()
{
const char* dataDir = "test/data/";
std::string rootDir = getPlatformRoot();
return rootDir + dataDir + getPlatformSuffix() + "/";
}
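// For example, on 64-bit Windows this resolves to "../../../test/data/win64/"
// (illustrative; the effective location depends on the working directory).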
class PerfTestEngine
{
public:
PerfTestEngine(const char* collectionName) : m_calibrate(false)
{
m_filename = defaultRelativeDataPath() + std::string(collectionName) + "_" + getPlatformSuffix() + ".cal";
auto argvs = testing::internal::GetArgvs();
size_t argCount = argvs.size();
for (size_t argNum = 0; argNum < argCount; ++argNum)
{
if (argvs[argNum] == "-calibrate")
{
m_calibrate = true;
}
else
if (argvs[argNum] == "-calPath")
{
if (++argNum < argCount)
{
m_filename = argvs[argNum];
}
}
}
if (!m_calibrate)
{
std::ifstream in;
in.open(m_filename);
if (in.is_open())
{
std::string name;
std::getline(in, name); // Eat header
std::getline(in, name); // Eat header (2 lines)
in >> m_dataCalibration;
in.close();
}
m_calibrate = m_dataCalibration.size() == 0;
}
if (m_calibrate)
{
std::ofstream out;
out.open(m_filename);
if (out.is_open())
{
out << "Format: timing name (whole line)" << std::endl << "timing mean s.d. min max" << std::endl; // Header (2 lines)
out.close();
}
}
if (m_calibrate)
{
std::cout << "******** Calibration Mode ********\n";
}
else
{
std::cout << "******** Test Mode ********\n";
std::cout << "Read calibration data from " << m_filename << std::endl;
}
}
void endTest()
{
if (m_calibrate)
{
m_dataTempCollection.calculateStats();
std::ofstream out;
out.open(m_filename, std::ofstream::app);
if (out.is_open())
{
out << m_dataTempCollection;
out.close();
std::cout << "Calibration stats written to " << m_filename << std::endl;
}
else
{
std::cout << "Failed to open calibration file " << m_filename << ". Stats not written." << std::endl;
FAIL() << "Failed to open calibration file " << m_filename << ". Stats not written." << std::endl;
}
}
else
{
m_dataTempCollection.test(m_dataCalibration);
}
m_dataTempCollection.clear();
}
void reportData(const std::string& name, int64_t data)
{
m_dataTempCollection.getDataSet(name).m_data.push_back(data);
}
private:
std::string m_filename;
bool m_calibrate;
DataCollection<int64_t> m_dataTempCollection;
DataCollection<int64_t> m_dataCalibration;
};
template<int FailLevel, int Verbosity>
class BlastBasePerfTest : public BlastBaseTest<FailLevel, Verbosity>
{
public:
/**
This function creates/destroys and returns the PerfTestEngine held in a local static variable (works header-only).
It keeps the PerfTestEngine alive through the life span of the gtest TestCase.
*/
static PerfTestEngine* getEngineDeadOrAlive(bool alive = true)
{
static PerfTestEngine* engine = nullptr;
if (alive && !engine)
{
engine = new PerfTestEngine(::testing::UnitTest::GetInstance()->current_test_case()->name());
}
else if (!alive && engine)
{
delete engine;
engine = nullptr;
}
return engine;
}
static void SetUpTestCase()
{
getEngineDeadOrAlive();
}
static void TearDownTestCase()
{
getEngineDeadOrAlive(false);
}
void TearDown() override
{
getEngineDeadOrAlive()->endTest();
}
void reportData(const std::string& name, int64_t data)
{
getEngineDeadOrAlive()->reportData(name, data);
}
};
#endif // #ifndef BLASTBASEPERFTEST_H
|
NVIDIA-Omniverse/PhysX/blast/source/test/src/perf/SolverPerfTests.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include "BlastBasePerfTest.h"
#include "TestAssets.h"
#include "NvBlastExtDamageShaders.h"
#include <memory>
#include <set>     // std::set of actors
#include <cstdlib> // srand/rand
static void blast
(
std::set<NvBlastActor*>& actorsToDamage,
GeneratorAsset* testAsset,
GeneratorAsset::Vec3 localPos,
float minRadius, float maxRadius,
float compressiveDamage,
NvBlastTimers& timers
)
{
std::vector<NvBlastChunkFractureData> chunkEvents; /* sized to the number of lower-support chunks */
std::vector<NvBlastBondFractureData> bondEvents; /* sized to the number of bonds */
chunkEvents.resize(testAsset->solverChunks.size());
bondEvents.resize(testAsset->solverBonds.size());
NvBlastExtRadialDamageDesc damage[] = {
compressiveDamage,
{ localPos.x, localPos.y, localPos.z },
minRadius,
maxRadius
};
NvBlastExtProgramParams programParams =
{
damage,
nullptr
};
NvBlastDamageProgram program = {
NvBlastExtFalloffGraphShader,
nullptr
};
std::vector<char> splitScratch;
std::vector<NvBlastActor*> newActors(testAsset->solverChunks.size());
size_t totalNewActorsCount = 0;
for (std::set<NvBlastActor*>::iterator k = actorsToDamage.begin(); k != actorsToDamage.end();)
{
NvBlastActor* actor = *k;
NvBlastFractureBuffers events = { (uint32_t)bondEvents.size(), (uint32_t)chunkEvents.size(), bondEvents.data(), chunkEvents.data() };
NvBlastActorGenerateFracture(&events, actor, program, &programParams, nullptr, &timers);
NvBlastActorApplyFracture(&events, actor, &events, nullptr, &timers);
bool removeActor = false;
if (events.bondFractureCount + events.chunkFractureCount > 0)
{
splitScratch.resize((size_t)NvBlastActorGetRequiredScratchForSplit(actor, nullptr));
NvBlastActorSplitEvent result;
result.deletedActor = nullptr;
result.newActors = &newActors[totalNewActorsCount];
const size_t bufferSize = newActors.size() - totalNewActorsCount;
const size_t newActorsCount = NvBlastActorSplit(&result, actor, (uint32_t)bufferSize, splitScratch.data(), nullptr, &timers);
totalNewActorsCount += newActorsCount;
removeActor = newActorsCount > 0;
}
if (removeActor)
{
k = actorsToDamage.erase(k);
}
else
{
++k;
}
}
for (size_t i = 0; i < totalNewActorsCount; ++i)
{
actorsToDamage.insert(newActors[i]);
}
}
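// The loop above exercises the low-level Blast damage pipeline per actor:
// NvBlastActorGenerateFracture -> NvBlastActorApplyFracture -> NvBlastActorSplit,
// replacing each actor that split with its children in the working set.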
typedef BlastBasePerfTest<NvBlastMessage::Warning, 1> BlastBasePerfTestStrict;
class PerfTest : public BlastBasePerfTestStrict
{
public:
void damageLeafSupportActors(const char* testName, uint32_t assetCount, uint32_t familyCount, uint32_t damageCount)
{
const float relativeDamageRadius = 0.2f;
const float compressiveDamage = 1.0f;
const uint32_t minChunkCount = 100;
const uint32_t maxChunkCount = 10000;
srand(0);
for (uint32_t assetNum = 0; assetNum < assetCount; ++assetNum)
{
GeneratorAsset cube;
NvBlastAssetDesc desc;
generateRandomCube(cube, desc, minChunkCount, maxChunkCount);
{
std::vector<char> scratch;
scratch.resize((size_t)NvBlastGetRequiredScratchForCreateAsset(&desc, messageLog));
void* mem = alignedZeroedAlloc(NvBlastGetAssetMemorySize(&desc, messageLog));
NvBlastAsset* asset = NvBlastCreateAsset(mem, &desc, scratch.data(), messageLog);
EXPECT_TRUE(asset != nullptr);
// Generate families
for (uint32_t familyNum = 0; familyNum < familyCount; ++familyNum)
{
// create actor
NvBlastActorDesc actorDesc;
actorDesc.initialBondHealths = nullptr;
actorDesc.uniformInitialBondHealth = 1.0f;
actorDesc.initialSupportChunkHealths = nullptr;
actorDesc.uniformInitialLowerSupportChunkHealth = 1.0f;
void* mem = alignedZeroedAlloc(NvBlastAssetGetFamilyMemorySize(asset, messageLog));
NvBlastFamily* family = NvBlastAssetCreateFamily(mem, asset, messageLog);
scratch.resize((size_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(family, messageLog));
EXPECT_TRUE(family != nullptr);
NvBlastActor* actor = NvBlastFamilyCreateFirstActor(family, &actorDesc, scratch.data(), messageLog);
EXPECT_TRUE(actor != nullptr);
// Generate damage
std::set<NvBlastActor*> actors;
actors.insert(actor);
for (uint32_t damageNum = 0; damageNum < damageCount; ++damageNum)
{
GeneratorAsset::Vec3 localPos = cube.extents*GeneratorAsset::Vec3((float)rand() / RAND_MAX - 0.5f, (float)rand() / RAND_MAX - 0.5f, (float)rand() / RAND_MAX - 0.5f);
NvBlastTimers timers;
NvBlastTimersReset(&timers);
blast(actors, &cube, localPos, relativeDamageRadius, relativeDamageRadius*1.2f, compressiveDamage, timers);
const std::string timingName = std::string(testName) + " asset " + std::to_string(assetNum) + " family " + std::to_string(familyNum) + " damage " + std::to_string(damageNum);
BlastBasePerfTestStrict::reportData(timingName + " material", timers.material);
BlastBasePerfTestStrict::reportData(timingName + " fracture", timers.fracture);
BlastBasePerfTestStrict::reportData(timingName + " island", timers.island);
BlastBasePerfTestStrict::reportData(timingName + " partition", timers.partition);
BlastBasePerfTestStrict::reportData(timingName + " visibility", timers.visibility);
}
// Release remaining actors
std::for_each(actors.begin(), actors.end(), [](NvBlastActor* a){ NvBlastActorDeactivate(a, messageLog); });
actors.clear();
alignedFree(family);
}
// Release asset data
alignedFree(asset);
}
}
}
};
#if 0
// Tests
TEST_F(PerfTest, DamageLeafSupportActorsTestVisibility)
{
const int trialCount = 1000;
std::cout << "Trial (of " << trialCount << "): ";
for (int trial = 1; trial <= trialCount; ++trial)
{
if (trial % 100 == 0)
{
std::cout << trial << ".. ";
std::cout.flush();
}
damageLeafSupportActors(test_info_->name(), 4, 4, 5);
}
std::cout << "done." << std::endl;
}
#endif |
NVIDIA-Omniverse/PhysX/blast/source/test/src/perf/DamagePerfTests.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include "BlastBasePerfTest.h"
#include "NvBlastExtDamageShaders.h"
#include "NvBlastExtSerialization.h"
#include "NvBlastTime.h"
#include "NvVec3.h"
#include "NvBounds3.h"
#include <memory>
#include <random>
#include <cstdio>
#include <set>     // std::set of actors
#include <cstdlib> // srand/rand
using namespace Nv::Blast;
using namespace nvidia;
static void blast
(
std::set<NvBlastActor*>& actorsToDamage,
GeneratorAsset* testAsset,
NvBlastExtDamageAccelerator* accelerator,
GeneratorAsset::Vec3 localPos,
float minRadius, float maxRadius,
float compressiveDamage,
std::vector<uint32_t>& history,
NvBlastTimers& timers)
{
std::vector<NvBlastChunkFractureData> chunkEvents; /* sized to the number of lower-support chunks */
std::vector<NvBlastBondFractureData> bondEvents; /* sized to the number of bonds */
chunkEvents.resize(testAsset->solverChunks.size());
bondEvents.resize(testAsset->solverBonds.size());
NvBlastExtRadialDamageDesc damage = {
compressiveDamage,
{ localPos.x, localPos.y, localPos.z },
minRadius,
maxRadius
};
NvBlastExtMaterial material;
NvBlastExtProgramParams programParams =
{
&damage,
&material,
accelerator
};
NvBlastDamageProgram program = {
NvBlastExtFalloffGraphShader,
nullptr
};
std::vector<char> splitScratch;
std::vector<NvBlastActor*> newActors(testAsset->solverChunks.size());
size_t totalNewActorsCount = 0;
for (std::set<NvBlastActor*>::iterator k = actorsToDamage.begin(); k != actorsToDamage.end();)
{
NvBlastActor* actor = *k;
NvBlastFractureBuffers events = { (uint32_t)bondEvents.size(), (uint32_t)chunkEvents.size(), bondEvents.data(), chunkEvents.data() };
NvBlastActorGenerateFracture(&events, actor, program, &programParams, nullptr, &timers);
NvBlastActorApplyFracture(nullptr, actor, &events, nullptr, &timers);
bool removeActor = false;
if (events.bondFractureCount + events.chunkFractureCount > 0)
{
history.push_back(events.bondFractureCount + events.chunkFractureCount);
splitScratch.resize((size_t)NvBlastActorGetRequiredScratchForSplit(actor, nullptr));
NvBlastActorSplitEvent result;
result.deletedActor = nullptr;
result.newActors = &newActors[totalNewActorsCount];
const size_t bufferSize = newActors.size() - totalNewActorsCount;
const size_t newActorsCount = NvBlastActorSplit(&result, actor, (uint32_t)bufferSize, splitScratch.data(), nullptr, &timers);
totalNewActorsCount += newActorsCount;
removeActor = newActorsCount > 0;
}
if (removeActor)
{
k = actorsToDamage.erase(k);
}
else
{
++k;
}
}
for (size_t i = 0; i < totalNewActorsCount; ++i)
{
actorsToDamage.insert(newActors[i]);
}
}
typedef BlastBasePerfTest<NvBlastMessage::Warning, 1> BlastBasePerfTestStrict;
struct PerfResults
{
int64_t totalTime;
int64_t createTime;
};
class PerfTest : public BlastBasePerfTestStrict
{
public:
NvBlastAsset* loadAsset(const char* path, ExtSerialization* ser)
{
std::ifstream infileStream(path, std::ios::binary);
if (!infileStream.is_open())
{
return nullptr;
}
const std::vector<char> inBuffer((std::istreambuf_iterator<char>(infileStream)), std::istreambuf_iterator<char>());
infileStream.close();
NvBlastAsset* asset = static_cast<NvBlastAsset*>(ser->deserializeFromBuffer(inBuffer.data(), inBuffer.size()));
return asset;
}
PerfResults damageLeafSupportActors(const char* testName, uint32_t assetCount, uint32_t familyCount, uint32_t damageCount, int accelType, std::vector<uint32_t>& history)
{
PerfResults results;
results.totalTime = 0;
results.createTime = 0;
const float compressiveDamage = 1.0f;
const uint32_t minChunkCount = 1000;
const uint32_t maxChunkCount = 100000;
srand(0);
for (uint32_t assetNum = 0; assetNum < assetCount; ++assetNum)
{
GeneratorAsset cube;
NvBlastAssetDesc desc;
generateRandomCube(cube, desc, minChunkCount, maxChunkCount);
{
std::vector<char> scratch;
nvidia::NvBounds3 bounds = nvidia::NvBounds3::empty();
#if 1
scratch.resize((size_t)NvBlastGetRequiredScratchForCreateAsset(&desc, messageLog));
void* mem = alignedZeroedAlloc(NvBlastGetAssetMemorySize(&desc, messageLog));
NvBlastAsset* asset = NvBlastCreateAsset(mem, &desc, scratch.data(), messageLog);
EXPECT_TRUE(asset != nullptr);
bounds = nvidia::NvBounds3::centerExtents(nvidia::NvVec3(0, 0, 0), nvidia::NvVec3(cube.extents.x, cube.extents.y, cube.extents.z));
#else
// load asset
NvBlastAsset* asset = nullptr;
ExtSerialization* ser = NvBlastExtSerializationCreate();
for (int s = 0; s < 5 && !asset; s++)
{
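// Probe progressively shorter relative paths by skipping s leading "../" segments (s * 3 characters).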
asset = loadAsset(&"../../../../../test/assets/table.blast"[s * 3], ser);
}
EXPECT_TRUE(asset != nullptr);
ser->release();
uint32_t bc = NvBlastAssetGetBondCount(asset, messageLog);
const NvBlastBond* bonds = NvBlastAssetGetBonds(asset, messageLog);
for (uint32_t i = 0; i < bc; i++)
{
bounds.include(reinterpret_cast<const nvidia::NvVec3&>(bonds[i].centroid));
}
#endif
Nv::Blast::Time t;
NvBlastExtDamageAccelerator* accelerator = NvBlastExtDamageAcceleratorCreate(asset, accelType);
results.createTime += t.getElapsedTicks();
// Generate families
for (uint32_t familyNum = 0; familyNum < familyCount; ++familyNum)
{
// create actor
NvBlastActorDesc actorDesc;
actorDesc.initialBondHealths = nullptr;
actorDesc.uniformInitialBondHealth = 1.0f;
actorDesc.initialSupportChunkHealths = nullptr;
actorDesc.uniformInitialLowerSupportChunkHealth = 1.0f;
void* mem = alignedZeroedAlloc(NvBlastAssetGetFamilyMemorySize(asset, messageLog));
NvBlastFamily* family = NvBlastAssetCreateFamily(mem, asset, messageLog);
scratch.resize((size_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(family, messageLog));
EXPECT_TRUE(family != nullptr);
NvBlastActor* actor = NvBlastFamilyCreateFirstActor(family, &actorDesc, scratch.data(), messageLog);
EXPECT_TRUE(actor != nullptr);
// Generate damage
std::set<NvBlastActor*> actors;
actors.insert(actor);
for (uint32_t damageNum = 0; damageNum < damageCount; ++damageNum)
{
GeneratorAsset::Vec3 localPos = GeneratorAsset::Vec3((float)rand() / RAND_MAX - 0.5f, (float)rand() / RAND_MAX - 0.5f, (float)rand() / RAND_MAX - 0.5f) * 2;
localPos.x *= bounds.getExtents().x;
localPos.y *= bounds.getExtents().y;
localPos.z *= bounds.getExtents().z;
const float relativeDamageRadius = (float)rand() / RAND_MAX * bounds.getExtents().maxElement();
NvBlastTimers timers;
NvBlastTimersReset(&timers);
blast(actors, &cube, accelerator, localPos, relativeDamageRadius, relativeDamageRadius*1.2f, compressiveDamage, history, timers);
const std::string timingName = std::string(testName) + " asset " + std::to_string(assetNum) + " family " + std::to_string(familyNum) + " damage " + std::to_string(damageNum) + " accel " + std::to_string(accelType);
BlastBasePerfTestStrict::reportData(timingName + " material", timers.material);
history.push_back((uint32_t)actors.size());
results.totalTime += timers.material;
history.push_back(0); // separator
}
// Release remaining actors
std::for_each(actors.begin(), actors.end(), [](NvBlastActor* a) { NvBlastActorDeactivate(a, messageLog); });
actors.clear();
alignedFree(family);
}
if (accelerator)
accelerator->release();
// Release asset data
alignedFree(asset);
}
}
return results;
}
};
// Tests
TEST_F(PerfTest, DISABLED_DamageRadialSimple)
{
const int trialCount = 10;
std::cout << "Trial (of " << trialCount << "): ";
for (int trial = 1; trial <= trialCount; ++trial)
{
if (trial % 100 == 0)
{
std::cout << trial << ".. ";
std::cout.flush();
}
std::vector<uint32_t> history1, history2;
uint32_t assetCount = 4;
uint32_t familyCount = 4;
uint32_t damageCount = 4;
PerfResults results0 = damageLeafSupportActors(test_info_->name(), assetCount, familyCount, damageCount, 0, history1);
BlastBasePerfTestStrict::reportData("DamageRadialSimple total0 " , results0.totalTime);
BlastBasePerfTestStrict::reportData("DamageRadialSimple create0 ", results0.createTime);
PerfResults results1 = damageLeafSupportActors(test_info_->name(), assetCount, familyCount, damageCount, 1, history2);
BlastBasePerfTestStrict::reportData("DamageRadialSimple total1 ", results1.totalTime);
BlastBasePerfTestStrict::reportData("DamageRadialSimple create1 ", results1.createTime);
EXPECT_TRUE(history1 == history2);
}
std::cout << "done." << std::endl;
}
|
NVIDIA-Omniverse/PhysX/blast/source/test/src/unit/SyncTests.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include "TkBaseTest.h"
#include "NvBlastExtPxSync.h"
#include "NvBlastTkEvent.h"
#include <map>
#include <set>
#include <sstream>
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// ExtSync Tests
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
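// Base: shared scaffolding for the sync tests. It creates two families with fixed IDs in
// one group, defers the actual damage/sync work to impl(), then serializes the final
// per-actor state (visible chunk count, graph node count, chunk indices, bond healths)
// into a string so server and client runs can be compared.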
class Base
{
public:
Base(TkTestStrict* test) : m_test(test)
{
}
void run(std::stringstream& finalState)
{
//////// initial setup ////////
m_test->createTestAssets();
TkFramework* fwk = NvBlastTkFrameworkGet();
TkGroupDesc gdesc;
gdesc.workerCount = m_test->m_taskman->getCpuDispatcher()->getWorkerCount();
m_group = fwk->createGroup(gdesc);
EXPECT_TRUE(m_group != nullptr);
TkActorDesc adesc(m_test->testAssets[0]);
NvBlastID id;
TkActor* actor0 = fwk->createActor(adesc);
EXPECT_TRUE(actor0 != nullptr);
families[0] = &actor0->getFamily();
memcpy(id.data, "Mumble Jumble Bumble", sizeof(NvBlastID)); // Stuffing an arbitrary 16 bytes (The prefix of the given string)
families[0]->setID(id);
m_group->addActor(*actor0);
TkActor* actor1 = fwk->createActor(adesc);
EXPECT_TRUE(actor1 != nullptr);
families[1] = &actor1->getFamily();
memcpy(id.data, "buzzkillerdiller", sizeof(NvBlastID)); // Stuffing an arbitrary 16 bytes (The prefix of the given string)
families[1]->setID(id);
m_group->addActor(*actor1);
m_test->m_groupTM->setGroup(m_group);
//////// server/client specific impl ////////
impl();
//////// write out framework final state ////////
finalState.str(""); // empty the stream; clear() alone only resets error flags
finalState.clear();
for (auto family : families)
{
std::vector<TkActor*> actors(family->getActorCount());
family->getActors(actors.data(), static_cast<uint32_t>(actors.size()));
for (auto actor : actors)
{
finalState << actor->getVisibleChunkCount();
finalState << actor->getGraphNodeCount();
std::vector<uint32_t> chunkIndices(actor->getVisibleChunkCount());
actor->getVisibleChunkIndices(chunkIndices.data(), (uint32_t)chunkIndices.size());
for (uint32_t chunkIndex : chunkIndices)
finalState << chunkIndex;
const float* bondHealths = actor->getBondHealths();
for (uint32_t i = 0; i < actor->getAsset()->getBondCount(); ++i)
finalState << bondHealths[i];
}
}
//////// release ////////
m_group->release();
for (auto family : families)
{
family->release();
}
m_test->releaseTestAssets();
}
protected:
virtual void impl() = 0;
TkTestStrict* m_test;
TkGroup* m_group;
TkFamily* families[2];
};
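// Server: damages both families while an ExtSync listener records the resulting sync
// events into m_syncBuffer for later replay.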
class Server : public Base
{
public:
Server(TkTestStrict* test, std::vector<ExtSyncEvent*>& syncBuffer) : Base(test), m_syncBuffer(syncBuffer) {}
protected:
virtual void impl() override
{
// create sync ext
ExtSync* sync = ExtSync::create();
// add sync as listener to family #1
families[1]->addListener(*sync);
// damage params
CSParams cs0(1, 0.0f);
NvBlastExtProgramParams csParams0 = { &cs0, nullptr };
NvBlastExtRadialDamageDesc radialDamage0 = m_test->getRadialDamageDesc(0, 0, 0);
NvBlastExtProgramParams radialParams0 = { &radialDamage0, nullptr };
NvBlastExtRadialDamageDesc radialDamage1 = m_test->getRadialDamageDesc(0, 0, 0, 10.0f, 10.0f, 0.1f);
NvBlastExtProgramParams radialParams1 = { &radialDamage1, nullptr };
// damage family #0 (make it split)
{
TkActor* actor;
families[0]->getActors(&actor, 1);
actor->damage(m_test->getCubeSlicerProgram(), &csParams0);
}
// process
m_test->m_groupTM->process();
m_test->m_groupTM->wait();
EXPECT_EQ(families[0]->getActorCount(), 2);
// sync family #0
sync->syncFamily(*families[0]);
// add sync as listener to family #0
families[0]->addListener(*sync);
// damage family #0 (make it split fully)
{
TkActor* actor;
families[0]->getActors(&actor, 1, 1);
actor->damage(m_test->getFalloffProgram(), &radialParams0);
}
// damage family 1 (just damage bonds health)
{
TkActor* actor;
families[1]->getActors(&actor, 1);
actor->damage(m_test->getFalloffProgram(), &radialParams1);
}
// process
m_test->m_groupTM->process();
m_test->m_groupTM->wait();
EXPECT_EQ(families[0]->getActorCount(), 5);
EXPECT_EQ(families[1]->getActorCount(), 1);
// take sync buffer from sync
{
const ExtSyncEvent*const* buffer;
uint32_t size;
sync->acquireSyncBuffer(buffer, size);
m_syncBuffer.resize(size);
for (size_t i = 0; i < size; ++i)
{
m_syncBuffer[i] = buffer[i]->clone();
}
sync->releaseSyncBuffer();
}
//
families[0]->removeListener(*sync);
families[1]->removeListener(*sync);
//
sync->release();
}
private:
std::vector<ExtSyncEvent*>& m_syncBuffer;
};
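// Client: replays the recorded sync buffer through ExtSync::applySyncBuffer and tracks
// actor indices via Split events to verify the replay produces exactly the actors the
// events describe.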
class Client : public Base, public TkEventListener
{
public:
Client(TkTestStrict* test, std::vector<ExtSyncEvent*>& syncBuffer) : Base(test), m_syncBuffer(syncBuffer) {}
protected:
virtual void impl() override
{
ExtSync* sync = ExtSync::create();
// fill map
for (auto& family : families)
{
std::vector<TkActor*> actors(family->getActorCount());
family->getActors(actors.data(), static_cast<uint32_t>(actors.size()));
auto& actorsSet = m_actorsPerFamily[family];
for (auto actor : actors)
EXPECT_TRUE(actorsSet.insert(actor->getIndex()).second);
}
// subscribe
for (auto& family : families)
{
family->addListener(*this);
}
// apply sync buffer
sync->applySyncBuffer(*NvBlastTkFrameworkGet(), (const Nv::Blast::ExtSyncEvent**)m_syncBuffer.data(), static_cast<uint32_t>(m_syncBuffer.size()), m_group);
// check map
for (auto& family : families)
{
std::vector<TkActor*> actors(family->getActorCount());
family->getActors(actors.data(), static_cast<uint32_t>(actors.size()));
std::set<uint32_t> actorsSet;
for (auto actor : actors)
EXPECT_TRUE(actorsSet.insert(actor->getIndex()).second);
EXPECT_TRUE(m_actorsPerFamily[family] == actorsSet);
}
// unsubscribe
for (auto& family : families)
{
family->removeListener(*this);
}
m_test->m_groupTM->process();
m_test->m_groupTM->wait();
sync->release();
}
// listen for Split event and update actors map
virtual void receive(const TkEvent* events, uint32_t eventCount) override
{
for (size_t i = 0; i < eventCount; ++i)
{
const TkEvent& e = events[i];
switch (e.type)
{
case (TkEvent::Split) :
{
const TkSplitEvent* splitEvent = e.getPayload<TkSplitEvent>();
auto& actorsSet = m_actorsPerFamily[splitEvent->parentData.family];
if (!isInvalidIndex(splitEvent->parentData.index))
{
EXPECT_EQ((size_t)1, actorsSet.erase(splitEvent->parentData.index));
}
for (size_t j = 0; j < splitEvent->numChildren; ++j)
{
TkActor* a = splitEvent->children[j];
EXPECT_TRUE(actorsSet.insert(a->getIndex()).second);
}
break;
}
case (TkEvent::FractureCommand) :
{
break;
}
case (TkEvent::JointUpdate) :
{
FAIL();
break;
}
default:
break;
}
}
}
private:
std::map<TkFamily*, std::set<uint32_t>> m_actorsPerFamily;
std::vector<ExtSyncEvent*>& m_syncBuffer;
};
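// SyncTest1: the Server produces a sync buffer, the Client replays it, and the
// serialized final framework states of the two runs must be identical.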
TEST_F(TkTestStrict, SyncTest1)
{
this->createFramework();
std::vector<ExtSyncEvent*> syncBuffer;
std::stringstream serverFinalState;
{
Server s(this, syncBuffer);
s.run(serverFinalState);
}
EXPECT_TRUE(syncBuffer.size() > 0);
std::stringstream clientFinalState;
{
Client c(this, syncBuffer);
c.run(clientFinalState);
}
for (auto e : syncBuffer)
{
e->release();
}
syncBuffer.clear();
EXPECT_EQ(serverFinalState.str(), clientFinalState.str());
this->releaseFramework();
}
|
NVIDIA-Omniverse/PhysX/blast/source/test/src/unit/APITests.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include "BlastBaseTest.h"
#include "NvBlastIndexFns.h"
#include "NvBlastExtDamageShaders.h"
#include <algorithm>
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Utils / Tests Common
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
using namespace Nv::Blast;
class APITest : public BlastBaseTest < NvBlastMessage::Error, 1 >
{
};
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Tests
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
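// All tests below follow the same low-level lifecycle: compute scratch and memory sizes,
// create the asset and family in caller-owned aligned memory, create the first actor,
// generate and/or apply fracture, split, deactivate the resulting actors, and free the
// family and asset buffers.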
TEST_F(APITest, Basic)
{
// create asset
const NvBlastAssetDesc& assetDesc = g_assetDescs[0];
std::vector<char> scratch((size_t)NvBlastGetRequiredScratchForCreateAsset(&assetDesc, messageLog));
void* amem = alignedZeroedAlloc(NvBlastGetAssetMemorySize(&assetDesc, messageLog));
NvBlastAsset* asset = NvBlastCreateAsset(amem, &assetDesc, scratch.data(), messageLog);
EXPECT_TRUE(asset != nullptr);
// create actor
NvBlastActorDesc actorDesc;
actorDesc.initialBondHealths = actorDesc.initialSupportChunkHealths = nullptr;
actorDesc.uniformInitialBondHealth = actorDesc.uniformInitialLowerSupportChunkHealth = 1.0f;
void* fmem = alignedZeroedAlloc(NvBlastAssetGetFamilyMemorySize(asset, messageLog));
NvBlastFamily* family = NvBlastAssetCreateFamily(fmem, asset, messageLog);
scratch.resize((size_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(family, messageLog));
NvBlastActor* actor = NvBlastFamilyCreateFirstActor(family, &actorDesc, scratch.data(), messageLog);
EXPECT_TRUE(actor != nullptr);
NvBlastExtRadialDamageDesc damage = {
10.0f, // compressive
{ 0.0f, 0.0f, 0.0f }, // position
4.0f, // min radius - maximum damage
6.0f // max radius - zero damage
};
NvBlastBondFractureData outFracture[12]; /*num lower-support chunks + bonds?*/
NvBlastFractureBuffers events;
events.bondFractureCount = 12;
events.bondFractures = outFracture;
events.chunkFractureCount = 0;
events.chunkFractures = nullptr;
NvBlastExtProgramParams programParams = { &damage, nullptr };
NvBlastDamageProgram program = {
NvBlastExtFalloffGraphShader,
nullptr
};
NvBlastActorGenerateFracture(&events, actor, program, &programParams, messageLog, nullptr);
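// apply in situ: the buffer that received the generated events doubles as the command buffer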
NvBlastActorApplyFracture(&events, actor, &events, messageLog, nullptr);
EXPECT_TRUE(NvBlastActorIsSplitRequired(actor, messageLog));
EXPECT_EQ(12, events.bondFractureCount);
NvBlastActor* newActors[8]; /* num lower-support chunks? plus space for deletedActor */
NvBlastActorSplitEvent result;
result.deletedActor = nullptr;
result.newActors = newActors;
scratch.resize((size_t)NvBlastActorGetRequiredScratchForSplit(actor, messageLog));
size_t newActorsCount = NvBlastActorSplit(&result, actor, 8, scratch.data(), messageLog, nullptr);
EXPECT_EQ(8, newActorsCount);
EXPECT_EQ(actor, result.deletedActor);
for (uint32_t i = 0; i < newActorsCount; ++i)
{
const bool actorReleaseResult = NvBlastActorDeactivate(result.newActors[i], messageLog);
EXPECT_TRUE(actorReleaseResult);
}
alignedFree(family);
alignedFree(asset);
}
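// The asset/family/actor setup above is repeated by every test in this file. Below is a
// minimal sketch of how it could be factored into a helper, using only entry points
// already exercised by these tests. It is hypothetical and kept disabled so the tests
// themselves remain unchanged.
#if 0
static NvBlastActor* createFirstActorForDesc(const NvBlastAssetDesc& assetDesc,
NvBlastAsset*& asset, NvBlastFamily*& family, NvBlastLog logFn)
{
// create the asset in caller-owned aligned memory
std::vector<char> scratch((size_t)NvBlastGetRequiredScratchForCreateAsset(&assetDesc, logFn));
void* amem = alignedZeroedAlloc(NvBlastGetAssetMemorySize(&assetDesc, logFn));
asset = NvBlastCreateAsset(amem, &assetDesc, scratch.data(), logFn);
if (asset == nullptr)
return nullptr;
// create the family and first actor with uniform unit healths
NvBlastActorDesc actorDesc;
actorDesc.initialBondHealths = actorDesc.initialSupportChunkHealths = nullptr;
actorDesc.uniformInitialBondHealth = actorDesc.uniformInitialLowerSupportChunkHealth = 1.0f;
void* fmem = alignedZeroedAlloc(NvBlastAssetGetFamilyMemorySize(asset, logFn));
family = NvBlastAssetCreateFamily(fmem, asset, logFn);
scratch.resize((size_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(family, logFn));
return NvBlastFamilyCreateFirstActor(family, &actorDesc, scratch.data(), logFn);
// the caller remains responsible for alignedFree(family) and alignedFree(asset)
}
#endif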
TEST_F(APITest, DamageBondsCompressive)
{
const size_t bondsCount = 6;
const NvBlastChunkDesc c_chunks[8] =
{
// centroid volume parent idx flags ID
{ {0.0f, 0.0f, 0.0f}, 0.0f, UINT32_MAX, NvBlastChunkDesc::NoFlags, 0 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 1 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 2 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 3 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 4 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 5 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 6 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 7 }
};
const NvBlastBondDesc c_bonds[bondsCount] =
{
{ { {-1.0f, 0.0f, 0.0f }, 1.0f, { 1.0f, 2.0f, 0.0f }, 0 }, { 1, 2 } },
{ { {-1.0f, 0.0f, 0.0f }, 1.0f, {-1.0f, 2.0f, 0.0f }, 0 }, { 2, 3 } },
{ { { 0.0f,-1.0f, 0.0f }, 1.0f, {-2.0f, 1.0f, 0.0f }, 0 }, { 3, 4 } },
{ { { 0.0f,-1.0f, 0.0f }, 1.0f, {-2.0f,-1.0f, 0.0f }, 0 }, { 4, 5 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, {-1.0f,-2.0f, 0.0f }, 0 }, { 5, 6 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 1.0f,-2.0f, 0.0f }, 0 }, { 6, 7 } }
};
// create asset
const NvBlastAssetDesc assetDesc = { 8, c_chunks, bondsCount, c_bonds };
std::vector<char> scratch((size_t)NvBlastGetRequiredScratchForCreateAsset(&assetDesc, messageLog));
void* amem = alignedZeroedAlloc(NvBlastGetAssetMemorySize(&assetDesc, messageLog));
NvBlastAsset* asset = NvBlastCreateAsset(amem, &assetDesc, scratch.data(), messageLog);
EXPECT_TRUE(asset != nullptr);
// create actor
NvBlastActorDesc actorDesc;
actorDesc.initialBondHealths = actorDesc.initialSupportChunkHealths = nullptr;
actorDesc.uniformInitialBondHealth = actorDesc.uniformInitialLowerSupportChunkHealth = 1.0f;
void* fmem = alignedZeroedAlloc(NvBlastAssetGetFamilyMemorySize(asset, messageLog));
NvBlastFamily* family = NvBlastAssetCreateFamily(fmem, asset, messageLog);
scratch.resize((size_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(family, messageLog));
NvBlastActor* actor = NvBlastFamilyCreateFirstActor(family, &actorDesc, scratch.data(), messageLog);
EXPECT_TRUE(actor != nullptr);
// get graph nodes check
std::vector<uint32_t> graphNodeIndices;
graphNodeIndices.resize(NvBlastActorGetGraphNodeCount(actor, nullptr));
uint32_t graphNodesCount = NvBlastActorGetGraphNodeIndices(graphNodeIndices.data(), (uint32_t)graphNodeIndices.size(), actor, nullptr);
EXPECT_EQ(graphNodesCount, 7);
NvBlastExtRadialDamageDesc damage = {
1.0f, // compressive
{ 4.0f, 2.0f, 0.0f }, // position
4.0f, // min radius - maximum damage
6.0f // max radius - zero damage
}; // linear falloff
NvBlastBondFractureData outCommands[bondsCount] = {
{ UINT32_MAX, UINT32_MAX, UINT32_MAX, 0 },
{ UINT32_MAX, UINT32_MAX, UINT32_MAX, 0 },
{ UINT32_MAX, UINT32_MAX, UINT32_MAX, 0 },
{ UINT32_MAX, UINT32_MAX, UINT32_MAX, 0 },
{ UINT32_MAX, UINT32_MAX, UINT32_MAX, 0 },
{ UINT32_MAX, UINT32_MAX, UINT32_MAX, 0 },
};
NvBlastFractureBuffers commands = {
6, 0, outCommands, nullptr
};
NvBlastExtProgramParams programParams = { &damage, nullptr };
NvBlastDamageProgram program = {
NvBlastExtFalloffGraphShader,
nullptr
};
NvBlastActorGenerateFracture(&commands, actor, program, &programParams, messageLog, nullptr);
ASSERT_EQ(3, commands.bondFractureCount);
ASSERT_EQ(0, commands.chunkFractureCount);
// node indices in _graph_ chunks
NvBlastBondFractureData expectedCommand[] = {
{ 0, 0, 1, 1.0f },
{ 0, 1, 2, 0.5f },
{ 0, 5, 6, 0.5f }
};
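// Worked falloff check: the first, second, and sixth bond centroids lie at distances 3,
// 5, and 5 from the damage position (4,2,0). With full damage inside radius 4 and linear
// falloff to zero at radius 6, this yields damage 1.0, 0.5, and 0.5 on the bonds joining
// graph nodes (0,1), (1,2), and (5,6); the remaining bonds lie beyond radius 6 and are
// untouched.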
for (int i = 0; i < 3; i++)
{
EXPECT_EQ(expectedCommand[i].nodeIndex0, outCommands[i].nodeIndex0);
EXPECT_EQ(expectedCommand[i].nodeIndex1, outCommands[i].nodeIndex1);
EXPECT_EQ(expectedCommand[i].health, outCommands[i].health);
}
const bool actorReleaseResult = NvBlastActorDeactivate(actor, messageLog);
EXPECT_TRUE(actorReleaseResult);
alignedFree(family);
alignedFree(asset);
}
TEST_F(APITest, DirectFractureKillsChunk)
{
// 1--2
// | |
// 3--4 <-- kill 4
const NvBlastChunkDesc c_chunks[9] =
{
// centroid volume parent idx flags ID
{ {0.0f, 0.0f, 0.0f}, 0.0f, UINT32_MAX, NvBlastChunkDesc::NoFlags, 0 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 1 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 2 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 3 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 4 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 4, NvBlastChunkDesc::NoFlags, 5 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 4, NvBlastChunkDesc::NoFlags, 6 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 4, NvBlastChunkDesc::NoFlags, 7 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 4, NvBlastChunkDesc::NoFlags, 8 },
};
const NvBlastBondDesc c_bonds[4] =
{
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 0.0f, 1.0f, 0.0f }, 0 }, { 1, 2 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 0.0f,-1.0f, 0.0f }, 0 }, { 3, 4 } },
{ { { 0.0f,-1.0f, 0.0f }, 1.0f, {-1.0f, 0.0f, 0.0f }, 0 }, { 1, 3 } },
{ { { 0.0f,-1.0f, 0.0f }, 1.0f, { 1.0f, 0.0f, 0.0f }, 0 }, { 2, 4 } },
};
NvBlastAssetDesc assetDesc;
assetDesc.chunkCount = 9;
assetDesc.chunkDescs = c_chunks;
assetDesc.bondCount = 4;
assetDesc.bondDescs = c_bonds;
// create asset
std::vector<char> scratch((size_t)NvBlastGetRequiredScratchForCreateAsset(&assetDesc, messageLog));
void* amem = alignedZeroedAlloc(NvBlastGetAssetMemorySize(&assetDesc, messageLog));
NvBlastAsset* asset = NvBlastCreateAsset(amem, &assetDesc, scratch.data(), messageLog);
EXPECT_TRUE(asset != nullptr);
// create actor
NvBlastActorDesc actorDesc;
actorDesc.initialBondHealths = actorDesc.initialSupportChunkHealths = nullptr;
actorDesc.uniformInitialBondHealth = actorDesc.uniformInitialLowerSupportChunkHealth = 1.0f;
void* fmem = alignedZeroedAlloc(NvBlastAssetGetFamilyMemorySize(asset, messageLog));
NvBlastFamily* family = NvBlastAssetCreateFamily(fmem, asset, messageLog);
scratch.resize((size_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(family, messageLog));
NvBlastActor* actor = NvBlastFamilyCreateFirstActor(family, &actorDesc, scratch.data(), messageLog);
EXPECT_TRUE(actor != nullptr);
NvBlastChunkFractureData fractureCmd;
fractureCmd.chunkIndex = 4;
fractureCmd.health = 1.0f;
NvBlastFractureBuffers commands = { 0, 1, nullptr, &fractureCmd };
NvBlastChunkFractureData fractureEvt;
NvBlastFractureBuffers events = { 0, 1, nullptr, &fractureEvt };
NvBlastActorApplyFracture(&events, actor, &commands, messageLog, nullptr);
EXPECT_TRUE(NvBlastActorIsSplitRequired(actor, messageLog));
EXPECT_EQ(1, events.chunkFractureCount);
scratch.resize((size_t)NvBlastActorGetRequiredScratchForSplit(actor, messageLog));
std::vector<NvBlastActor*> newActors(NvBlastActorGetMaxActorCountForSplit(actor, messageLog));
NvBlastActorSplitEvent result;
result.deletedActor = nullptr;
result.newActors = newActors.data();
size_t newActorsCount = NvBlastActorSplit(&result, actor, static_cast<uint32_t>(newActors.size()), scratch.data(), messageLog, nullptr);
newActors.resize(newActorsCount);
EXPECT_EQ(5, newActorsCount);
EXPECT_EQ(actor, result.deletedActor);
// check newActors contain original actor
EXPECT_TRUE(std::any_of(newActors.begin(), newActors.end(), [&](const NvBlastActor* a) { return actor == a; }));
for (uint32_t i = 0; i < newActorsCount; ++i)
{
const bool actorReleaseResult = NvBlastActorDeactivate(result.newActors[i], messageLog);
EXPECT_TRUE(actorReleaseResult);
}
alignedFree(family);
alignedFree(asset);
}
TEST_F(APITest, DirectFractureKillsIslandRootChunk)
{
// 1--2 <-- kill 1
// | |
// 3--4
const NvBlastChunkDesc c_chunks[9] =
{
// centroid volume parent idx flags ID
{ {0.0f, 0.0f, 0.0f}, 0.0f, UINT32_MAX, NvBlastChunkDesc::NoFlags, 0 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 1 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 2 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 3 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 4 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 1, NvBlastChunkDesc::NoFlags, 5 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 1, NvBlastChunkDesc::NoFlags, 6 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 1, NvBlastChunkDesc::NoFlags, 7 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 1, NvBlastChunkDesc::NoFlags, 8 },
};
const NvBlastBondDesc c_bonds[4] =
{
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 0.0f, 1.0f, 0.0f }, 0 }, { 1, 2 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 0.0f,-1.0f, 0.0f }, 0 }, { 3, 4 } },
{ { { 0.0f,-1.0f, 0.0f }, 1.0f, {-1.0f, 0.0f, 0.0f }, 0 }, { 1, 3 } },
{ { { 0.0f,-1.0f, 0.0f }, 1.0f, { 1.0f, 0.0f, 0.0f }, 0 }, { 2, 4 } },
};
NvBlastAssetDesc assetDesc;
assetDesc.chunkCount = 9;
assetDesc.chunkDescs = c_chunks;
assetDesc.bondCount = 4;
assetDesc.bondDescs = c_bonds;
// create asset
std::vector<char> scratch((size_t)NvBlastGetRequiredScratchForCreateAsset(&assetDesc, messageLog));
void* amem = alignedZeroedAlloc(NvBlastGetAssetMemorySize(&assetDesc, messageLog));
NvBlastAsset* asset = NvBlastCreateAsset(amem, &assetDesc, scratch.data(), messageLog);
EXPECT_TRUE(asset != nullptr);
// create actor
NvBlastActorDesc actorDesc;
actorDesc.initialBondHealths = actorDesc.initialSupportChunkHealths = nullptr;
actorDesc.uniformInitialBondHealth = actorDesc.uniformInitialLowerSupportChunkHealth = 1.0f;
void* fmem = alignedZeroedAlloc(NvBlastAssetGetFamilyMemorySize(asset, messageLog));
NvBlastFamily* family = NvBlastAssetCreateFamily(fmem, asset, messageLog);
scratch.resize((size_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(family, messageLog));
NvBlastActor* actor = NvBlastFamilyCreateFirstActor(family, &actorDesc, scratch.data(), messageLog);
EXPECT_TRUE(actor != nullptr);
NvBlastChunkFractureData fractureCmd;
fractureCmd.chunkIndex = 1;
fractureCmd.health = 1.0f;
NvBlastFractureBuffers commands = { 0, 1, nullptr, &fractureCmd };
NvBlastChunkFractureData fractureEvt;
NvBlastFractureBuffers events = { 0, 1, nullptr, &fractureEvt };
NvBlastActorApplyFracture(&events, actor, &commands, messageLog, nullptr);
EXPECT_TRUE(NvBlastActorIsSplitRequired(actor, messageLog));
EXPECT_EQ(1, events.chunkFractureCount);
scratch.resize((size_t)NvBlastActorGetRequiredScratchForSplit(actor, messageLog));
std::vector<NvBlastActor*> newActors(NvBlastActorGetMaxActorCountForSplit(actor, messageLog));
NvBlastActorSplitEvent result;
result.deletedActor = nullptr;
result.newActors = newActors.data();
size_t newActorsCount = NvBlastActorSplit(&result, actor, static_cast<uint32_t>(newActors.size()), scratch.data(), messageLog, nullptr);
newActors.resize(newActorsCount);
EXPECT_EQ(5, newActorsCount);
EXPECT_EQ(actor, result.deletedActor);
// check if newActors don't contain original actor
EXPECT_TRUE(!std::any_of(newActors.begin(), newActors.end(), [&](const NvBlastActor* a) { return actor == a; }));
for (uint32_t i = 0; i < newActorsCount; ++i)
{
const bool actorReleaseResult = NvBlastActorDeactivate(result.newActors[i], messageLog);
EXPECT_TRUE(actorReleaseResult);
}
alignedFree(family);
alignedFree(asset);
}
TEST_F(APITest, SubsupportFracture)
{
const NvBlastAssetDesc& assetDesc = g_assetDescs[1]; // cube with subsupport
// create asset with chunk map
std::vector<char> scratch((size_t)NvBlastGetRequiredScratchForCreateAsset(&assetDesc, messageLog));
void* amem = alignedZeroedAlloc(NvBlastGetAssetMemorySize(&assetDesc, messageLog));
NvBlastAsset* asset = NvBlastCreateAsset(amem, &assetDesc, scratch.data(), messageLog);
EXPECT_TRUE(asset != nullptr);
// create actor
NvBlastActorDesc actorDesc;
actorDesc.initialBondHealths = actorDesc.initialSupportChunkHealths = nullptr;
actorDesc.uniformInitialBondHealth = actorDesc.uniformInitialLowerSupportChunkHealth = 1.0f;
void* fmem = alignedZeroedAlloc(NvBlastAssetGetFamilyMemorySize(asset, messageLog));
NvBlastFamily* family = NvBlastAssetCreateFamily(fmem, asset, messageLog);
scratch.resize((size_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(family, messageLog));
NvBlastActor* actor = NvBlastFamilyCreateFirstActor(family, &actorDesc, scratch.data(), messageLog);
EXPECT_TRUE(actor != nullptr);
// first set of fracture commands
NvBlastChunkFractureData f1 = { 0, 1, 2.0f };
NvBlastChunkFractureData f3 = { 0, 3, 0.5f };
NvBlastChunkFractureData f5 = { 0, 5, 1.0f };
NvBlastChunkFractureData f7 = { 0, 7, 1.0f };
std::vector<NvBlastChunkFractureData> chunkFractureData;
chunkFractureData.reserve(assetDesc.chunkCount);
chunkFractureData.push_back(f1);
chunkFractureData.push_back(f3);
chunkFractureData.push_back(f5);
chunkFractureData.push_back(f7);
ASSERT_EQ(assetDesc.chunkCount, chunkFractureData.capacity());
ASSERT_EQ(4, chunkFractureData.size());
NvBlastFractureBuffers target = { 0, static_cast<uint32_t>(chunkFractureData.capacity()), nullptr, chunkFractureData.data() };
{
NvBlastFractureBuffers events = target;
NvBlastFractureBuffers commands = { 0, static_cast<uint32_t>(chunkFractureData.size()), nullptr, chunkFractureData.data() };
NvBlastActorApplyFracture(&events, actor, &commands, messageLog, nullptr);
EXPECT_TRUE(NvBlastActorIsSplitRequired(actor, messageLog));
ASSERT_EQ(4 + 8, events.chunkFractureCount); // all requested chunks take damage, and the children of one of them
}
// re-apply same set of commands
chunkFractureData.clear();
chunkFractureData.reserve(assetDesc.chunkCount);
chunkFractureData.push_back(f1);
chunkFractureData.push_back(f3);
chunkFractureData.push_back(f5);
chunkFractureData.push_back(f7);
ASSERT_EQ(assetDesc.chunkCount, chunkFractureData.capacity());
ASSERT_EQ(4, chunkFractureData.size());
{
NvBlastFractureBuffers events = target;
NvBlastFractureBuffers commands = { 0, static_cast<uint32_t>(chunkFractureData.size()), nullptr, chunkFractureData.data() };
NvBlastActorApplyFracture(&events, actor, &commands, messageLog, nullptr);
EXPECT_TRUE(NvBlastActorIsSplitRequired(actor, messageLog));
ASSERT_EQ(1, events.chunkFractureCount); // f3 has broken the chunk
}
// fracture all support chunks
// the chunks from the previous fractures must not be reported again (since they are all broken already)
NvBlastChunkFractureData f2 = { 0, 2, 2.0f }; // will damage chunk and children
NvBlastChunkFractureData f4 = { 0, 4, 0.5f }; // will damage chunk without creating children on split
NvBlastChunkFractureData f6 = { 0, 6, 2.0f }; // will damage chunk and children
NvBlastChunkFractureData f8 = { 0, 8, 1.0f }; // will damage chunk
chunkFractureData.clear();
chunkFractureData.reserve(assetDesc.chunkCount);
chunkFractureData.push_back(f1);
chunkFractureData.push_back(f2);
chunkFractureData.push_back(f3);
chunkFractureData.push_back(f4);
chunkFractureData.push_back(f5);
chunkFractureData.push_back(f6);
chunkFractureData.push_back(f7);
chunkFractureData.push_back(f8);
ASSERT_EQ(assetDesc.chunkCount, chunkFractureData.capacity());
ASSERT_EQ(8, chunkFractureData.size());
NvBlastFractureBuffers events = target;
{
NvBlastFractureBuffers commands = { 0, static_cast<uint32_t>(chunkFractureData.size()), nullptr, chunkFractureData.data() };
NvBlastActorApplyFracture(&events, actor, &commands, messageLog, nullptr);
EXPECT_TRUE(NvBlastActorIsSplitRequired(actor, messageLog));
ASSERT_EQ(4 + 8 + 8, events.chunkFractureCount); // the new fracture commands all apply, plus two of them damage their children too
}
for (size_t i = 0; i < events.chunkFractureCount; i++)
{
const uint32_t chunkIndex = events.chunkFractures[i].chunkIndex;
ASSERT_TRUE(chunkIndex != 1);
ASSERT_TRUE(chunkIndex != 3);
ASSERT_TRUE(chunkIndex != 5);
ASSERT_TRUE(chunkIndex != 7);
// literal values come from g_cube2ChunkDescs
bool isInSupportRange = chunkIndex <= 8 && chunkIndex >= 1;
bool isChildOfTwo = chunkIndex <= 24 && chunkIndex >= 17;
bool isChildOfSix = chunkIndex <= 56 && chunkIndex >= 49;
ASSERT_TRUE(isInSupportRange || isChildOfTwo || isChildOfSix);
}
scratch.resize((size_t)NvBlastActorGetRequiredScratchForSplit(actor, messageLog));
std::vector<NvBlastActor*> newActors(NvBlastActorGetMaxActorCountForSplit(actor, messageLog));
NvBlastActorSplitEvent result;
result.deletedActor = nullptr;
result.newActors = newActors.data();
size_t newActorsCount = NvBlastActorSplit(&result, actor, static_cast<uint32_t>(newActors.size()), scratch.data(), messageLog, nullptr);
newActors.resize(newActorsCount);
EXPECT_EQ(64 - 8 + 1, newActorsCount);
for (uint32_t i = 0; i < newActorsCount; ++i)
{
const bool actorReleaseResult = NvBlastActorDeactivate(result.newActors[i], messageLog);
EXPECT_TRUE(actorReleaseResult);
}
alignedFree(family);
alignedFree(asset);
}
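// Warning-detection helpers: myLog forwards to the base test logger and records that a
// message of any type was received; EXPECT_WARNING / EXPECT_NO_WARNING assert on the
// flag and reset it.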
static bool hasWarned = false;
static void myLog(int type, const char* msg, const char* file, int line)
{
BlastBaseTest<-1, 0>::messageLog(type, msg, file, line);
hasWarned = true;
}
#define EXPECT_WARNING do { EXPECT_TRUE(hasWarned); hasWarned = false; } while (0)
#define EXPECT_NO_WARNING do { EXPECT_FALSE(hasWarned); hasWarned = false; } while (0)
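// FractureNoEvents: passing a null event buffer to NvBlastActorApplyFracture is legal;
// the fracture is still applied and no events are reported.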
TEST_F(APITest, FractureNoEvents)
{
static const uint32_t GUARD = 0xb1a57;
const uint32_t chunksCount = 17;
const NvBlastChunkDesc c_chunks[chunksCount] =
{
// centroid volume parent idx flags ID
{ {0.0f, 0.0f, 0.0f}, 0.0f, UINT32_MAX, NvBlastChunkDesc::NoFlags, 0 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 1 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 2 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 3 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 4 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 1, NvBlastChunkDesc::NoFlags, 5 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 1, NvBlastChunkDesc::NoFlags, 6 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 2, NvBlastChunkDesc::NoFlags, 7 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 2, NvBlastChunkDesc::NoFlags, 8 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 5, NvBlastChunkDesc::NoFlags, 9 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 5, NvBlastChunkDesc::NoFlags, 10 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 6, NvBlastChunkDesc::NoFlags, 11 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 6, NvBlastChunkDesc::NoFlags, 12 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 7, NvBlastChunkDesc::NoFlags, 13 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 7, NvBlastChunkDesc::NoFlags, 14 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 8, NvBlastChunkDesc::NoFlags, 15 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 8, NvBlastChunkDesc::NoFlags, 16 },
};
const NvBlastBondDesc c_bonds[3] =
{
// normal, area, centroid, userdata, chunks
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 1.0f, 0.0f, 0.0f }, 0 }, { 1, 2 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 2.0f, 0.0f, 0.0f }, 0 }, { 2, 3 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 3.0f, 0.0f, 0.0f }, 0 }, { 3, 4 } },
};
NvBlastAssetDesc assetDesc = { chunksCount, c_chunks, 3, c_bonds };
// create asset with chunk map
std::vector<char> scratch((size_t)NvBlastGetRequiredScratchForCreateAsset(&assetDesc, myLog));
void* amem = alignedZeroedAlloc(NvBlastGetAssetMemorySize(&assetDesc, myLog));
NvBlastAsset* asset = NvBlastCreateAsset(amem, &assetDesc, scratch.data(), myLog);
EXPECT_TRUE(asset != nullptr);
// create actor
NvBlastActorDesc actorDesc;
actorDesc.initialBondHealths = actorDesc.initialSupportChunkHealths = nullptr;
actorDesc.uniformInitialBondHealth = actorDesc.uniformInitialLowerSupportChunkHealth = 1.0f;
void* fmem = alignedZeroedAlloc(NvBlastAssetGetFamilyMemorySize(asset, myLog));
NvBlastFamily* family = NvBlastAssetCreateFamily(fmem, asset, myLog);
scratch.resize((size_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(family, myLog));
NvBlastActor* actor = NvBlastFamilyCreateFirstActor(family, &actorDesc, scratch.data(), myLog);
EXPECT_TRUE(actor != nullptr);
std::vector<NvBlastChunkFractureData> cfData;
cfData.resize(0 + 1);
cfData[cfData.size() - 1].userdata = GUARD;
std::vector<NvBlastBondFractureData> bfData;
NvBlastChunkFractureData command[] =
{
{ 0, 1, 10.0f },
{ 0, 2, 10.0f },
};
NvBlastFractureBuffers commands = { 0, 2, nullptr, command };
NvBlastActorApplyFracture(nullptr, actor, &commands, myLog, nullptr);
EXPECT_TRUE(NvBlastActorIsSplitRequired(actor, myLog));
EXPECT_NO_WARNING; // events can be null
EXPECT_EQ(GUARD, cfData[cfData.size() - 1].userdata);
scratch.resize((size_t)NvBlastActorGetRequiredScratchForSplit(actor, myLog));
std::vector<NvBlastActor*> newActors(NvBlastActorGetMaxActorCountForSplit(actor, myLog));
NvBlastActorSplitEvent result;
result.deletedActor = nullptr;
result.newActors = newActors.data();
size_t newActorsCount = NvBlastActorSplit(&result, actor, static_cast<uint32_t>(newActors.size()), scratch.data(), myLog, nullptr);
newActors.resize(newActorsCount);
EXPECT_EQ(9, newActorsCount);
for (uint32_t i = 0; i < newActorsCount; ++i)
{
const bool actorReleaseResult = NvBlastActorDeactivate(result.newActors[i], myLog);
EXPECT_TRUE(actorReleaseResult);
}
alignedFree(family);
alignedFree(asset);
EXPECT_NO_WARNING;
}
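// FractureBufferLimits: fracturing support chunks 1 and 2 with overkill health cascades
// into all of their descendants, for 14 chunk-fracture events in total. Event buffers
// with capacity 0..13 must truncate the report and fire a warning; capacity 14 must
// succeed silently. The trailing GUARD element verifies that ApplyFracture never writes
// past the reported capacity.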
TEST_F(APITest, FractureBufferLimits)
{
static const uint32_t GUARD = 0xb1a57;
const uint32_t chunksCount = 17;
const NvBlastChunkDesc c_chunks[chunksCount] =
{
// centroid volume parent idx flags ID
{ {0.0f, 0.0f, 0.0f}, 0.0f, UINT32_MAX, NvBlastChunkDesc::NoFlags, 0 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 1 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 2 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 3 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 4 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 1, NvBlastChunkDesc::NoFlags, 5 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 1, NvBlastChunkDesc::NoFlags, 6 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 2, NvBlastChunkDesc::NoFlags, 7 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 2, NvBlastChunkDesc::NoFlags, 8 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 5, NvBlastChunkDesc::NoFlags, 9 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 5, NvBlastChunkDesc::NoFlags, 10 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 6, NvBlastChunkDesc::NoFlags, 11 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 6, NvBlastChunkDesc::NoFlags, 12 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 7, NvBlastChunkDesc::NoFlags, 13 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 7, NvBlastChunkDesc::NoFlags, 14 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 8, NvBlastChunkDesc::NoFlags, 15 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 8, NvBlastChunkDesc::NoFlags, 16 },
};
const NvBlastBondDesc c_bonds[3] =
{
// normal, area, centroid, userdata, chunks
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 1.0f, 0.0f, 0.0f }, 0 }, { 1, 2 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 2.0f, 0.0f, 0.0f }, 0 }, { 2, 3 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 3.0f, 0.0f, 0.0f }, 0 }, { 3, 4 } },
};
NvBlastAssetDesc assetDesc = { chunksCount, c_chunks, 3, c_bonds };
{
// create asset with chunk map
std::vector<char> scratch((size_t)NvBlastGetRequiredScratchForCreateAsset(&assetDesc, myLog));
void* amem = alignedZeroedAlloc(NvBlastGetAssetMemorySize(&assetDesc, myLog));
NvBlastAsset* asset = NvBlastCreateAsset(amem, &assetDesc, scratch.data(), myLog);
EXPECT_TRUE(asset != nullptr);
for (uint32_t i = 0; i < 14; i++)
{
// create actor
NvBlastActorDesc actorDesc;
actorDesc.initialBondHealths = actorDesc.initialSupportChunkHealths = nullptr;
actorDesc.uniformInitialBondHealth = actorDesc.uniformInitialLowerSupportChunkHealth = 1.0f;
void* fmem = alignedZeroedAlloc(NvBlastAssetGetFamilyMemorySize(asset, myLog));
NvBlastFamily* family = NvBlastAssetCreateFamily(fmem, asset, myLog);
scratch.resize((size_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(family, myLog));
NvBlastActor* actor = NvBlastFamilyCreateFirstActor(family, &actorDesc, scratch.data(), myLog);
EXPECT_TRUE(actor != nullptr);
std::vector<NvBlastChunkFractureData> cfData;
cfData.resize(i + 1);
cfData[cfData.size() - 1].userdata = GUARD;
std::vector<NvBlastBondFractureData> bfData;
NvBlastChunkFractureData command[] =
{
{ 0, 1, 10.0f },
{ 0, 2, 10.0f },
};
NvBlastFractureBuffers commands = { 0, 2, nullptr, command };
NvBlastFractureBuffers events = { static_cast<uint32_t>(bfData.size()), static_cast<uint32_t>(cfData.size()) - 1, bfData.data(), cfData.data() };
NvBlastActorApplyFracture(&events, actor, &commands, myLog, nullptr);
EXPECT_WARNING;
EXPECT_EQ(GUARD, cfData[cfData.size() - 1].userdata);
EXPECT_EQ(i, events.chunkFractureCount);
for (uint32_t j = 0; j < events.chunkFractureCount; j++)
{
EXPECT_EQ(events.chunkFractures[j].chunkIndex, events.chunkFractures[j].userdata);
}
EXPECT_TRUE(NvBlastActorDeactivate(actor, myLog));
alignedFree(family);
}
{
// create actor
NvBlastActorDesc actorDesc;
actorDesc.initialBondHealths = actorDesc.initialSupportChunkHealths = nullptr;
actorDesc.uniformInitialBondHealth = actorDesc.uniformInitialLowerSupportChunkHealth = 1.0f;
void* fmem = alignedZeroedAlloc(NvBlastAssetGetFamilyMemorySize(asset, myLog));
NvBlastFamily* family = NvBlastAssetCreateFamily(fmem, asset, myLog);
scratch.resize((size_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(family, myLog));
NvBlastActor* actor = NvBlastFamilyCreateFirstActor(family, &actorDesc, scratch.data(), myLog);
EXPECT_TRUE(actor != nullptr);
std::vector<NvBlastChunkFractureData> cfData;
cfData.resize(14 + 1);
cfData[cfData.size() - 1].userdata = GUARD;
std::vector<NvBlastBondFractureData> bfData;
NvBlastChunkFractureData command[] =
{
{ 0, 1, 10.0f },
{ 0, 2, 10.0f },
};
NvBlastFractureBuffers commands = { 0, 2, nullptr, command };
NvBlastFractureBuffers events = { static_cast<uint32_t>(bfData.size()), static_cast<uint32_t>(cfData.size()) - 1, bfData.data(), cfData.data() };
NvBlastActorApplyFracture(&events, actor, &commands, myLog, nullptr);
EXPECT_TRUE(NvBlastActorIsSplitRequired(actor, myLog));
EXPECT_NO_WARNING;
EXPECT_EQ(14, events.chunkFractureCount);
for (uint32_t i = 0; i < events.chunkFractureCount; i++)
{
EXPECT_EQ(events.chunkFractures[i].chunkIndex, events.chunkFractures[i].userdata);
}
ASSERT_EQ(GUARD, cfData[cfData.size() - 1].userdata);
scratch.resize((size_t)NvBlastActorGetRequiredScratchForSplit(actor, myLog));
std::vector<NvBlastActor*> newActors(NvBlastActorGetMaxActorCountForSplit(actor, myLog));
NvBlastActorSplitEvent result;
result.deletedActor = nullptr;
result.newActors = newActors.data();
size_t newActorsCount = NvBlastActorSplit(&result, actor, static_cast<uint32_t>(newActors.size()), scratch.data(), myLog, nullptr);
newActors.resize(newActorsCount);
EXPECT_EQ(9, newActorsCount);
for (uint32_t i = 0; i < newActorsCount; ++i)
{
const bool actorReleaseResult = NvBlastActorDeactivate(result.newActors[i], myLog);
EXPECT_TRUE(actorReleaseResult);
}
alignedFree(family);
}
alignedFree(asset);
}
EXPECT_NO_WARNING;
}
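// FractureBufferLimitsInSitu: same cascade as above, but the command and event buffers
// share the same storage (cfData), exercising NvBlastActorApplyFracture's support for
// writing events in place over the commands it consumes.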
TEST_F(APITest, FractureBufferLimitsInSitu)
{
static const uint32_t GUARD = 0xb1a57;
const uint32_t chunksCount = 17;
const NvBlastChunkDesc c_chunks[chunksCount] =
{
// centroid volume parent idx flags ID
{ {0.0f, 0.0f, 0.0f}, 0.0f, UINT32_MAX, NvBlastChunkDesc::NoFlags, 0 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 1 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 2 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 3 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 4 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 1, NvBlastChunkDesc::NoFlags, 5 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 1, NvBlastChunkDesc::NoFlags, 6 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 2, NvBlastChunkDesc::NoFlags, 7 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 2, NvBlastChunkDesc::NoFlags, 8 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 5, NvBlastChunkDesc::NoFlags, 9 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 5, NvBlastChunkDesc::NoFlags, 10 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 6, NvBlastChunkDesc::NoFlags, 11 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 6, NvBlastChunkDesc::NoFlags, 12 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 7, NvBlastChunkDesc::NoFlags, 13 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 7, NvBlastChunkDesc::NoFlags, 14 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 8, NvBlastChunkDesc::NoFlags, 15 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 8, NvBlastChunkDesc::NoFlags, 16 },
};
const NvBlastBondDesc c_bonds[3] =
{
// normal, area, centroid, userdata, chunks
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 1.0f, 0.0f, 0.0f }, 0 }, { 1, 2 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 2.0f, 0.0f, 0.0f }, 0 }, { 2, 3 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 3.0f, 0.0f, 0.0f }, 0 }, { 3, 4 } },
};
NvBlastAssetDesc assetDesc = { chunksCount, c_chunks, 3, c_bonds };
{
// create asset with chunk map
std::vector<char> scratch((size_t)NvBlastGetRequiredScratchForCreateAsset(&assetDesc, myLog));
void* amem = alignedZeroedAlloc(NvBlastGetAssetMemorySize(&assetDesc, myLog));
NvBlastAsset* asset = NvBlastCreateAsset(amem, &assetDesc, scratch.data(), myLog);
EXPECT_TRUE(asset != nullptr);
for (uint32_t i = 0; i < 14 - 2; i++)
{
// create actor
NvBlastActorDesc actorDesc;
actorDesc.initialBondHealths = actorDesc.initialSupportChunkHealths = nullptr;
actorDesc.uniformInitialBondHealth = actorDesc.uniformInitialLowerSupportChunkHealth = 1.0f;
void* fmem = alignedZeroedAlloc(NvBlastAssetGetFamilyMemorySize(asset, myLog));
NvBlastFamily* family = NvBlastAssetCreateFamily(fmem, asset, myLog);
scratch.resize((size_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(family, myLog));
NvBlastActor* actor = NvBlastFamilyCreateFirstActor(family, &actorDesc, scratch.data(), myLog);
EXPECT_TRUE(actor != nullptr);
std::vector<NvBlastChunkFractureData> cfData;
cfData.resize(2 + i + 1);
std::vector<NvBlastBondFractureData> bfData;
cfData[0].userdata = 0;
cfData[0].chunkIndex = 1;
cfData[0].health = 10.0f;
cfData[1].userdata = 0;
cfData[1].chunkIndex = 2;
cfData[1].health = 10.0f;
cfData[2 + i].userdata = GUARD;
NvBlastFractureBuffers commands = { 0, 2, nullptr, cfData.data() };
NvBlastFractureBuffers events = { static_cast<uint32_t>(bfData.size()), static_cast<uint32_t>(cfData.size()) - 1, bfData.data(), cfData.data() };
NvBlastActorApplyFracture(&events, actor, &commands, myLog, nullptr);
EXPECT_TRUE(NvBlastActorIsSplitRequired(actor, myLog));
EXPECT_WARNING;
EXPECT_EQ(GUARD, cfData[cfData.size() - 1].userdata);
EXPECT_EQ(2 + i, events.chunkFractureCount);
for (uint32_t j = 0; j < events.chunkFractureCount; j++)
{
EXPECT_EQ(events.chunkFractures[j].chunkIndex, events.chunkFractures[j].userdata);
}
EXPECT_TRUE(NvBlastActorDeactivate(actor, myLog));
alignedFree(family);
}
{
// create actor
NvBlastActorDesc actorDesc;
actorDesc.initialBondHealths = actorDesc.initialSupportChunkHealths = nullptr;
actorDesc.uniformInitialBondHealth = actorDesc.uniformInitialLowerSupportChunkHealth = 1.0f;
void* fmem = alignedZeroedAlloc(NvBlastAssetGetFamilyMemorySize(asset, myLog));
NvBlastFamily* family = NvBlastAssetCreateFamily(fmem, asset, myLog);
scratch.resize((size_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(family, myLog));
NvBlastActor* actor = NvBlastFamilyCreateFirstActor(family, &actorDesc, scratch.data(), myLog);
EXPECT_TRUE(actor != nullptr);
std::vector<NvBlastChunkFractureData> cfData;
cfData.resize(14 + 1);
cfData[cfData.size() - 1].userdata = GUARD;
std::vector<NvBlastBondFractureData> bfData;
cfData[0].userdata = 0;
cfData[0].chunkIndex = 1;
cfData[0].health = 10.0f;
cfData[1].userdata = 0;
cfData[1].chunkIndex = 2;
cfData[1].health = 10.0f;
cfData[14].userdata = GUARD;
NvBlastFractureBuffers commands = { 0, 2, nullptr, cfData.data() };
NvBlastFractureBuffers events = { static_cast<uint32_t>(bfData.size()), static_cast<uint32_t>(cfData.size()) - 1, bfData.data(), cfData.data() };
NvBlastActorApplyFracture(&events, actor, &commands, myLog, nullptr);
EXPECT_TRUE(NvBlastActorIsSplitRequired(actor, myLog));
EXPECT_NO_WARNING;
EXPECT_EQ(14, events.chunkFractureCount);
for (uint32_t i = 0; i < events.chunkFractureCount; i++)
{
EXPECT_EQ(events.chunkFractures[i].chunkIndex, events.chunkFractures[i].userdata);
}
ASSERT_EQ(GUARD, cfData[cfData.size() - 1].userdata);
scratch.resize((size_t)NvBlastActorGetRequiredScratchForSplit(actor, myLog));
std::vector<NvBlastActor*> newActors(NvBlastActorGetMaxActorCountForSplit(actor, myLog));
NvBlastActorSplitEvent result;
result.deletedActor = nullptr;
result.newActors = newActors.data();
size_t newActorsCount = NvBlastActorSplit(&result, actor, static_cast<uint32_t>(newActors.size()), scratch.data(), myLog, nullptr);
newActors.resize(newActorsCount);
EXPECT_EQ(9, newActorsCount);
for (uint32_t i = 0; i < newActorsCount; ++i)
{
const bool actorReleaseResult = NvBlastActorDeactivate(result.newActors[i], myLog);
EXPECT_TRUE(actorReleaseResult);
}
alignedFree(family);
}
alignedFree(asset);
}
EXPECT_NO_WARNING;
}
/*
This test checks that bond or chunk fracture commands passed to NvBlastActorApplyFracture which do not
correspond to the actor passed in are ignored, and that a warning message is fired.
*/
TEST_F(APITest, FractureWarnAndFilterOtherActorCommands)
{
const uint32_t chunksCount = 17;
const NvBlastChunkDesc c_chunks[chunksCount] =
{
// centroid volume parent idx flags ID
{ { 0.0f, 0.0f, 0.0f }, 0.0f, UINT32_MAX, NvBlastChunkDesc::NoFlags, 0 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 1 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 2 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 3 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 4 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 1, NvBlastChunkDesc::NoFlags, 5 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 1, NvBlastChunkDesc::NoFlags, 6 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 2, NvBlastChunkDesc::NoFlags, 7 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 2, NvBlastChunkDesc::NoFlags, 8 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 5, NvBlastChunkDesc::NoFlags, 9 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 5, NvBlastChunkDesc::NoFlags, 10 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 6, NvBlastChunkDesc::NoFlags, 11 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 6, NvBlastChunkDesc::NoFlags, 12 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 7, NvBlastChunkDesc::NoFlags, 13 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 7, NvBlastChunkDesc::NoFlags, 14 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 8, NvBlastChunkDesc::NoFlags, 15 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 8, NvBlastChunkDesc::NoFlags, 16 },
};
const NvBlastBondDesc c_bonds[4] =
{
// normal, area, centroid, userdata, chunks
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 1.0f, 0.0f, 0.0f }, 0 }, { 1, 2 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 1.0f, 0.0f, 0.0f }, 0 }, { 2, 3 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 1.0f, 0.0f, 0.0f }, 0 }, { 3, 4 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 1.0f, 0.0f, 0.0f }, 0 }, { 1, 3 } }
};
NvBlastAssetDesc assetDesc = { chunksCount, c_chunks, 4, c_bonds };
// create asset with chunk map
std::vector<char> scratch((size_t)NvBlastGetRequiredScratchForCreateAsset(&assetDesc, myLog));
void* amem = alignedZeroedAlloc(NvBlastGetAssetMemorySize(&assetDesc, myLog));
NvBlastAsset* asset = NvBlastCreateAsset(amem, &assetDesc, scratch.data(), myLog);
EXPECT_TRUE(asset != nullptr);
// create actor
NvBlastActorDesc actorDesc;
actorDesc.initialBondHealths = actorDesc.initialSupportChunkHealths = nullptr;
actorDesc.uniformInitialBondHealth = actorDesc.uniformInitialLowerSupportChunkHealth = 1.0f;
void* fmem = alignedZeroedAlloc(NvBlastAssetGetFamilyMemorySize(asset, myLog));
NvBlastFamily* family = NvBlastAssetCreateFamily(fmem, asset, myLog);
scratch.resize((size_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(family, myLog));
NvBlastActor* actor = NvBlastFamilyCreateFirstActor(family, &actorDesc, scratch.data(), myLog);
EXPECT_TRUE(actor != nullptr);
// split in 2
std::vector<NvBlastActor*> actors;
{
NvBlastBondFractureData command[] =
{
{ 0, 0, 2, 10.0f },
{ 0, 1, 2, 10.0f }
};
NvBlastFractureBuffers commands = { 2, 0, command, nullptr };
NvBlastActorApplyFracture(nullptr, actor, &commands, myLog, nullptr);
EXPECT_TRUE(NvBlastActorIsSplitRequired(actor, myLog));
scratch.resize((size_t)NvBlastActorGetRequiredScratchForSplit(actor, myLog));
std::vector<NvBlastActor*> newActors(NvBlastActorGetMaxActorCountForSplit(actor, myLog));
NvBlastActorSplitEvent result;
result.deletedActor = nullptr;
result.newActors = newActors.data();
size_t newActorsCount = NvBlastActorSplit(&result, actor, static_cast<uint32_t>(newActors.size()), scratch.data(), myLog, nullptr);
EXPECT_EQ(2, newActorsCount);
EXPECT_EQ(actor, result.deletedActor);
actors.insert(actors.begin(), result.newActors, result.newActors + newActorsCount);
}
// damage bonds belonging to other actors, nothing expected to be broken
{
for (uint32_t i = 0; i < actors.size(); ++i)
{
NvBlastActor* actor = actors[i];
NvBlastActor* otherActor = actors[(i + 1) % 2];
// get graph nodes check
std::vector<uint32_t> graphNodeIndices;
graphNodeIndices.resize(NvBlastActorGetGraphNodeCount(otherActor, nullptr));
uint32_t graphNodesCount = NvBlastActorGetGraphNodeIndices(graphNodeIndices.data(), (uint32_t)graphNodeIndices.size(), otherActor, nullptr);
EXPECT_EQ(graphNodesCount, 2);
NvBlastBondFractureData command[] =
{
{ 0, graphNodeIndices[0], graphNodeIndices[1], 10.0f }
};
NvBlastFractureBuffers commands = { 1, 0, command, nullptr };
NvBlastActorApplyFracture(nullptr, actor, &commands, myLog, nullptr);
EXPECT_WARNING;
EXPECT_FALSE(NvBlastActorIsSplitRequired(actor, myLog));
scratch.resize((size_t)NvBlastActorGetRequiredScratchForSplit(actor, myLog));
std::vector<NvBlastActor*> newActors(NvBlastActorGetMaxActorCountForSplit(actor, myLog));
NvBlastActorSplitEvent result;
result.deletedActor = nullptr;
result.newActors = newActors.data();
size_t newActorsCount = NvBlastActorSplit(&result, actor, static_cast<uint32_t>(newActors.size()), scratch.data(), myLog, nullptr);
EXPECT_EQ(0, newActorsCount);
EXPECT_EQ(nullptr, result.deletedActor);
}
}
// damage bonds, split actors in 2 each
std::vector<NvBlastActor*> actors2;
{
for (uint32_t i = 0; i < 2; ++i)
{
NvBlastActor* actor = actors[i];
// get graph nodes check
std::vector<uint32_t> graphNodeIndices;
graphNodeIndices.resize(NvBlastActorGetGraphNodeCount(actor, nullptr));
uint32_t graphNodesCount = NvBlastActorGetGraphNodeIndices(graphNodeIndices.data(), (uint32_t)graphNodeIndices.size(), actor, nullptr);
EXPECT_EQ(graphNodesCount, 2);
NvBlastBondFractureData command[] =
{
{ 0, graphNodeIndices[0], graphNodeIndices[1], 10.0f }
};
NvBlastFractureBuffers commands = { 1, 0, command, nullptr };
NvBlastActorApplyFracture(nullptr, actor, &commands, myLog, nullptr);
EXPECT_TRUE(NvBlastActorIsSplitRequired(actor, myLog));
scratch.resize((size_t)NvBlastActorGetRequiredScratchForSplit(actor, myLog));
std::vector<NvBlastActor*> newActors(NvBlastActorGetMaxActorCountForSplit(actor, myLog));
NvBlastActorSplitEvent result;
result.deletedActor = nullptr;
result.newActors = newActors.data();
size_t newActorsCount = NvBlastActorSplit(&result, actor, static_cast<uint32_t>(newActors.size()), scratch.data(), myLog, nullptr);
EXPECT_EQ(2, newActorsCount);
EXPECT_EQ(actor, result.deletedActor);
actors2.insert(actors2.begin(), result.newActors, result.newActors + newActorsCount);
}
}
// damage chunk belonging to other actor (expect no split or damage taken)
{
for (uint32_t i = 0; i < actors.size(); ++i)
{
NvBlastActor* actor = actors[i];
NvBlastActor* otherActor = actors[(i + 1) % 2];
uint32_t chunkToDamage;
NvBlastActorGetVisibleChunkIndices(&chunkToDamage, 1, otherActor, myLog);
NvBlastChunkFractureData command[] =
{
{ 0, chunkToDamage, 0.9f },
};
NvBlastFractureBuffers commands = { 0, 1, nullptr, command };
NvBlastActorApplyFracture(nullptr, actor, &commands, myLog, nullptr);
EXPECT_WARNING;
EXPECT_FALSE(NvBlastActorIsSplitRequired(actor, myLog));
scratch.resize((size_t)NvBlastActorGetRequiredScratchForSplit(actor, myLog));
std::vector<NvBlastActor*> newActors(NvBlastActorGetMaxActorCountForSplit(actor, myLog));
NvBlastActorSplitEvent result;
result.deletedActor = nullptr;
result.newActors = newActors.data();
size_t newActorsCount = NvBlastActorSplit(&result, actor, static_cast<uint32_t>(newActors.size()), scratch.data(), myLog, nullptr);
EXPECT_EQ(0, newActorsCount);
EXPECT_EQ(nullptr, result.deletedActor);
EXPECT_EQ(1, NvBlastActorGetVisibleChunkCount(actor, myLog));
uint32_t chunkIndex;
NvBlastActorGetVisibleChunkIndices(&chunkIndex, 1, actor, myLog);
EXPECT_NE(chunkToDamage, chunkIndex);
}
}
for (NvBlastActor* actor : actors2)
{
NvBlastActorDeactivate(actor, myLog);
}
alignedFree(family);
alignedFree(asset);
EXPECT_NO_WARNING;
}
/**
If duplicate bonds are passed, the asset create routine ignores them (but fires a warning).
We pass bonds duplicated against the world chunk and fully fracture the actor once.
*/
TEST_F(APITest, FractureWithBondDuplicates)
{
const uint32_t chunksCount = 17;
const NvBlastChunkDesc c_chunks[chunksCount] =
{
// centroid volume parent idx flags ID
{ { 0.0f, 0.0f, 0.0f }, 0.0f, UINT32_MAX, NvBlastChunkDesc::NoFlags, 0 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 1 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 2 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 3 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 4 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 5 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 6 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 7 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 8 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 5, NvBlastChunkDesc::NoFlags, 9 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 5, NvBlastChunkDesc::NoFlags, 10 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 6, NvBlastChunkDesc::NoFlags, 11 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 6, NvBlastChunkDesc::NoFlags, 12 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 7, NvBlastChunkDesc::NoFlags, 13 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 7, NvBlastChunkDesc::NoFlags, 14 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 8, NvBlastChunkDesc::NoFlags, 15 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 8, NvBlastChunkDesc::NoFlags, 16 },
};
const uint32_t bondCount = 20;
const uint32_t world = ~(uint32_t)0; // world chunk => invalid index
const NvBlastBondDesc c_bonds[bondCount] =
{
// normal, area, centroid, userdata, chunks
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 1.0f, 0.0f, 0.0f }, 0 },{ 1, world } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 1.0f, 0.0f, 0.0f }, 0 },{ 2, 1 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 1.0f, 0.0f, 0.0f }, 0 },{ 2, world } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 1.0f, 0.0f, 0.0f }, 0 },{ 2, world } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 1.0f, 0.0f, 0.0f }, 0 },{ 3, 1 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 1.0f, 0.0f, 0.0f }, 0 },{ 4, 3 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 1.0f, 0.0f, 0.0f }, 0 },{ 4, 2 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 1.0f, 0.0f, 0.0f }, 0 },{ 4, world } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 1.0f, 0.0f, 0.0f }, 0 },{ 5, 1 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 1.0f, 0.0f, 0.0f }, 0 },{ 5, world } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 1.0f, 0.0f, 0.0f }, 0 },{ 6, 5 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 1.0f, 0.0f, 0.0f }, 0 },{ 6, 2 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 1.0f, 0.0f, 0.0f }, 0 },{ 6, world } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 1.0f, 0.0f, 0.0f }, 0 },{ 6, world } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 1.0f, 0.0f, 0.0f }, 0 },{ 7, 5 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 1.0f, 0.0f, 0.0f }, 0 },{ 7, 3 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 1.0f, 0.0f, 0.0f }, 0 },{ 8, 7 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 1.0f, 0.0f, 0.0f }, 0 },{ 8, 6 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 1.0f, 0.0f, 0.0f }, 0 },{ 8, 4 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 1.0f, 0.0f, 0.0f }, 0 },{ 8, world } }
};
NvBlastAssetDesc assetDesc = { chunksCount, c_chunks, bondCount, c_bonds };
// create asset
std::vector<char> scratch((size_t)NvBlastGetRequiredScratchForCreateAsset(&assetDesc, myLog));
void* amem = alignedZeroedAlloc(NvBlastGetAssetMemorySize(&assetDesc, myLog));
NvBlastAsset* asset = NvBlastCreateAsset(amem, &assetDesc, scratch.data(), myLog);
EXPECT_WARNING;
EXPECT_TRUE(asset != nullptr);
// create actor
NvBlastActorDesc actorDesc;
actorDesc.initialBondHealths = actorDesc.initialSupportChunkHealths = nullptr;
actorDesc.uniformInitialBondHealth = actorDesc.uniformInitialLowerSupportChunkHealth = 1.0f;
void* fmem = alignedZeroedAlloc(NvBlastAssetGetFamilyMemorySize(asset, myLog));
NvBlastFamily* family = NvBlastAssetCreateFamily(fmem, asset, myLog);
scratch.resize((size_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(family, myLog));
NvBlastActor* actor = NvBlastFamilyCreateFirstActor(family, &actorDesc, scratch.data(), myLog);
EXPECT_TRUE(actor != nullptr);
// split in 2
std::vector<NvBlastActor*> actors;
{
NvBlastExtRadialDamageDesc damage = {
10.0f, // compressive
{ 0.0f, 0.0f, 0.0f }, // position
100.0f, // min radius - maximum damage
100.0f // max radius - zero damage
};
NvBlastBondFractureData outBondFracture[bondCount];
NvBlastChunkFractureData outChunkFracture[chunksCount];
NvBlastFractureBuffers events;
events.bondFractureCount = 2;
events.bondFractures = outBondFracture;
events.chunkFractureCount = 2;
events.chunkFractures = outChunkFracture;
NvBlastExtProgramParams programParams = { &damage, nullptr };
NvBlastDamageProgram program = {
NvBlastExtFalloffGraphShader,
NvBlastExtFalloffSubgraphShader
};
NvBlastActorGenerateFracture(&events, actor, program, &programParams, myLog, nullptr);
NvBlastActorApplyFracture(nullptr, actor, &events, myLog, nullptr);
EXPECT_TRUE(NvBlastActorIsSplitRequired(actor, myLog));
scratch.resize((size_t)NvBlastActorGetRequiredScratchForSplit(actor, myLog));
std::vector<NvBlastActor*> newActors(NvBlastActorGetMaxActorCountForSplit(actor, myLog));
NvBlastActorSplitEvent result;
result.deletedActor = nullptr;
result.newActors = newActors.data();
size_t newActorsCount = NvBlastActorSplit(&result, actor, static_cast<uint32_t>(newActors.size()), scratch.data(), myLog, nullptr);
EXPECT_EQ(8, newActorsCount);
EXPECT_EQ(actor, result.deletedActor);
actors.insert(actors.begin(), result.newActors, result.newActors + newActorsCount);
}
for (NvBlastActor* actor : actors)
{
NvBlastActorDeactivate(actor, myLog);
}
alignedFree(family);
alignedFree(asset);
EXPECT_NO_WARNING;
}
#if 0
TEST(APITest, UserChunkMap)
{
for (int i = 0; i < 2; ++i)
{
// Choose descriptor list
const NvBlastAssetDesc* descs = nullptr;
size_t size = 0;
switch (i)
{
case 0:
descs = g_assetDescs;
size = sizeof(g_assetDescs) / sizeof(g_assetDescs[0]);
break;
case 1:
descs = g_assetDescsMissingCoverage;
size = sizeof(g_assetDescsMissingCoverage) / sizeof(g_assetDescsMissingCoverage[0]);
break;
default:
continue;
}
// Iterate over list
for (size_t j = 0; j < size; ++j)
{
// Create asset
const NvBlastAssetDesc* desc = descs + j;
std::vector<char> scratch((size_t)NvBlastGetRequiredScratchForCreateAsset(desc));
std::vector<uint32_t> chunkMap(desc->chunkCount);
NvBlastAsset* asset = NvBlastCreateAsset(&chunkMap[0], desc, alignedAlloc<malloc>, scratch.data(), nullptr);
EXPECT_TRUE(asset);
// Test map
            Nv::Blast::Asset& a = *static_cast<Nv::Blast::Asset*>(asset);
uint32_t supportChunkCount = 0;
uint32_t subsupportChunkCount = 0;
for (uint32_t i = 0; i < desc->chunkCount; ++i)
{
const uint32_t map = chunkMap[i];
if (Nv::Blast::isInvalidIndex(map))
{
continue;
}
else if (map < a.m_firstSubsupportChunkIndex)
{
                    EXPECT_LT(map, a.m_graph.m_nodeCount);
++supportChunkCount;
}
else
{
                    EXPECT_LT(map, a.m_chunkCount);
                    EXPECT_GE(map, a.m_graph.m_nodeCount);
++subsupportChunkCount;
}
}
            EXPECT_EQ(supportChunkCount, a.m_graph.m_nodeCount);
            EXPECT_EQ(subsupportChunkCount, a.getLowerSupportChunkCount() - a.m_graph.m_nodeCount);
// Release asset
NvBlastAssetRelease(asset, free, nullptr);
}
}
}
#endif
TEST_F(APITest, NoBondsSausage)
{
// create asset
const NvBlastChunkDesc c_chunks[4] =
{
// centroid volume parent idx flags ID
{ { 0.0f, 0.0f, 0.0f }, 0.0f, UINT32_MAX, NvBlastChunkDesc::NoFlags, 0 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 1 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 1, NvBlastChunkDesc::NoFlags, 2 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 2, NvBlastChunkDesc::NoFlags, 3 }
};
NvBlastAssetDesc assetDesc;
assetDesc.chunkCount = 4;
assetDesc.chunkDescs = c_chunks;
assetDesc.bondCount = 0;
assetDesc.bondDescs = nullptr;
std::vector<char> scratch((size_t)NvBlastGetRequiredScratchForCreateAsset(&assetDesc, messageLog));
void* amem = alignedZeroedAlloc(NvBlastGetAssetMemorySize(&assetDesc, messageLog));
NvBlastAsset* asset = NvBlastCreateAsset(amem, &assetDesc, scratch.data(), messageLog);
const NvBlastChunk* chunks = NvBlastAssetGetChunks(asset, messageLog);
EXPECT_TRUE(asset != nullptr);
// create actor
NvBlastActorDesc actorDesc;
actorDesc.initialBondHealths = actorDesc.initialSupportChunkHealths = nullptr;
actorDesc.uniformInitialBondHealth = actorDesc.uniformInitialLowerSupportChunkHealth = 1.0f;
void* fmem = alignedZeroedAlloc(NvBlastAssetGetFamilyMemorySize(asset, messageLog));
NvBlastFamily* family = NvBlastAssetCreateFamily(fmem, asset, messageLog);
scratch.resize((size_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(family, messageLog));
NvBlastActor* actor = NvBlastFamilyCreateFirstActor(family, &actorDesc, scratch.data(), messageLog);
EXPECT_TRUE(actor != nullptr);
// check visible chunk
{
EXPECT_EQ(NvBlastActorGetVisibleChunkCount(actor, messageLog), 1);
uint32_t chunkIndex;
NvBlastActorGetVisibleChunkIndices(&chunkIndex, 1, actor, messageLog);
EXPECT_EQ(chunks[chunkIndex].userData, 0);
}
// damage
NvBlastExtRadialDamageDesc damage = {
10.0f, // compressive
{ 0.0f, 0.0f, 0.0f }, // position
4.0f, // min radius - maximum damage
6.0f // max radius - zero damage
};
NvBlastBondFractureData outBondFracture[2];
NvBlastChunkFractureData outChunkFracture[2];
NvBlastFractureBuffers events;
events.bondFractureCount = 2;
events.bondFractures = outBondFracture;
events.chunkFractureCount = 2;
events.chunkFractures = outChunkFracture;
NvBlastExtProgramParams programParams = { &damage, nullptr };
NvBlastDamageProgram program = {
NvBlastExtFalloffGraphShader,
NvBlastExtFalloffSubgraphShader
};
NvBlastActorGenerateFracture(&events, actor, program, &programParams, messageLog, nullptr);
NvBlastActorApplyFracture(&events, actor, &events, messageLog, nullptr);
EXPECT_TRUE(NvBlastActorIsSplitRequired(actor, messageLog));
EXPECT_EQ(0, events.bondFractureCount);
EXPECT_EQ(1, events.chunkFractureCount);
// split
    NvBlastActor* newActors[8]; // generous upper bound: the lower-support chunk count, plus space for the deleted actor
NvBlastActorSplitEvent result;
result.deletedActor = nullptr;
result.newActors = newActors;
scratch.resize((size_t)NvBlastActorGetRequiredScratchForSplit(actor, messageLog));
size_t newActorsCount = NvBlastActorSplit(&result, actor, 8, scratch.data(), messageLog, nullptr);
EXPECT_EQ(1, newActorsCount);
    EXPECT_EQ(actor, result.deletedActor);
// check visible chunk
{
EXPECT_EQ(NvBlastActorGetVisibleChunkCount(result.newActors[0], messageLog), 1);
uint32_t chunkIndex;
NvBlastActorGetVisibleChunkIndices(&chunkIndex, 1, result.newActors[0], messageLog);
EXPECT_EQ(chunks[chunkIndex].userData, 3);
}
// release all
for (uint32_t i = 0; i < newActorsCount; ++i)
{
const bool actorReleaseResult = NvBlastActorDeactivate(result.newActors[i], messageLog);
EXPECT_TRUE(actorReleaseResult);
}
alignedFree(family);
alignedFree(asset);
}
TEST_F(APITest, SplitOnlyWhenNecessary)
{
static const uint32_t GUARD = 0xb1a57;
const uint32_t chunksCount = 17;
const NvBlastChunkDesc c_chunks[chunksCount] =
{
// centroid volume parent idx flags ID
{ { 0.0f, 0.0f, 0.0f }, 0.0f, UINT32_MAX, NvBlastChunkDesc::NoFlags, 0 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 1 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 2 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 3 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 4 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 1, NvBlastChunkDesc::NoFlags, 5 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 1, NvBlastChunkDesc::NoFlags, 6 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 2, NvBlastChunkDesc::NoFlags, 7 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 2, NvBlastChunkDesc::NoFlags, 8 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 5, NvBlastChunkDesc::NoFlags, 9 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 5, NvBlastChunkDesc::NoFlags, 10 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 6, NvBlastChunkDesc::NoFlags, 11 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 6, NvBlastChunkDesc::NoFlags, 12 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 7, NvBlastChunkDesc::NoFlags, 13 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 7, NvBlastChunkDesc::NoFlags, 14 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 8, NvBlastChunkDesc::NoFlags, 15 },
{ { 0.0f, 0.0f, 0.0f }, 0.0f, 8, NvBlastChunkDesc::NoFlags, 16 },
};
const NvBlastBondDesc c_bonds[4] =
{
// normal, area, centroid, userdata, chunks
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 1.0f, 0.0f, 0.0f }, 0 }, { 1, 2 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 1.0f, 0.0f, 0.0f }, 0 }, { 2, 3 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 1.0f, 0.0f, 0.0f }, 0 }, { 3, 4 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 1.0f, 0.0f, 0.0f }, 0 }, { 1, 3 } }
};
NvBlastAssetDesc assetDesc = { chunksCount, c_chunks, 4, c_bonds };
// create asset with chunk map
std::vector<char> scratch((size_t)NvBlastGetRequiredScratchForCreateAsset(&assetDesc, myLog));
void* amem = alignedZeroedAlloc(NvBlastGetAssetMemorySize(&assetDesc, myLog));
NvBlastAsset* asset = NvBlastCreateAsset(amem, &assetDesc, scratch.data(), myLog);
EXPECT_TRUE(asset != nullptr);
// create actor
NvBlastActorDesc actorDesc;
actorDesc.initialBondHealths = actorDesc.initialSupportChunkHealths = nullptr;
actorDesc.uniformInitialBondHealth = actorDesc.uniformInitialLowerSupportChunkHealth = 1.0f;
void* fmem = alignedZeroedAlloc(NvBlastAssetGetFamilyMemorySize(asset, myLog));
NvBlastFamily* family = NvBlastAssetCreateFamily(fmem, asset, myLog);
scratch.resize((size_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(family, myLog));
NvBlastActor* actor = NvBlastFamilyCreateFirstActor(family, &actorDesc, scratch.data(), myLog);
EXPECT_TRUE(actor != nullptr);
// damage health only (expect no split)
{
NvBlastBondFractureData command[] =
{
{ 0, 0, 1, 0.99f },
{ 0, 1, 2, 0.50f },
{ 0, 2, 3, 0.01f }
};
NvBlastFractureBuffers commands = { 3, 0, command, nullptr };
NvBlastActorApplyFracture(nullptr, actor, &commands, myLog, nullptr);
EXPECT_FALSE(NvBlastActorIsSplitRequired(actor, messageLog));
scratch.resize((size_t)NvBlastActorGetRequiredScratchForSplit(actor, myLog));
std::vector<NvBlastActor*> newActors(NvBlastActorGetMaxActorCountForSplit(actor, myLog));
NvBlastActorSplitEvent result;
result.deletedActor = nullptr;
result.newActors = newActors.data();
size_t newActorsCount = NvBlastActorSplit(&result, actor, static_cast<uint32_t>(newActors.size()), scratch.data(), myLog, nullptr);
EXPECT_EQ(0, newActorsCount);
EXPECT_EQ(nullptr, result.deletedActor);
EXPECT_EQ(1, NvBlastActorGetVisibleChunkCount(actor, myLog));
uint32_t chunkIndex;
NvBlastActorGetVisibleChunkIndices(&chunkIndex, 1, actor, myLog);
EXPECT_EQ(0, chunkIndex);
}
// break 1 bond (expect no split)
{
NvBlastBondFractureData command[] =
{
{ 0, 0, 2, 10.0f },
};
NvBlastFractureBuffers commands = { 1, 0, command, nullptr };
NvBlastActorApplyFracture(nullptr, actor, &commands, myLog, nullptr);
EXPECT_TRUE(NvBlastActorIsSplitRequired(actor, messageLog));
scratch.resize((size_t)NvBlastActorGetRequiredScratchForSplit(actor, myLog));
std::vector<NvBlastActor*> newActors(NvBlastActorGetMaxActorCountForSplit(actor, myLog));
NvBlastActorSplitEvent result;
result.deletedActor = nullptr;
result.newActors = newActors.data();
size_t newActorsCount = NvBlastActorSplit(&result, actor, static_cast<uint32_t>(newActors.size()), scratch.data(), myLog, nullptr);
EXPECT_EQ(0, newActorsCount);
EXPECT_EQ(nullptr, result.deletedActor);
EXPECT_EQ(1, NvBlastActorGetVisibleChunkCount(actor, myLog));
uint32_t chunkIndex;
NvBlastActorGetVisibleChunkIndices(&chunkIndex, 1, actor, myLog);
EXPECT_EQ(0, chunkIndex);
}
// split in 4
std::vector<NvBlastActor*> actors;
{
NvBlastBondFractureData command[] =
{
{ 0, 0, 1, 10.0f },
{ 0, 1, 2, 10.0f },
{ 0, 2, 3, 10.0f }
};
NvBlastFractureBuffers commands = { 3, 0, command, nullptr };
NvBlastActorApplyFracture(nullptr, actor, &commands, myLog, nullptr);
EXPECT_TRUE(NvBlastActorIsSplitRequired(actor, messageLog));
scratch.resize((size_t)NvBlastActorGetRequiredScratchForSplit(actor, myLog));
std::vector<NvBlastActor*> newActors(NvBlastActorGetMaxActorCountForSplit(actor, myLog));
NvBlastActorSplitEvent result;
result.deletedActor = nullptr;
result.newActors = newActors.data();
size_t newActorsCount = NvBlastActorSplit(&result, actor, static_cast<uint32_t>(newActors.size()), scratch.data(), myLog, nullptr);
EXPECT_EQ(4, newActorsCount);
EXPECT_EQ(actor, result.deletedActor);
actors.insert(actors.begin(), result.newActors, result.newActors + newActorsCount);
}
// damage chunk's health only (expect no split)
{
for (NvBlastActor* actor : actors)
{
uint32_t chunkToDamage;
NvBlastActorGetVisibleChunkIndices(&chunkToDamage, 1, actor, myLog);
NvBlastChunkFractureData command[] =
{
{ 0, chunkToDamage, 0.9f },
};
NvBlastFractureBuffers commands = { 0, 1, nullptr, command };
NvBlastActorApplyFracture(nullptr, actor, &commands, myLog, nullptr);
EXPECT_FALSE(NvBlastActorIsSplitRequired(actor, messageLog));
scratch.resize((size_t)NvBlastActorGetRequiredScratchForSplit(actor, myLog));
std::vector<NvBlastActor*> newActors(NvBlastActorGetMaxActorCountForSplit(actor, myLog));
NvBlastActorSplitEvent result;
result.deletedActor = nullptr;
result.newActors = newActors.data();
size_t newActorsCount = NvBlastActorSplit(&result, actor, static_cast<uint32_t>(newActors.size()), scratch.data(), myLog, nullptr);
EXPECT_EQ(0, newActorsCount);
EXPECT_EQ(nullptr, result.deletedActor);
EXPECT_EQ(1, NvBlastActorGetVisibleChunkCount(actor, myLog));
uint32_t chunkIndex;
NvBlastActorGetVisibleChunkIndices(&chunkIndex, 1, actor, myLog);
EXPECT_EQ(chunkToDamage, chunkIndex);
}
}
for (NvBlastActor* actor : actors)
{
NvBlastActorDeactivate(actor, myLog);
}
alignedFree(family);
alignedFree(asset);
EXPECT_NO_WARNING;
}
#if NV_WINDOWS_FAMILY
#include <windows.h>
TEST_F(APITest, CExportsNoNameMangling)
{
//
// tests the lib-link-free approach using unmangled names (extern "C")
//
    const char* dllName = "NvBlast.dll";
    HMODULE dllHandle = LoadLibraryA(dllName); // narrow-string load; TEXT() on a variable would not compile under UNICODE builds
DWORD error = GetLastError();
ASSERT_TRUE(dllHandle != nullptr);
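    // Resolve each exported C function by name; the unmangled (extern "C") symbols make GetProcAddress lookups possible.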
// Asset functions
typedef size_t(*NvBlastGetRequiredScratchForCreateAsset)(const NvBlastAssetDesc* desc);
typedef size_t(*NvBlastGetAssetMemorySize)(const NvBlastAssetDesc* desc);
typedef NvBlastAsset*(*NvBlastCreateAsset)(void* mem, const NvBlastAssetDesc* desc, void* scratch, NvBlastLog logFn);
    NvBlastGetRequiredScratchForCreateAsset assetCreateRequiredScratch = (NvBlastGetRequiredScratchForCreateAsset)GetProcAddress(dllHandle, "NvBlastGetRequiredScratchForCreateAsset");
    ASSERT_TRUE(assetCreateRequiredScratch != nullptr);
    NvBlastGetAssetMemorySize assetGetMemorySize = (NvBlastGetAssetMemorySize)GetProcAddress(dllHandle, "NvBlastGetAssetMemorySize");
    ASSERT_TRUE(assetGetMemorySize != nullptr);
    NvBlastCreateAsset assetCreate = (NvBlastCreateAsset)GetProcAddress(dllHandle, "NvBlastCreateAsset");
ASSERT_TRUE(assetCreate != nullptr);
// Family functions
typedef NvBlastFamily* (*NvBlastAssetCreateFamily)(void* mem, const NvBlastAsset* asset, NvBlastLog logFn);
typedef size_t(*NVBLASTASSETGETFAMILYMEMORYSIZE)(const NvBlastAsset* asset);
    NVBLASTASSETGETFAMILYMEMORYSIZE familyGetMemorySize = (NVBLASTASSETGETFAMILYMEMORYSIZE)GetProcAddress(dllHandle, "NvBlastAssetGetFamilyMemorySize");
    ASSERT_TRUE(familyGetMemorySize != nullptr);
    NvBlastAssetCreateFamily familyCreate = (NvBlastAssetCreateFamily)GetProcAddress(dllHandle, "NvBlastAssetCreateFamily");
ASSERT_TRUE(familyCreate != nullptr);
// Actor functions
typedef size_t(*NvBlastFamilyGetRequiredScratchForCreateFirstActor)(const NvBlastFamily* family);
typedef NvBlastActor* (*NvBlastFamilyCreateFirstActor)(NvBlastFamily* family, const NvBlastActorDesc* desc, void* scratch, NvBlastLog logFn);
typedef bool(*NVBLASTACTORDEACTIVATE)(NvBlastActor* actor);
    NvBlastFamilyGetRequiredScratchForCreateFirstActor actorCreateRequiredScratch = (NvBlastFamilyGetRequiredScratchForCreateFirstActor)GetProcAddress(dllHandle, "NvBlastFamilyGetRequiredScratchForCreateFirstActor");
    ASSERT_TRUE(actorCreateRequiredScratch != nullptr);
    NvBlastFamilyCreateFirstActor actorCreate = (NvBlastFamilyCreateFirstActor)GetProcAddress(dllHandle, "NvBlastFamilyCreateFirstActor");
    ASSERT_TRUE(actorCreate != nullptr);
    NVBLASTACTORDEACTIVATE actorRelease = (NVBLASTACTORDEACTIVATE)GetProcAddress(dllHandle, "NvBlastActorDeactivate");
ASSERT_TRUE(actorRelease != nullptr);
const NvBlastChunkDesc c_chunks[] =
{
// centroid volume parent idx flags ID
{ {0.0f, 0.0f, 0.0f}, 0.0f, UINT32_MAX, NvBlastChunkDesc::NoFlags, 0 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 0 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 0 },
{ {0.0f, 0.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 0 },
};
NvBlastAssetDesc assetDesc;
assetDesc.bondCount = 0;
assetDesc.bondDescs = nullptr;
assetDesc.chunkCount = 4;
assetDesc.chunkDescs = c_chunks;
NvBlastAsset* asset;
{
size_t requiredsize = assetCreateRequiredScratch(&assetDesc);
        std::vector<char> scratch(requiredsize);
void* mem = alignedZeroedAlloc(assetGetMemorySize(&assetDesc));
asset = assetCreate(mem, &assetDesc, scratch.data(), myLog);
ASSERT_TRUE(asset != nullptr);
}
void* fmem = alignedZeroedAlloc(familyGetMemorySize(asset));
NvBlastFamily* family = familyCreate(fmem, asset, myLog);
{
NvBlastActorDesc actorD;
actorD.initialBondHealths = actorD.initialSupportChunkHealths = nullptr;
actorD.uniformInitialBondHealth = actorD.uniformInitialLowerSupportChunkHealth = 1.0f;
        size_t requiredsize = actorCreateRequiredScratch(family);
        std::vector<char> scratch(requiredsize);
NvBlastActor* actor = actorCreate(family, &actorD, scratch.data(), myLog);
ASSERT_TRUE(actor != nullptr);
ASSERT_TRUE(actorRelease(actor));
}
alignedFree(family);
alignedFree(asset);
EXPECT_NO_WARNING;
}
#endif
|
NVIDIA-Omniverse/PhysX/blast/source/test/src/unit/TkTests.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include "TkBaseTest.h"
#include <map>
#include <random>
#include <algorithm>
#include <functional>
#include "NsMemoryBuffer.h"
#include "NvBlastTime.h"
struct ExpectedVisibleChunks
{
ExpectedVisibleChunks() :numActors(0), numChunks(0) {}
ExpectedVisibleChunks(size_t a, size_t c) :numActors(a), numChunks(c) {}
size_t numActors; size_t numChunks;
};
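// Checks every family against its expected actor and visible chunk counts.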
void testResults(std::vector<TkFamily*>& families, std::map<TkFamily*, ExpectedVisibleChunks>& expectedVisibleChunks)
{
size_t numActors = 0;
for (TkFamily* fam : families)
{
auto ex = expectedVisibleChunks[fam];
EXPECT_EQ(ex.numActors, fam->getActorCount());
numActors += ex.numActors;
std::vector<TkActor*> actors(fam->getActorCount());
fam->getActors(actors.data(), static_cast<uint32_t>(actors.size()));
for (TkActor* actor : actors)
{
EXPECT_EQ(ex.numChunks, actor->getVisibleChunkCount());
}
}
size_t numActorsExpected = 0;
for (auto expected : expectedVisibleChunks)
{
numActorsExpected += expected.second.numActors;
}
EXPECT_EQ(numActorsExpected, numActors);
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Tests
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
TEST_F(TkTestStrict, CreateFramework)
{
createFramework();
releaseFramework();
}
TEST_F(TkTestStrict, CreateAsset)
{
createFramework();
createTestAssets();
releaseTestAssets();
releaseFramework();
}
TEST_F(TkTestStrict, ActorDamageNoGroup)
{
createFramework();
createTestAssets();
TkFramework* fwk = NvBlastTkFrameworkGet();
TkActorDesc actorDesc;
actorDesc.asset = testAssets[0];
TkActor* actor = fwk->createActor(actorDesc);
const size_t bondFractureCount = 4;
NvBlastFractureBuffers commands;
NvBlastBondFractureData bdata[bondFractureCount];
for (uint32_t i = 0; i < bondFractureCount; i++)
{
bdata[i].nodeIndex0 = 2 * i + 0;
bdata[i].nodeIndex1 = 2 * i + 1;
bdata[i].health = 1.0f;
}
commands.bondFractureCount = bondFractureCount;
commands.bondFractures = bdata;
commands.chunkFractureCount = 0;
commands.chunkFractures = nullptr;
actor->applyFracture(&commands, &commands);
TkFamily& family = actor->getFamily();
EXPECT_TRUE(commands.bondFractureCount == 4);
EXPECT_TRUE(actor->isPending());
TkGroupDesc gdesc;
gdesc.workerCount = m_taskman->getCpuDispatcher()->getWorkerCount();
TkGroup* group = fwk->createGroup(gdesc);
EXPECT_TRUE(group != nullptr);
m_groupTM->setGroup(group);
group->addActor(*actor);
m_groupTM->process();
m_groupTM->wait();
EXPECT_FALSE(actor->isPending());
EXPECT_EQ(2, family.getActorCount());
releaseFramework();
}
TEST_F(TkTestStrict, ActorDamageGroup)
{
TEST_ZONE_BEGIN("ActorDamageGroup");
createFramework();
createTestAssets();
TkFramework* fwk = NvBlastTkFrameworkGet();
TestFamilyTracker ftrack1, ftrack2;
TkGroupDesc gdesc;
gdesc.workerCount = m_taskman->getCpuDispatcher()->getWorkerCount();
TkGroup* group = fwk->createGroup(gdesc);
EXPECT_TRUE(group != nullptr);
m_groupTM->setGroup(group);
NvBlastExtRadialDamageDesc radialDamage = getRadialDamageDesc(0, 0, 0);
NvBlastExtProgramParams radialDamageParams = { &radialDamage, nullptr };
NvBlastExtShearDamageDesc shearDamage = getShearDamageDesc(0, 0, 0);
NvBlastExtProgramParams shearDamageParams = { &shearDamage, nullptr };
CSParams csDamage0(0, 0.0f);
NvBlastExtProgramParams csDamageParams0 = { &csDamage0, nullptr };
CSParams csDamage1(1, 0.0f);
NvBlastExtProgramParams csDamageParams1 = { &csDamage1, nullptr };
CSParams csDamage2(2, 0.0f);
NvBlastExtProgramParams csDamageParams2 = { &csDamage2, nullptr };
std::vector<TkFamily*> families;
TkFamily* trackedFamily;
std::map<TkFamily*, ExpectedVisibleChunks> expectedVisibleChunks;
{
TkActorDesc adesc(testAssets[0]);
TkActor* actor1 = fwk->createActor(adesc);
EXPECT_TRUE(actor1 != nullptr);
TkActor* actor2 = fwk->createActor(adesc);
EXPECT_TRUE(actor2 != nullptr);
expectedVisibleChunks[&actor1->getFamily()] = ExpectedVisibleChunks(8, 1); // full damage
expectedVisibleChunks[&actor2->getFamily()] = ExpectedVisibleChunks(1, 1); // not split
GeneratorAsset cube;
TkAssetDesc assetDesc;
generateCube(cube, assetDesc, 5, 2);
assetDesc.bondFlags = nullptr;
TkAsset* cubeAsset = fwk->createAsset(assetDesc);
testAssets.push_back(cubeAsset);
TkActorDesc cubeAD(cubeAsset);
TkActor* cubeActor1 = fwk->createActor(cubeAD);
EXPECT_TRUE(cubeActor1 != nullptr);
trackedFamily = &cubeActor1->getFamily();
cubeActor1->getFamily().addListener(ftrack1);
TkActor* cubeActor2 = fwk->createActor(cubeAD);
EXPECT_TRUE(cubeActor2 != nullptr);
expectedVisibleChunks[&cubeActor1->getFamily()] = ExpectedVisibleChunks(2, 4); // split in 2, 4 chunks each
expectedVisibleChunks[&cubeActor2->getFamily()] = ExpectedVisibleChunks(1, 1); // not split
ftrack1.insertActor(cubeActor1);
ftrack2.insertActor(actor1);
actor1->getFamily().addListener(ftrack2);
TEST_ZONE_BEGIN("add to groups");
group->addActor(*cubeActor1);
group->addActor(*cubeActor2);
group->addActor(*actor1);
group->addActor(*actor2);
TEST_ZONE_END("add to groups");
families.push_back(&cubeActor1->getFamily());
families.push_back(&cubeActor2->getFamily());
families.push_back(&actor1->getFamily());
families.push_back(&actor2->getFamily());
cubeActor1->damage(getCubeSlicerProgram(), &csDamageParams0);
actor1->damage(getFalloffProgram(), &radialDamageParams);
}
EXPECT_FALSE(group->endProcess());
m_groupTM->process();
m_groupTM->wait();
testResults(families, expectedVisibleChunks);
{
std::vector<TkActor*> actors(trackedFamily->getActorCount());
trackedFamily->getActors(actors.data(), static_cast<uint32_t>(actors.size()));
for (TkActor* actor : actors)
{
actor->damage(getCubeSlicerProgram(), &csDamageParams1);
}
}
expectedVisibleChunks[trackedFamily] = ExpectedVisibleChunks(4, 2);
m_groupTM->process();
m_groupTM->wait();
testResults(families, expectedVisibleChunks);
{
std::vector<TkActor*> actors(trackedFamily->getActorCount());
trackedFamily->getActors(actors.data(), static_cast<uint32_t>(actors.size()));
for (TkActor* actor : actors)
{
actor->damage(getCubeSlicerProgram(), &csDamageParams2);
}
}
expectedVisibleChunks[trackedFamily] = ExpectedVisibleChunks(8, 1);
m_groupTM->process();
m_groupTM->wait();
testResults(families, expectedVisibleChunks);
{
std::vector<TkActor*> actors(trackedFamily->getActorCount());
trackedFamily->getActors(actors.data(), static_cast<uint32_t>(actors.size()));
TEST_ZONE_BEGIN("damage");
for (TkActor* actor : actors)
{
actor->damage(getFalloffProgram(), &radialDamageParams);
}
TEST_ZONE_END("damage");
}
expectedVisibleChunks[trackedFamily] = ExpectedVisibleChunks(4096, 1);
m_groupTM->process();
m_groupTM->wait();
testResults(families, expectedVisibleChunks);
{
std::vector<TkActor*> actors(trackedFamily->getActorCount());
trackedFamily->getActors(actors.data(), static_cast<uint32_t>(actors.size()));
TEST_ZONE_BEGIN("damage");
for (TkActor* actor : actors)
{
actor->damage(getShearProgram(), &shearDamageParams);
}
TEST_ZONE_END("damage");
}
m_groupTM->process();
m_groupTM->wait();
{
std::vector<TkActor*> actors(trackedFamily->getActorCount());
trackedFamily->getActors(actors.data(), static_cast<uint32_t>(actors.size()));
TEST_ZONE_BEGIN("damage");
for (TkActor* actor : actors)
{
actor->damage(getShearProgram(), &shearDamageParams);
}
TEST_ZONE_END("damage");
}
m_groupTM->process();
m_groupTM->wait();
group->release();
TEST_ZONE_BEGIN("family release");
trackedFamily->release();
TEST_ZONE_END("family release");
releaseTestAssets();
releaseFramework();
TEST_ZONE_END("ActorDamageGroup");
}
TEST_F(TkTestStrict, ActorDamageMultiGroup)
{
createFramework();
createTestAssets();
TkFramework* fwk = NvBlastTkFrameworkGet();
TestFamilyTracker ftrack1, ftrack2;
TkGroupDesc gdesc;
gdesc.workerCount = m_taskman->getCpuDispatcher()->getWorkerCount();
TkGroup* group0 = fwk->createGroup(gdesc);
EXPECT_TRUE(group0 != nullptr);
TkGroup* group1 = fwk->createGroup(gdesc);
EXPECT_TRUE(group1 != nullptr);
TkGroupTaskManager& gtm1 = *TkGroupTaskManager::create(*m_taskman, group1);
TkGroupTaskManager& gtm0 = *TkGroupTaskManager::create(*m_taskman, group0);
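    // Separate task managers allow the two groups to be processed concurrently below.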
std::vector<TkFamily*> families(2);
std::map<TkFamily*, ExpectedVisibleChunks> expectedVisibleChunks;
CSParams csDamage0(0, 0.0f);
NvBlastExtProgramParams csDamageParams0 = { &csDamage0, nullptr };
CSParams csDamage1(1, 0.0f);
NvBlastExtProgramParams csDamageParams1 = { &csDamage1, nullptr };
CSParams csDamage2(2, 0.0f);
NvBlastExtProgramParams csDamageParams2 = { &csDamage2, nullptr };
// prepare 2 equal actors/families and damage
{
GeneratorAsset cube;
TkAssetDesc assetDesc;
generateCube(cube, assetDesc, 6, 2, 5);
assetDesc.bondFlags = nullptr;
TkAsset* cubeAsset = fwk->createAsset(assetDesc);
testAssets.push_back(cubeAsset);
TkActorDesc cubeAD(cubeAsset);
TkActor* cubeActor0 = fwk->createActor(cubeAD);
EXPECT_TRUE(cubeActor0 != nullptr);
cubeActor0->getFamily().addListener(ftrack1);
TkActor* cubeActor1 = fwk->createActor(cubeAD);
EXPECT_TRUE(cubeActor1 != nullptr);
cubeActor1->getFamily().addListener(ftrack2);
ftrack1.insertActor(cubeActor0);
ftrack2.insertActor(cubeActor1);
group0->addActor(*cubeActor0);
group1->addActor(*cubeActor1);
families[0] = (&cubeActor0->getFamily());
families[1] = (&cubeActor1->getFamily());
{
cubeActor0->damage(getCubeSlicerProgram(), &csDamageParams0);
cubeActor0->damage(getCubeSlicerProgram(), &csDamageParams1);
cubeActor1->damage(getCubeSlicerProgram(), &csDamageParams0);
}
expectedVisibleChunks[families[0]] = ExpectedVisibleChunks(4, 2); // split in 4, 2 chunks each
expectedVisibleChunks[families[1]] = ExpectedVisibleChunks(2, 4); // split in 2, 4 chunks each
}
// async process 2 groups
{
EXPECT_GT(gtm0.process(2), (uint32_t)0);
EXPECT_GT(gtm1.process(2), (uint32_t)0);
uint32_t completed = 0;
while (completed < 2)
{
if (gtm0.wait(false))
completed++;
if (gtm1.wait(false))
completed++;
}
}
// checks
testResults(families, expectedVisibleChunks);
EXPECT_EQ(families[0]->getActorCount(), 4);
EXPECT_EQ(group0->getActorCount(), 4);
EXPECT_EQ(families[1]->getActorCount(), 2);
EXPECT_EQ(group1->getActorCount(), 2);
// we have group0 with 4 actors 2 chunks:
// group0: [2]' [2]' [2]' [2]' (family0')
// group1: [4]'' [4]'' (family1'')
// rearrange:
// group0: [2]' [2]' [4]''
// group1: [4]'' [2]' [2]'
{
TkActor* group0Actors[2];
        group0->getActors(group0Actors, 2, 1); // start index 1, to exercise a nonzero offset
TkActor* group1Actors[2];
group1->getActors(group1Actors, 2, 0);
group0Actors[0]->removeFromGroup();
group1->addActor(*group0Actors[0]);
group0Actors[1]->removeFromGroup();
group1->addActor(*group0Actors[1]);
group1Actors[0]->removeFromGroup();
group0->addActor(*group1Actors[0]);
}
// checks
EXPECT_EQ(families[0]->getActorCount(), 4);
EXPECT_EQ(group0->getActorCount(), 3);
EXPECT_EQ(families[1]->getActorCount(), 2);
EXPECT_EQ(group1->getActorCount(), 3);
// damage all
{
TkActor* allActors[6];
families[0]->getActors(allActors, 4, 0);
families[1]->getActors(allActors + 4, 2, 0);
typedef std::pair<TkGroup*, TkFamily*> pair;
std::set<pair> combinations;
for (auto actor : allActors)
{
combinations.emplace(pair(actor->getGroup(), &actor->getFamily()));
if (actor->getVisibleChunkCount() == 4)
{
actor->damage(getCubeSlicerProgram(), &csDamageParams1);
}
actor->damage(getCubeSlicerProgram(), &csDamageParams2);
}
EXPECT_EQ(combinations.size(), 4);
expectedVisibleChunks[families[0]] = ExpectedVisibleChunks(8, 1); // split in 8, 1 chunks each
expectedVisibleChunks[families[1]] = ExpectedVisibleChunks(8, 1); // split in 8, 1 chunks each
}
// async process 2 groups
{
EXPECT_GT(gtm1.process(2), (uint32_t)0);
EXPECT_GT(gtm0.process(2), (uint32_t)0);
uint32_t completed = 0;
while (completed < 2)
{
if (gtm1.wait(false))
completed++;
if (gtm0.wait(false))
completed++;
}
}
// checks
testResults(families, expectedVisibleChunks);
EXPECT_EQ(families[0]->getActorCount(), 8);
EXPECT_EQ(ftrack1.actors.size(), 8);
EXPECT_EQ(group0->getActorCount(), 8);
EXPECT_EQ(families[1]->getActorCount(), 8);
EXPECT_EQ(ftrack2.actors.size(), 8);
EXPECT_EQ(group1->getActorCount(), 8);
// damage till the end, aggressively
std::default_random_engine re;
{
NvBlastExtRadialDamageDesc radialDamage = getRadialDamageDesc(0, 0, 0);
NvBlastExtProgramParams radialDamageParams = { &radialDamage, nullptr };
NvBlastExtShearDamageDesc shearDamage = getShearDamageDesc(0, 0, 0);
NvBlastExtProgramParams shearDamageParams = { &shearDamage, nullptr };
std::vector<TkActor*> actors;
while (1)
{
TEST_ZONE_BEGIN("damage loop");
uint32_t n0 = families[0]->getActorCount();
uint32_t n1 = families[1]->getActorCount();
actors.resize(n0 + n1);
families[0]->getActors(actors.data(), n0, 0);
families[1]->getActors(actors.data() + n0, n1, 0);
bool workTBD = false;
for (TkActor* actor : actors)
{
if (!NvBlastActorCanFracture(actor->getActorLL(), nullptr))
{
continue;
}
workTBD = true;
if (actor->getGraphNodeCount() > 1)
{
actor->damage(getFalloffProgram(), &radialDamageParams);
}
else
{
actor->damage(getShearProgram(), &shearDamageParams);
}
if (re() % 1000 < 500)
{
// switch group
TkGroup* newGroup = actor->getGroup() == group0 ? group1 : group0;
actor->removeFromGroup();
newGroup->addActor(*actor);
}
}
TEST_ZONE_END("damage loop");
if (!workTBD)
break;
// async process 2 groups
{
EXPECT_GT(gtm1.process(2), (uint32_t)0);
EXPECT_GT(gtm0.process(2), (uint32_t)0);
uint32_t completed = 0;
while (completed < 2)
{
if (gtm1.wait(false))
completed++;
if (gtm0.wait(false))
completed++;
}
}
}
}
// checks
EXPECT_EQ(families[0]->getActorCount(), ftrack1.actors.size());
EXPECT_EQ(families[1]->getActorCount(), ftrack2.actors.size());
EXPECT_EQ(65536, families[0]->getActorCount() + families[1]->getActorCount());
EXPECT_EQ(65536, group0->getActorCount() + group1->getActorCount());
gtm0.release();
gtm1.release();
group0->release();
group1->release();
for (auto f : families)
f->release();
releaseTestAssets();
releaseFramework();
}
TEST_F(TkTestStrict, ActorDamageBufferedDamage)
{
createFramework();
TkFramework* fwk = NvBlastTkFrameworkGet();
// group
TkGroupDesc gdesc;
gdesc.workerCount = m_taskman->getCpuDispatcher()->getWorkerCount();
TkGroup* group = fwk->createGroup(gdesc);
EXPECT_TRUE(group != nullptr);
m_groupTM->setGroup(group);
// random engine
std::default_random_engine re;
// cube asset
GeneratorAsset cube;
TkAssetDesc assetDesc;
generateCube(cube, assetDesc, 4, 2, 3);
assetDesc.bondFlags = nullptr;
TkAsset* cubeAsset = fwk->createAsset(assetDesc);
testAssets.push_back(cubeAsset);
// actor desc
TkActorDesc cubeAD(cubeAsset);
    // The test is repeated 'trials' times because the damage order is shuffled randomly inside.
const uint32_t trials = 100;
for (uint32_t i = 0; i < trials; i++)
{
// create actor
TkActor* actor = fwk->createActor(cubeAD);
EXPECT_TRUE(actor != nullptr);
TkFamily* family = (&actor->getFamily());
group->addActor(*actor);
// damage 3 times with CubeSlicer 2 * 2 * 2 = 8 actors
// damage 4 corners with falloff radial 4 * 2 = 8 actors
// total 16 actors
uint32_t expectedActorCount = 16;
        // falloff params
const float P = 0.5f;
const float R = 0.35f;
        // Two of the damage types go through the user's NvBlastDamageProgram; these descriptors must stay alive until the group has been processed.
NvBlastExtRadialDamageDesc userR0 = getRadialDamageDesc(P, P, 0, R, R);
NvBlastExtProgramParams userProgramParams0 = { &userR0, nullptr };
NvBlastExtRadialDamageDesc userR1 = getRadialDamageDesc(-P, P, 0, R, R);
NvBlastExtProgramParams userProgramParams1 = { &userR1, nullptr };
CSParams csDamage0(0, 0.0f);
NvBlastExtProgramParams csDamageParams0 = { &csDamage0, nullptr };
CSParams csDamage1(1, 0.0f);
NvBlastExtProgramParams csDamageParams1 = { &csDamage1, nullptr };
CSParams csDamage2(2, 0.0f);
NvBlastExtProgramParams csDamageParams2 = { &csDamage2, nullptr };
NvBlastExtRadialDamageDesc r0 = getRadialDamageDesc(P, -P, 0, R, R);
NvBlastExtProgramParams rDamageParams0 = { &r0, nullptr };
NvBlastExtRadialDamageDesc r1 = getRadialDamageDesc(-P, -P, 0, R, R);
NvBlastExtProgramParams rDamageParams1 = { &r1, nullptr };
// fill damage functions, shuffle and apply
{
const uint32_t damageCount = 7;
std::vector<std::function<void(void)>> damageFns(damageCount);
damageFns[0] = [&]() { actor->damage(getCubeSlicerProgram(), &csDamageParams0); };
damageFns[1] = [&]() { actor->damage(getCubeSlicerProgram(), &csDamageParams1); };
damageFns[2] = [&]() { actor->damage(getCubeSlicerProgram(), &csDamageParams2); };
damageFns[3] = [&]() { actor->damage(getFalloffProgram(), &rDamageParams0); };
damageFns[4] = [&]() { actor->damage(getFalloffProgram(), &rDamageParams1); };
damageFns[5] = [&]() { actor->damage(getFalloffProgram(), &userProgramParams0); };
damageFns[6] = [&]() { actor->damage(getFalloffProgram(), &userProgramParams1); };
// shuffle order!
std::shuffle(std::begin(damageFns), std::end(damageFns), re);
            for (uint32_t k = 0; k < damageCount; k++) // 'k' avoids shadowing the outer trial counter 'i'
            {
                damageFns[k]();
            }
}
// sync
EXPECT_GT(m_groupTM->process(), (uint32_t)0);
m_groupTM->wait();
        // check
EXPECT_EQ(family->getActorCount(), expectedActorCount);
EXPECT_EQ(group->getActorCount(), expectedActorCount);
// release
std::vector<TkActor*> actors(family->getActorCount());
family->getActors(actors.data(), static_cast<uint32_t>(actors.size()));
for (auto a : actors)
a->removeFromGroup();
family->release();
}
group->release();
releaseFramework();
}
TEST_F(TkTestStrict, CreateActor)
{
createFramework();
TkFramework* framework = NvBlastTkFrameworkGet();
const uint32_t assetDescCount = sizeof(g_assetDescs) / sizeof(g_assetDescs[0]);
std::vector<TkAsset*> assets(assetDescCount);
// assets
for (uint32_t i = 0; i < assetDescCount; ++i)
{
TkAssetDesc desc;
reinterpret_cast<NvBlastAssetDesc&>(desc) = g_assetDescs[i];
desc.bondFlags = nullptr;
assets[i] = framework->createAsset(desc);
EXPECT_TRUE(assets[i] != nullptr);
}
// actors
    std::vector<TkActor*> actors;
    std::vector<TkFamily*> actorFamilies;
for (const TkAsset* asset : assets)
{
for (int i = 0; i < 2; i++)
{
TkActorDesc desc(asset);
TkActor* actor = framework->createActor(desc);
EXPECT_TRUE(actor != nullptr);
EXPECT_TRUE(actor->getActorLL() != nullptr);
//EXPECT_TRUE(&actor->getFamily() != nullptr);
EXPECT_TRUE(actor->getFamily().getActorCount() == 1);
actors.push_back(actor);
EXPECT_TRUE(std::find(actorFamilies.begin(), actorFamilies.end(), &actor->getFamily()) == actorFamilies.end());
actorFamilies.push_back(&actor->getFamily());
}
}
// framework checks
{
std::vector<TkObject*> objects;
// assets
{
const TkType* assetType = framework->getType(TkTypeIndex::Asset);
objects.resize(framework->getObjectCount(*assetType));
EXPECT_TRUE(framework->getObjects(reinterpret_cast<TkIdentifiable**>(objects.data()), static_cast<uint32_t>(objects.size()), *assetType) == static_cast<uint32_t>(objects.size()));
ExpectArrayMatch(objects.data(), objects.size(), (TkObject**)assets.data(), assets.size());
}
// actors
# if(0) // framework does not track actors explicitly anymore
{
const TkType* actorType = framework->getType(TkTypeIndex::Actor);
objects.resize(framework->getObjectCount(*actorType));
EXPECT_TRUE(framework->getObjects(reinterpret_cast<TkIdentifiable**>(objects.data()), objects.size(), *actorType) == objects.size());
ExpectArrayMatch(objects.data(), objects.size(), (TkObject**)actors.data(), actors.size());
}
# endif
// families
{
const TkType* familyType = framework->getType(TkTypeIndex::Family);
objects.resize(framework->getObjectCount(*familyType));
EXPECT_TRUE(framework->getObjects(reinterpret_cast<TkIdentifiable**>(objects.data()), static_cast<uint32_t>(objects.size()), *familyType) == static_cast<uint32_t>(objects.size()));
ExpectArrayMatch(objects.data(), objects.size(), (TkObject**)actorFamilies.data(), actorFamilies.size());
}
}
// release
for (TkActor* actor : actors)
{
actor->release();
}
for (TkAsset* asset : assets)
{
asset->release();
}
releaseFramework();
}
template<int FailMask, int Verbosity>
TkFamily* TkBaseTest<FailMask, Verbosity>::familySerialization(TkFamily* family)
{
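    // NOTE: the serialization round-trip below is currently compiled out (#if 0), so this helper
    // returns nullptr; the test that relies on it is DISABLED accordingly.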
#if 0
TkFramework* fw = NvBlastTkFrameworkGet();
const TkType* familyType = fw->getType(TkTypeIndex::Family);
EXPECT_TRUE(familyType != nullptr);
PsMemoryBuffer* membuf = NVBLAST_NEW(PsMemoryBuffer);
EXPECT_TRUE(membuf != nullptr);
if (membuf != nullptr)
{
const bool result = family->serialize(*membuf);
EXPECT_EQ(true, result);
if (!result)
{
return family;
}
const size_t familyActorCount = family->getActorCount();
const TkAsset* familyAsset = family->getAsset();
family->release();
family = reinterpret_cast<TkFamily*>(fw->deserialize(*membuf));
EXPECT_TRUE(family != nullptr);
if (family != nullptr)
{
EXPECT_EQ(familyActorCount, family->getActorCount());
EXPECT_EQ(familyAsset, family->getAsset());
}
membuf->release();
}
return family;
#endif
return nullptr;
}
TEST_F(TkTestAllowWarnings, DISABLED_FamilySerialization)
{
createFramework();
TkFramework* fwk = NvBlastTkFrameworkGet();
// group
TkGroupDesc gdesc;
gdesc.workerCount = m_taskman->getCpuDispatcher()->getWorkerCount();
TkGroup* group = fwk->createGroup(gdesc);
EXPECT_TRUE(group != nullptr);
m_groupTM->setGroup(group);
// random engine
std::default_random_engine re;
// cube asset
TkAsset* cubeAsset = createCubeAsset(4, 2, 3, false);
// actor desc
TkActorDesc cubeAD(cubeAsset);
// create actor
TkActor* actor = fwk->createActor(cubeAD);
EXPECT_TRUE(actor != nullptr);
TkFamily* family = (&actor->getFamily());
// set an ID
NvBlastID id;
memcpy(id.data, "Observer-expectancy effect", sizeof(NvBlastID)); // Stuffing an arbitrary 16 bytes (The prefix of the given string)
cubeAsset->setID(id);
// serialize/deserialize
family = familySerialization(family);
// fill damage functions, apply one by one and serialize family in between
{
// damage 3 times with CubeSlicer 2 * 2 * 2 = 8 actors
// damage 4 corners with falloff radial 4 * 2 = 8 actors
// total 16 actors
uint32_t expectedActorCount = 16;
// cube slicer params
CSParams csDamage0(0, 0.0f);
NvBlastExtProgramParams csDamageParams0 = { &csDamage0, nullptr };
CSParams csDamage1(1, 0.0f);
NvBlastExtProgramParams csDamageParams1 = { &csDamage1, nullptr };
CSParams csDamage2(2, 0.0f);
NvBlastExtProgramParams csDamageParams2 = { &csDamage2, nullptr };
        // falloff params
const float P = 0.5f;
const float R = 0.35f;
NvBlastExtRadialDamageDesc r0 = getRadialDamageDesc(P, P, 0, R, R);
NvBlastExtRadialDamageDesc r1 = getRadialDamageDesc(-P, P, 0, R, R);
NvBlastExtRadialDamageDesc r2 = getRadialDamageDesc(P, -P, 0, R, R);
NvBlastExtRadialDamageDesc r3 = getRadialDamageDesc(-P, -P, 0, R, R);
NvBlastExtProgramParams r0p = { &r0, nullptr };
NvBlastExtProgramParams r1p = { &r1, nullptr };
NvBlastExtProgramParams r2p = { &r2, nullptr };
NvBlastExtProgramParams r3p = { &r3, nullptr };
const uint32_t damageCount = 7;
std::vector<std::function<void(TkActor* a)>> damageFns(damageCount);
damageFns[0] = [&](TkActor* a) { a->damage(getCubeSlicerProgram(), &csDamageParams0); };
damageFns[1] = [&](TkActor* a) { a->damage(getCubeSlicerProgram(), &csDamageParams1); };
damageFns[2] = [&](TkActor* a) { a->damage(getCubeSlicerProgram(), &csDamageParams2); };
damageFns[3] = [&](TkActor* a) { a->damage(getFalloffProgram(), &r0p); };
damageFns[4] = [&](TkActor* a) { a->damage(getFalloffProgram(), &r1p); };
damageFns[5] = [&](TkActor* a) { a->damage(getFalloffProgram(), &r2p); };
damageFns[6] = [&](TkActor* a) { a->damage(getFalloffProgram(), &r3p); };
std::vector<TkActor*> actors(64);
for (uint32_t i = 0; i < damageCount; i++)
{
actors.resize(family->getActorCount());
family->getActors(actors.data(), static_cast<uint32_t>(actors.size()));
// damage
for (auto actor : actors)
{
group->addActor(*actor);
damageFns[i](actor);
}
// sync
EXPECT_GT(m_groupTM->process(), (uint32_t)0);
m_groupTM->wait();
family = familySerialization(family);
}
// check
EXPECT_EQ(family->getActorCount(), expectedActorCount);
}
// release
family->release();
group->release();
releaseFramework();
}
TEST_F(TkTestStrict, GroupStats)
{
createFramework();
TkFramework* fwk = NvBlastTkFrameworkGet();
// group
TkGroupDesc gdesc;
gdesc.workerCount = m_taskman->getCpuDispatcher()->getWorkerCount();
TkGroup* group = fwk->createGroup(gdesc);
EXPECT_TRUE(group != nullptr);
m_groupTM->setGroup(group);
TkAsset* cubeAsset = createCubeAsset(4, 2);
TkActorDesc cubeDesc(cubeAsset);
TkActor* cubeActor1 = fwk->createActor(cubeDesc);
TkActor* cubeActor2 = fwk->createActor(cubeDesc);
TkActor* cubeActor3 = fwk->createActor(cubeDesc);
TkActor* cubeActor4 = fwk->createActor(cubeDesc);
group->addActor(*cubeActor1);
group->addActor(*cubeActor2);
group->addActor(*cubeActor3);
group->addActor(*cubeActor4);
NvBlastExtRadialDamageDesc r0 = getRadialDamageDesc(0.0f, 0.0f, 0.0f);
NvBlastExtProgramParams radialDamageParams = { &r0, nullptr };
cubeActor1->damage(getFalloffProgram(), &radialDamageParams);
cubeActor2->damage(getFalloffProgram(), &radialDamageParams);
cubeActor3->damage(getFalloffProgram(), &radialDamageParams);
cubeActor4->damage(getFalloffProgram(), &radialDamageParams);
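    // Measure wall-clock time of group processing to compare against the accumulated worker stats below.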
Nv::Blast::Time time;
m_groupTM->process();
m_groupTM->wait();
int64_t groupTime = time.getElapsedTicks();
TkGroupStats gstats;
group->getStats(gstats);
int64_t total = gstats.timers.fracture + gstats.timers.island + gstats.timers.material + gstats.timers.partition + gstats.timers.visibility;
#if NV_PROFILE
EXPECT_GT(total, 0); // some values are reported
EXPECT_LT(groupTime, total); // total LL time is higher than group time
EXPECT_GT((double)gstats.workerTime / groupTime, 2.0); // expect some minimal speedup (including overhead)
EXPECT_EQ(4, gstats.processedActorsCount); // actors processed
#endif
releaseFramework();
}
TEST_F(TkTestStrict, FractureReportSupport)
{
createFramework();
TkFramework* fwk = NvBlastTkFrameworkGet();
NvBlastChunkDesc chunkDescs[] =
{
{ { 0,0,0 }, 2, UINT32_MAX, NvBlastChunkDesc::SupportFlag, 'prnt' },
{ { -1,0,0 }, 1, 0, NvBlastChunkDesc::NoFlags, 'left' },
{ { +1,0,0 }, 1, 0, NvBlastChunkDesc::NoFlags, 'rght' },
};
TkAssetDesc assetDesc;
assetDesc.chunkCount = sizeof(chunkDescs) / sizeof(NvBlastChunkDesc);
assetDesc.chunkDescs = chunkDescs;
assetDesc.bondCount = 0;
assetDesc.bondDescs = nullptr;
assetDesc.bondFlags = nullptr;
const TkAsset* asset = fwk->createAsset(assetDesc);
TkActorDesc actorDesc;
actorDesc.asset = asset;
TkActor* actor = fwk->createActor(actorDesc);
actor->userData = (void*)'root';
class Listener : public TkEventListener
{
void receive(const TkEvent* events, uint32_t eventCount) override
{
for (uint32_t i = 0; i < eventCount; i++)
{
const TkEvent& event = events[i];
switch (event.type)
{
case TkJointUpdateEvent::EVENT_TYPE:
FAIL() << "not expecting joints here";
break;
case TkFractureCommands::EVENT_TYPE:
{
const TkActorData& actor = event.getPayload<TkFractureCommands>()->tkActorData;
// Group::sync still needed the family for SharedMemory management.
EXPECT_TRUE(nullptr != actor.family);
EXPECT_EQ((void*)'root', actor.userData);
EXPECT_EQ(0, actor.index);
}
break;
case TkFractureEvents::EVENT_TYPE:
{
const TkActorData& actor = event.getPayload<TkFractureEvents>()->tkActorData;
EXPECT_EQ((void*)'root', actor.userData);
EXPECT_EQ(0, actor.index);
}
break;
case TkSplitEvent::EVENT_TYPE:
{
const TkSplitEvent* split = event.getPayload<TkSplitEvent>();
EXPECT_TRUE(nullptr != split->parentData.family);
EXPECT_EQ((void*)'root', split->parentData.userData);
EXPECT_EQ(0, split->parentData.index);
EXPECT_EQ(2, split->numChildren);
EXPECT_EQ(1, split->children[0]->getVisibleChunkCount());
uint32_t visibleChunkIndex;
// child order is not mandatory
{
TkActor* a = split->children[0];
a->getVisibleChunkIndices(&visibleChunkIndex, 1);
uint32_t li = a->getIndex();
EXPECT_EQ(1, li);
EXPECT_EQ(split->parentData.family, &a->getFamily());
EXPECT_EQ('left', a->getAsset()->getChunks()[visibleChunkIndex].userData);
}
{
TkActor*a = split->children[1];
a->getVisibleChunkIndices(&visibleChunkIndex, 1);
uint32_t ri = a->getIndex();
EXPECT_EQ(2, ri);
EXPECT_EQ(split->parentData.family, &a->getFamily());
EXPECT_EQ('rght', a->getAsset()->getChunks()[visibleChunkIndex].userData);
}
}
break;
default:
FAIL() << "should not get here";
}
}
}
} listener;
actor->getFamily().addListener(listener);
// expected state for the original actor, see Listener
EXPECT_EQ((void*)'root', actor->userData);
EXPECT_EQ(0, actor->getIndex());
TkGroupDesc groupDesc = { m_taskman->getCpuDispatcher()->getWorkerCount() };
TkGroup* group = fwk->createGroup(groupDesc);
m_groupTM->setGroup(group);
group->addActor(*actor);
// this will trigger hierarchical chunk fracture
NvBlastExtRadialDamageDesc radialDamage = getRadialDamageDesc(0, 0, 0);
NvBlastExtProgramParams radialDamageParams = { &radialDamage, nullptr };
actor->damage(getFalloffProgram(), &radialDamageParams);
m_groupTM->process();
m_groupTM->wait();
releaseFramework();
}
TEST_F(TkTestStrict, FractureReportGraph)
{
createFramework();
TkFramework* fwk = NvBlastTkFrameworkGet();
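    // Two bonds: the one centered at the origin lies inside the damage radius and breaks; the far-away one survives.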
NvBlastBond bondToBreak = { { 1, 0, 0 }, 1, { 0, 0, 0 }, 0 };
NvBlastBond bondToKeep = { { 1, 0, 0 }, 1, { 10, 10, 10 }, 0 };
NvBlastBondDesc bondDescs[] =
{
{ bondToKeep, { 1, 2 } },
{ bondToBreak, { 2, 3 } },
};
NvBlastChunkDesc chunkDescs[] =
{
{ { 0, 0, 0 }, 2, UINT32_MAX, NvBlastChunkDesc::NoFlags, 'root' },
{ { -1, 0, 0 }, 1, 0, NvBlastChunkDesc::SupportFlag, 'A' },
{ { +1, 0, 0 }, 1, 0, NvBlastChunkDesc::SupportFlag, 'B' },
{ { +1, 0, 0 }, 1, 0, NvBlastChunkDesc::SupportFlag, 'C' },
};
TkAssetDesc assetDesc;
assetDesc.chunkCount = sizeof(chunkDescs) / sizeof(NvBlastChunkDesc);
assetDesc.chunkDescs = chunkDescs;
assetDesc.bondCount = 2;
assetDesc.bondDescs = bondDescs;
assetDesc.bondFlags = nullptr;
const TkAsset* asset = fwk->createAsset(assetDesc);
TkActorDesc actorDesc;
actorDesc.asset = asset;
TkActor* rootActor = fwk->createActor(actorDesc);
rootActor->userData = (void*)'root';
class Listener : public TkEventListener
{
void receive(const TkEvent* events, uint32_t eventCount) override
{
for (uint32_t i = 0; i < eventCount; i++)
{
const TkEvent& event = events[i];
switch (event.type)
{
case TkJointUpdateEvent::EVENT_TYPE:
FAIL() << "not expecting joints here";
break;
case TkFractureCommands::EVENT_TYPE:
{
const TkActorData& actor = event.getPayload<TkFractureCommands>()->tkActorData;
// Group::sync still needed the family for SharedMemory management.
EXPECT_TRUE(nullptr != actor.family);
                    // original actor state is not preserved, so the commented-out check below would fail
EXPECT_EQ((void*)'root', actor.userData);
EXPECT_EQ(0, actor.index);
// this information was invalid anyway
//EXPECT_EQ(1, actor->getVisibleChunkCount()) << "state not preserved";
}
break;
case TkFractureEvents::EVENT_TYPE:
{
const TkActorData& actor = event.getPayload<TkFractureEvents>()->tkActorData;
// Group::sync still needed the family for SharedMemory management.
EXPECT_TRUE(nullptr != actor.family);
                    // original actor state is not preserved, so the commented-out check below would fail
EXPECT_EQ((void*)'root', actor.userData);
EXPECT_EQ(0, actor.index);
// this information was invalid anyway
//EXPECT_EQ(1, actor->getVisibleChunkCount()) << "state not preserved";
}
break;
case TkSplitEvent::EVENT_TYPE:
{
const TkSplitEvent* split = event.getPayload<TkSplitEvent>();
EXPECT_EQ((void*)'root', split->parentData.userData);
EXPECT_EQ(0, split->parentData.index);
EXPECT_EQ(2, split->numChildren);
uint32_t visibleChunkIndex[2];
// child order is not mandatory
{
TkActor* a = split->children[1];
EXPECT_EQ(2, a->getVisibleChunkCount()); // chunks A and B
a->getVisibleChunkIndices(visibleChunkIndex, 2);
uint32_t actorIndex = a->getIndex();
EXPECT_EQ(0, actorIndex); // same index as the original actor
// visible chunk order is not mandatory
EXPECT_EQ('B', a->getAsset()->getChunks()[visibleChunkIndex[0]].userData);
EXPECT_EQ('A', a->getAsset()->getChunks()[visibleChunkIndex[1]].userData);
}
{
TkActor* a = split->children[0];
EXPECT_EQ(1, a->getVisibleChunkCount());
a->getVisibleChunkIndices(visibleChunkIndex, 1);
uint32_t actorIndex = a->getIndex();
EXPECT_EQ(2, actorIndex);
EXPECT_EQ('C', a->getAsset()->getChunks()[visibleChunkIndex[0]].userData);
}
}
break;
default:
FAIL() << "should not get here";
}
}
}
} listener;
rootActor->getFamily().addListener(listener);
// expected state for the original actor, see Listener
EXPECT_EQ((void*)'root', rootActor->userData);
EXPECT_EQ(0, rootActor->getIndex());
EXPECT_EQ(1, rootActor->getVisibleChunkCount());
TkGroupDesc groupDesc = { m_taskman->getCpuDispatcher()->getWorkerCount() };
TkGroup* group = fwk->createGroup(groupDesc);
m_groupTM->setGroup(group);
group->addActor(*rootActor);
// this will trigger one bond to break
NvBlastExtRadialDamageDesc radialDamage = getRadialDamageDesc(0, 0, 0, 0.5f, 0.5f);
NvBlastExtProgramParams radialDamageParams = { &radialDamage, nullptr };
rootActor->damage(getFalloffProgram(), &radialDamageParams);
m_groupTM->process();
m_groupTM->wait();
releaseFramework();
}
TEST_F(TkTestStrict, SplitWarning) // GWD-167
{
createFramework();
TkFramework* fwk = NvBlastTkFrameworkGet();
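    // Single support chunk with a two-level hierarchy and no bonds; fully damaging it exercises the split path from GWD-167.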
NvBlastChunkDesc chunkDescs[] =
{
{ { 0,0,0 }, 2, UINT32_MAX, NvBlastChunkDesc::SupportFlag, 'root' },
{ { -1,0,0 }, 1, 0, NvBlastChunkDesc::NoFlags, 'A' },
{ { +1,0,0 }, 1, 0, NvBlastChunkDesc::NoFlags, 'B' },
{ { -1,0,0 }, 1, 0, NvBlastChunkDesc::NoFlags, 'C' },
{ { +1,0,0 }, 1, 0, NvBlastChunkDesc::NoFlags, 'D' },
{ { -1,0,0 }, 1, 1, NvBlastChunkDesc::NoFlags, 'AAAA' },
{ { +1,0,0 }, 1, 2, NvBlastChunkDesc::NoFlags, 'BBBB' },
{ { -1,0,0 }, 1, 3, NvBlastChunkDesc::NoFlags, 'CCCC' },
{ { +1,0,0 }, 1, 4, NvBlastChunkDesc::NoFlags, 'DDDD' },
};
TkAssetDesc assetDesc;
assetDesc.chunkCount = sizeof(chunkDescs) / sizeof(NvBlastChunkDesc);
assetDesc.chunkDescs = chunkDescs;
assetDesc.bondCount = 0;
assetDesc.bondDescs = nullptr;
assetDesc.bondFlags = nullptr;
const TkAsset* asset = fwk->createAsset(assetDesc);
TkActorDesc actorDesc;
actorDesc.asset = asset;
TkActor* actor = fwk->createActor(actorDesc);
TkGroupDesc groupDesc = { m_taskman->getCpuDispatcher()->getWorkerCount() };
TkGroup* group = fwk->createGroup(groupDesc);
m_groupTM->setGroup(group);
group->addActor(*actor);
NvBlastExtRadialDamageDesc radialDamage = getRadialDamageDesc(0, 0, 0);
NvBlastExtProgramParams radialDamageParams = { &radialDamage, nullptr };
actor->damage(getFalloffProgram(), &radialDamageParams);
m_groupTM->process();
m_groupTM->wait();
releaseFramework();
}
TEST_F(TkTestAllowWarnings, ChangeThreadCountToZero)
{
    // Tests that the group still allocates memory for one worker
    // after the CPU dispatcher is replaced with a zero-thread one (warns).
    // Mainly relies on internal asserts.
class EventCounter : public TkEventListener
{
public:
EventCounter() :fracCommands(0), fracEvents(0) {}
void receive(const TkEvent* events, uint32_t eventCount)
{
for (uint32_t i = 0; i < eventCount; i++)
{
const TkEvent& event = events[i];
switch (event.type)
{
case TkFractureCommands::EVENT_TYPE:
fracCommands++;
break;
case TkFractureEvents::EVENT_TYPE:
fracEvents++;
break;
default:
FAIL();
// no split due to single chunk
// no joints
}
}
}
uint32_t fracCommands, fracEvents;
} listener;
createFramework();
TkFramework* fwk = NvBlastTkFrameworkGet();
NvBlastChunkDesc chunkDescs[] = {
{ { 0,0,0 }, 2, UINT32_MAX, NvBlastChunkDesc::SupportFlag, 'root' }
};
TkAssetDesc assetDesc;
assetDesc.chunkCount = sizeof(chunkDescs) / sizeof(NvBlastChunkDesc);
assetDesc.chunkDescs = chunkDescs;
assetDesc.bondCount = 0;
assetDesc.bondDescs = nullptr;
assetDesc.bondFlags = nullptr;
const TkAsset* asset = fwk->createAsset(assetDesc);
TkActorDesc actorDesc;
actorDesc.asset = asset;
TkActor* actor1 = fwk->createActor(actorDesc);
TkActor* actor2 = fwk->createActor(actorDesc);
TkActor* actor3 = fwk->createActor(actorDesc);
TkActor* actor4 = fwk->createActor(actorDesc);
actor1->getFamily().addListener(listener);
actor2->getFamily().addListener(listener);
actor3->getFamily().addListener(listener);
actor4->getFamily().addListener(listener);
TestCpuDispatcher* disp0 = new TestCpuDispatcher(0);
TestCpuDispatcher* disp4 = new TestCpuDispatcher(4);
m_taskman->setCpuDispatcher(*disp4);
TkGroupDesc groupDesc = { m_taskman->getCpuDispatcher()->getWorkerCount() };
TkGroup* group = fwk->createGroup(groupDesc);
m_groupTM->setGroup(group);
group->addActor(*actor1);
group->addActor(*actor2);
m_taskman->setCpuDispatcher(*disp0);
//group->setWorkerCount(m_taskman->getCpuDispatcher()->getWorkerCount());
group->addActor(*actor3);
group->addActor(*actor4);
NvBlastExtRadialDamageDesc radialDamage = getRadialDamageDesc(0, 0, 0);
NvBlastExtProgramParams radialDamageParams = { &radialDamage, nullptr };
actor1->damage(getFalloffProgram(), &radialDamageParams);
actor2->damage(getFalloffProgram(), &radialDamageParams);
actor3->damage(getFalloffProgram(), &radialDamageParams);
actor4->damage(getFalloffProgram(), &radialDamageParams);
m_groupTM->process();
m_groupTM->wait();
EXPECT_EQ(4, listener.fracCommands);
EXPECT_EQ(4, listener.fracEvents);
releaseFramework();
disp0->release();
disp4->release();
}
TEST_F(TkTestStrict, ChangeThreadCountUp)
{
    // Tests that the group allocates more memory for additional workers
    // after the CPU dispatcher is replaced with one using a higher thread count.
    // Mainly relies on internal asserts.
class EventCounter : public TkEventListener
{
public:
EventCounter() :fracCommands(0), fracEvents(0) {}
void receive(const TkEvent* events, uint32_t eventCount)
{
for (uint32_t i = 0; i < eventCount; i++)
{
const TkEvent& event = events[i];
switch (event.type)
{
case TkFractureCommands::EVENT_TYPE:
fracCommands++;
break;
case TkFractureEvents::EVENT_TYPE:
fracEvents++;
break;
default:
FAIL();
// no split due to single chunk
// no joints
}
}
}
uint32_t fracCommands, fracEvents;
} listener;
createFramework();
TkFramework* fwk = NvBlastTkFrameworkGet();
NvBlastChunkDesc chunkDescs[] = {
{ { 0,0,0 }, 2, UINT32_MAX, NvBlastChunkDesc::SupportFlag, 'root' }
};
TkAssetDesc assetDesc;
assetDesc.chunkCount = sizeof(chunkDescs) / sizeof(NvBlastChunkDesc);
assetDesc.chunkDescs = chunkDescs;
assetDesc.bondCount = 0;
assetDesc.bondDescs = nullptr;
assetDesc.bondFlags = nullptr;
const TkAsset* asset = fwk->createAsset(assetDesc);
TkActorDesc actorDesc;
actorDesc.asset = asset;
TkActor* actor1 = fwk->createActor(actorDesc);
TkActor* actor2 = fwk->createActor(actorDesc);
TkActor* actor3 = fwk->createActor(actorDesc);
TkActor* actor4 = fwk->createActor(actorDesc);
actor1->getFamily().addListener(listener);
actor2->getFamily().addListener(listener);
actor3->getFamily().addListener(listener);
actor4->getFamily().addListener(listener);
TestCpuDispatcher* disp2 = new TestCpuDispatcher(2);
TestCpuDispatcher* disp4 = new TestCpuDispatcher(4);
m_taskman->setCpuDispatcher(*disp2);
TkGroupDesc groupDesc = { m_taskman->getCpuDispatcher()->getWorkerCount() };
TkGroup* group = fwk->createGroup(groupDesc);
m_groupTM->setGroup(group);
group->addActor(*actor1);
group->addActor(*actor2);
group->addActor(*actor3);
group->addActor(*actor4);
NvBlastExtRadialDamageDesc radialDamage = getRadialDamageDesc(0, 0, 0);
NvBlastExtProgramParams radialDamageParams = { &radialDamage, nullptr };
actor1->damage(getFalloffProgram(), &radialDamageParams);
actor2->damage(getFalloffProgram(), &radialDamageParams);
actor3->damage(getFalloffProgram(), &radialDamageParams);
actor4->damage(getFalloffProgram(), &radialDamageParams);
m_taskman->setCpuDispatcher(*disp4);
//group->setWorkerCount(m_taskman->getCpuDispatcher()->getWorkerCount());
m_groupTM->process();
m_groupTM->wait();
EXPECT_EQ(4, listener.fracCommands);
EXPECT_EQ(4, listener.fracEvents);
releaseFramework();
disp2->release();
disp4->release();
}
TEST_F(TkTestAllowWarnings, GroupNoWorkers)
{
// tests that the group still works with a dispatcher that has no worker threads
// a warning is expected
// mainly relies on internal asserts
class EventCounter : public TkEventListener
{
public:
EventCounter() :fracCommands(0), fracEvents(0) {}
void receive(const TkEvent* events, uint32_t eventCount)
{
for (uint32_t i = 0; i < eventCount; i++)
{
const TkEvent& event = events[i];
switch (event.type)
{
case TkFractureCommands::EVENT_TYPE:
fracCommands++;
break;
case TkFractureEvents::EVENT_TYPE:
fracEvents++;
break;
default:
FAIL();
// no split due to single chunk
// no joints
}
}
}
uint32_t fracCommands, fracEvents;
} listener;
createFramework();
TkFramework* fwk = NvBlastTkFrameworkGet();
NvBlastChunkDesc chunkDescs[] = {
{ { 0,0,0 }, 2, UINT32_MAX, NvBlastChunkDesc::SupportFlag, 'root' }
};
TkAssetDesc assetDesc;
assetDesc.chunkCount = sizeof(chunkDescs) / sizeof(NvBlastChunkDesc);
assetDesc.chunkDescs = chunkDescs;
assetDesc.bondCount = 0;
assetDesc.bondDescs = nullptr;
assetDesc.bondFlags = nullptr;
const TkAsset* asset = fwk->createAsset(assetDesc);
TkActorDesc actorDesc;
actorDesc.asset = asset;
TkActor* actor1 = fwk->createActor(actorDesc);
TkActor* actor2 = fwk->createActor(actorDesc);
TkActor* actor3 = fwk->createActor(actorDesc);
TkActor* actor4 = fwk->createActor(actorDesc);
actor1->getFamily().addListener(listener);
actor2->getFamily().addListener(listener);
actor3->getFamily().addListener(listener);
actor4->getFamily().addListener(listener);
TestCpuDispatcher* disp = new TestCpuDispatcher(0);
m_taskman->setCpuDispatcher(*disp);
TkGroupDesc groupDesc = { m_taskman->getCpuDispatcher()->getWorkerCount() };
TkGroup* group = fwk->createGroup(groupDesc);
m_groupTM->setGroup(group);
group->addActor(*actor1);
group->addActor(*actor2);
group->addActor(*actor3);
group->addActor(*actor4);
NvBlastExtRadialDamageDesc radialDamage = getRadialDamageDesc(0, 0, 0);
NvBlastExtProgramParams programParams = {
&radialDamage,
getDefaultMaterial()
};
actor1->damage(getFalloffProgram(), &programParams);
actor2->damage(getFalloffProgram(), &programParams);
actor3->damage(getFalloffProgram(), &programParams);
actor4->damage(getFalloffProgram(), &programParams);
m_groupTM->process();
m_groupTM->wait();
EXPECT_EQ(4, listener.fracCommands);
EXPECT_EQ(4, listener.fracEvents);
disp->release();
releaseFramework();
}
|
NVIDIA-Omniverse/PhysX/blast/source/test/src/unit/CoreTests.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include <algorithm>
#include "gtest/gtest.h"
//#include "NvBlast.h"
#include "NvBlastActor.h"
#include "NvBlastIndexFns.h"
#include "NvBlastGlobals.h"
#include "TestAssets.h"
#include "NvBlastActor.h"
static void messageLog(int type, const char* msg, const char* file, int line)
{
switch (type)
{
case NvBlastMessage::Error: std::cout << "NvBlast Error message in " << file << "(" << line << "): " << msg << "\n"; break;
case NvBlastMessage::Warning: std::cout << "NvBlast Warning message in " << file << "(" << line << "): " << msg << "\n"; break;
case NvBlastMessage::Info: std::cout << "NvBlast Info message in " << file << "(" << line << "): " << msg << "\n"; break;
case NvBlastMessage::Debug: std::cout << "NvBlast Debug message in " << file << "(" << line << "): " << msg << "\n"; break;
}
}
TEST(CoreTests, IndexStartLookup)
{
uint32_t lookup[32];
uint32_t indices[] = {1,1,2,2,4,4,4};
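// createIndexStartLookup appears to map each value v in [0, 30] to the position of the
// first entry >= v in the sorted indices array (with extra slots past the end), so that
// lookup[v + 1] - lookup[v] gives the number of occurrences of v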
Nv::Blast::createIndexStartLookup<uint32_t>(lookup, 0, 30, indices, 7, 4);
EXPECT_EQ(lookup[0], 0);
EXPECT_EQ(lookup[1], 0);
EXPECT_EQ(lookup[2], 2);
EXPECT_EQ(lookup[3], 4);
EXPECT_EQ(lookup[4], 4);
EXPECT_EQ(lookup[5], 7);
EXPECT_EQ(lookup[31], 7);
}
#include "NvBlastGeometry.h"
int findClosestNodeByBonds(const float point[4], const NvBlastActor* actor)
{
const Nv::Blast::Actor* a = static_cast<const Nv::Blast::Actor*>(actor);
const NvBlastFamily* family = NvBlastActorGetFamily(actor, messageLog);
const NvBlastAsset* asset = NvBlastFamilyGetAsset(family, messageLog);
const NvBlastSupportGraph graph = NvBlastAssetGetSupportGraph(asset, messageLog);
return Nv::Blast::findClosestNode(
point,
a->getFirstGraphNodeIndex(),
a->getFamilyHeader()->getGraphNodeIndexLinks(),
graph.adjacencyPartition,
graph.adjacentNodeIndices,
graph.adjacentBondIndices,
NvBlastAssetGetBonds(asset, messageLog),
NvBlastActorGetBondHealths(actor, messageLog),
graph.chunkIndices
);
}
int findClosestNodeByChunks(const float point[4], const NvBlastActor* actor)
{
const Nv::Blast::Actor* a = static_cast<const Nv::Blast::Actor*>(actor);
return Nv::Blast::findClosestNode(
point,
a->getFirstGraphNodeIndex(),
a->getFamilyHeader()->getGraphNodeIndexLinks(),
a->getAsset()->m_graph.getAdjacencyPartition(),
a->getAsset()->m_graph.getAdjacentNodeIndices(),
a->getAsset()->m_graph.getAdjacentBondIndices(),
a->getAsset()->getBonds(),
a->getFamilyHeader()->getBondHealths(),
a->getAsset()->getChunks(),
a->getFamilyHeader()->getLowerSupportChunkHealths(),
a->getAsset()->m_graph.getChunkIndices()
);
}
TEST(CoreTests, FindChunkByPosition)
{
std::vector<char> scratch;
const NvBlastAssetDesc& desc = g_assetDescs[0]; // 1-cube
scratch.resize((size_t)NvBlastGetRequiredScratchForCreateAsset(&desc, nullptr));
void* amem = NVBLAST_ALLOC(NvBlastGetAssetMemorySize(&desc, nullptr));
NvBlastAsset* asset = NvBlastCreateAsset(amem, &desc, scratch.data(), nullptr);
ASSERT_TRUE(asset != nullptr);
uint32_t expectedNode[] = { 0, 1, 2, 3, 4, 5, 6, 7 };
const float positions[] = {
-2.0f, -2.0f, -2.0f,
+2.0f, -2.0f, -2.0f,
-2.0f, +2.0f, -2.0f,
+2.0f, +2.0f, -2.0f,
-2.0f, -2.0f, +2.0f,
+2.0f, -2.0f, +2.0f,
-2.0f, +2.0f, +2.0f,
+2.0f, +2.0f, +2.0f,
};
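// each corner-direction sample point should resolve to the support chunk (node) in the
// matching octant of the cube asset; node order is assumed to follow octant order here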
const float* pos = &positions[0];
NvBlastActorDesc actorDesc;
actorDesc.initialBondHealths = actorDesc.initialSupportChunkHealths = nullptr;
actorDesc.uniformInitialBondHealth = actorDesc.uniformInitialLowerSupportChunkHealth = 1.0f;
void* fmem = NVBLAST_ALLOC(NvBlastAssetGetFamilyMemorySize(asset, nullptr));
NvBlastFamily* family = NvBlastAssetCreateFamily(fmem, asset, nullptr);
scratch.resize((size_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(family, nullptr));
NvBlastActor* actor = NvBlastFamilyCreateFirstActor(family, &actorDesc, scratch.data(), nullptr);
ASSERT_TRUE(actor != nullptr);
for (int i = 0; i < 8; ++i, pos += 3)
{
EXPECT_EQ(expectedNode[i], findClosestNodeByBonds(pos, actor));
EXPECT_EQ(expectedNode[i], findClosestNodeByChunks(pos, actor));
}
EXPECT_TRUE(NvBlastActorDeactivate(actor, nullptr));
NVBLAST_FREE(family);
NVBLAST_FREE(asset);
}
TEST(CoreTests, FindChunkByPositionUShape)
{
/*
Considering this graph:
4->5->6
^
|
1->2->3
we try to find the closest chunk/node for randomly sampled positions.
*/
const NvBlastChunkDesc uchunks[7] =
{
// centroid volume parent idx flags ID
{ {3.0f, 2.0f, 0.0f}, 0.0f, UINT32_MAX, NvBlastChunkDesc::NoFlags, 0 },
{ {1.0f, 1.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 1 },
{ {3.0f, 1.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 2 },
{ {5.0f, 1.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 3 },
{ {1.0f, 3.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 4 },
{ {3.0f, 3.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 5 },
{ {5.0f, 3.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 6 }
};
const NvBlastBondDesc ubonds[5] =
{
// normal area centroid userData chunks
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 2.0f, 1.0f, 0.0f }, 0 }, { 2, 1 } }, // index swap should not matter
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 4.0f, 1.0f, 0.0f }, 0 }, { 2, 3 } },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f, { 1.0f, 2.0f, 0.0f }, 0 }, { 1, 4 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 2.0f, 3.0f, 0.0f }, 0 }, { 4, 5 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 4.0f, 3.0f, 0.0f }, 0 }, { 5, 6 } },
};
const NvBlastAssetDesc desc = { 7, uchunks, 5, ubonds };
std::vector<char> scratch;
scratch.resize((size_t)NvBlastGetRequiredScratchForCreateAsset(&desc, messageLog));
void* amem = NVBLAST_ALLOC(NvBlastGetAssetMemorySize(&desc, messageLog));
NvBlastAsset* asset = NvBlastCreateAsset(amem, &desc, scratch.data(), messageLog);
ASSERT_TRUE(asset != nullptr);
NvBlastActorDesc actorDesc;
actorDesc.initialBondHealths = actorDesc.initialSupportChunkHealths = nullptr;
actorDesc.uniformInitialBondHealth = actorDesc.uniformInitialLowerSupportChunkHealth = 1.0f;
void* fmem = NVBLAST_ALLOC(NvBlastAssetGetFamilyMemorySize(asset, messageLog));
NvBlastFamily* family = NvBlastAssetCreateFamily(fmem, asset, nullptr);
scratch.resize((size_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(family, messageLog));
NvBlastActor* actor = NvBlastFamilyCreateFirstActor(family, &actorDesc, scratch.data(), nullptr);
ASSERT_TRUE(actor != nullptr);
srand(100);
for (uint32_t i = 0; i < 100000; i++)
{
float rx = 20 * (float)(rand() - 1) / RAND_MAX - 10;
float ry = 20 * (float)(rand() - 1) / RAND_MAX - 10;
float rz = 0.0f;
float rpos[] = { rx, ry, rz };
// open boundaries
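// chunk centroids lie at x in {1,3,5}, y in {1,3}; clamping int(r/2) picks the nearest
// column (0..2) and row (0..1), and support chunks 1..6 map to nodes 0..5, so the
// expected node is col + row * 3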
uint32_t col = std::max(0, std::min(2, int(rx / 2)));
uint32_t row = std::max(0, std::min(1, int(ry / 2)));
uint32_t expectedNode = col + row * 3;
//printf("iteration %i: %.1f %.1f %.1f expected: %d\n", i, rpos[0], rpos[1], rpos[2], expectedNode);
{
uint32_t returnedNode = findClosestNodeByBonds(rpos, actor);
if (expectedNode != returnedNode)
findClosestNodeByBonds(rpos, actor);
EXPECT_EQ(expectedNode, returnedNode);
}
{
uint32_t returnedNode = findClosestNodeByChunks(rpos, actor);
if (expectedNode != returnedNode)
findClosestNodeByChunks(rpos, actor);
EXPECT_EQ(expectedNode, returnedNode);
}
}
EXPECT_TRUE(NvBlastActorDeactivate(actor, messageLog));
NVBLAST_FREE(family);
NVBLAST_FREE(asset);
}
TEST(CoreTests, FindChunkByPositionLandlocked)
{
// 7 > 8 > 9
// ^ ^ ^
// 4 > 5 > 6
// ^ ^ ^
// 1 > 2 > 3
// chunk 5 (node 4) is broken out (landlocked)
// find closest chunk/node on the two new actors
const NvBlastChunkDesc chunks[10] =
{
// centroid volume parent idx flags ID
{ {0.0f, 0.0f, 0.0f}, 0.0f, UINT32_MAX, NvBlastChunkDesc::NoFlags, 0 },
{ {1.0f, 1.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 1 },
{ {3.0f, 1.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 2 },
{ {5.0f, 1.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 3 },
{ {1.0f, 3.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 4 },
{ {3.0f, 3.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 5 },
{ {5.0f, 3.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 6 },
{ {1.0f, 5.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 7 },
{ {3.0f, 5.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 8 },
{ {5.0f, 5.0f, 0.0f}, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 9 },
};
const NvBlastBondDesc bonds[12] =
{
// normal area centroid userData chunks
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 2.0f, 1.0f, 0.0f }, 0 }, { 1, 2 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 4.0f, 1.0f, 0.0f }, 0 }, { 2, 3 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 2.0f, 3.0f, 0.0f }, 0 }, { 4, 5 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 4.0f, 3.0f, 0.0f }, 0 }, { 5, 6 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 2.0f, 5.0f, 0.0f }, 0 }, { 7, 8 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 4.0f, 5.0f, 0.0f }, 0 }, { 8, 9 } },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f, { 1.0f, 2.0f, 0.0f }, 0 }, { 1, 4 } },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f, { 3.0f, 2.0f, 0.0f }, 0 }, { 2, 5 } },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f, { 5.0f, 2.0f, 0.0f }, 0 }, { 3, 6 } },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f, { 1.0f, 4.0f, 0.0f }, 0 }, { 4, 7 } },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f, { 3.0f, 4.0f, 0.0f }, 0 }, { 5, 8 } },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f, { 5.0f, 4.0f, 0.0f }, 0 }, { 6, 9 } },
};
const NvBlastAssetDesc desc = { 10, chunks, 12, bonds };
std::vector<char> scratch;
scratch.resize((size_t)NvBlastGetRequiredScratchForCreateAsset(&desc, messageLog));
void* amem = NVBLAST_ALLOC(NvBlastGetAssetMemorySize(&desc, messageLog));
NvBlastAsset* asset = NvBlastCreateAsset(amem, &desc, scratch.data(), messageLog);
ASSERT_TRUE(asset != nullptr);
NvBlastActorDesc actorDesc;
actorDesc.initialBondHealths = actorDesc.initialSupportChunkHealths = nullptr;
actorDesc.uniformInitialBondHealth = actorDesc.uniformInitialLowerSupportChunkHealth = 1.0f;
void* fmem = NVBLAST_ALLOC(NvBlastAssetGetFamilyMemorySize(asset, nullptr));
NvBlastFamily* family = NvBlastAssetCreateFamily(fmem, asset, nullptr);
scratch.resize((size_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(family, nullptr));
NvBlastActor* actor = NvBlastFamilyCreateFirstActor(family, &actorDesc, scratch.data(), nullptr);
ASSERT_TRUE(actor != nullptr);
float point[4] = { 3.0f, 3.0f, 0.0f };
EXPECT_EQ(4, findClosestNodeByChunks(point, actor));
EXPECT_EQ(4, findClosestNodeByBonds(point, actor));
NvBlastChunkFractureData chunkBuffer[1];
NvBlastFractureBuffers events = { 0, 1, nullptr, chunkBuffer };
NvBlastChunkFractureData chunkFracture = { 0, 5, 1.0f };
NvBlastFractureBuffers commands = { 0, 1, nullptr, &chunkFracture };
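// the command fully damages support chunk 5 (the landlocked center), so the split
// below should break it out as its own actor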
NvBlastActorApplyFracture(&events, actor, &commands, messageLog, nullptr);
EXPECT_EQ(1, events.chunkFractureCount);
NvBlastActor* newActors[5];
NvBlastActorSplitEvent splitEvent = { nullptr, newActors };
scratch.resize((size_t)NvBlastActorGetRequiredScratchForSplit(actor, messageLog));
size_t newActorsCount = NvBlastActorSplit(&splitEvent, actor, 5, scratch.data(), messageLog, nullptr);
ASSERT_EQ(actor, newActors[1]);
EXPECT_NE(4, findClosestNodeByChunks(point, actor));
EXPECT_NE(4, findClosestNodeByBonds(point, actor));
float point2[4] = { 80.0f, 80.0f, 80.0f };
EXPECT_EQ(4, findClosestNodeByChunks(point2, newActors[0]));
EXPECT_EQ(4, findClosestNodeByBonds(point2, newActors[0]));
for (uint32_t i = 0; i < newActorsCount; ++i)
{
EXPECT_TRUE(NvBlastActorDeactivate(newActors[i], nullptr));
}
NVBLAST_FREE(family);
NVBLAST_FREE(asset);
}
TEST(CoreTests, FindClosestByChunkAccuracy)
{
// (0,0) +---+-------+
// | | 1 |
// | 2 +---+---+
// | | 5 | |
// +---+---+ 4 |
// | 3 | |
// +-------+---+ (6,6)
// random point lookup over the actor's space
// tests would fail if findClosestNodeByChunks didn't improve accuracy with the help of bonds
const NvBlastChunkDesc chunks[6] =
{
// centroid volume parent idx flags ID
{ { 0.0f, 0.0f, 0.0f }, 0.0f, UINT32_MAX, NvBlastChunkDesc::NoFlags, 0 },
{ { 4.0f, 1.0f, 0.0f }, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 1 },
{ { 1.0f, 2.0f, 0.0f }, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 2 },
{ { 2.0f, 5.0f, 0.0f }, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 3 },
{ { 5.0f, 4.0f, 0.0f }, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 4 },
{ { 3.0f, 3.0f, 0.0f }, 0.0f, 0, NvBlastChunkDesc::SupportFlag, 5 },
};
const NvBlastBondDesc bonds[8] =
{
// normal area centroid userData chunks
{ { { -1.0f, 0.0f, 0.0f }, 1.0f,{ 2.0f, 1.0f, 0.0f }, 0 },{ 1, 2 } },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f,{ 5.0f, 2.0f, 0.0f }, 0 },{ 1, 4 } },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f,{ 3.0f, 2.0f, 0.0f }, 0 },{ 5, 1 } },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f,{ 1.0f, 4.0f, 0.0f }, 0 },{ 2, 3 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 2.0f, 3.0f, 0.0f }, 0 },{ 2, 5 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 4.0f, 5.0f, 0.0f }, 0 },{ 3, 4 } },
{ { { 0.0f, -1.0f, 0.0f }, 1.0f,{ 3.0f, 4.0f, 0.0f }, 0 },{ 3, 5 } },
{ { { -1.0f, 0.0f, 0.0f }, 1.0f,{ 4.0f, 3.0f, 0.0f }, 0 },{ 4, 5 } },
};
const NvBlastAssetDesc desc = { 6, chunks, 8, bonds };
std::vector<char> scratch;
scratch.resize((size_t)NvBlastGetRequiredScratchForCreateAsset(&desc, messageLog));
void* amem = NVBLAST_ALLOC(NvBlastGetAssetMemorySize(&desc, messageLog));
NvBlastAsset* asset = NvBlastCreateAsset(amem, &desc, scratch.data(), messageLog);
ASSERT_TRUE(asset != nullptr);
NvBlastActorDesc actorDesc;
actorDesc.initialBondHealths = actorDesc.initialSupportChunkHealths = nullptr;
actorDesc.uniformInitialBondHealth = actorDesc.uniformInitialLowerSupportChunkHealth = 1.0f;
void* fmem = NVBLAST_ALLOC(NvBlastAssetGetFamilyMemorySize(asset, nullptr));
NvBlastFamily* family = NvBlastAssetCreateFamily(fmem, asset, nullptr);
scratch.resize((size_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(family, nullptr));
NvBlastActor* actor = NvBlastFamilyCreateFirstActor(family, &actorDesc, scratch.data(), nullptr);
ASSERT_TRUE(actor != nullptr);
srand(0xb007);
for (uint32_t i = 0; i < 100000; i++)
{
float rx = 8 * (float)(rand()) / RAND_MAX - 1;
float ry = 8 * (float)(rand()) / RAND_MAX - 1;
float rz = 0.0f;
float rpos[] = { rx, ry, rz };
EXPECT_LE(-1.0f, rx); EXPECT_GE(7.0f, rx);
EXPECT_LE(-1.0f, ry); EXPECT_GE(7.0f, ry);
uint32_t expectedNode = 0xdefec7;
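// support chunks 1..5 map to graph nodes 0..4; the branches below carve the diagram
// above into its five rectangles and give the node that owns each region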
if (rx < 2.0f) {
if (ry < 4.0f) { expectedNode = 1; }
else { expectedNode = 2; }
}
else if (rx < 4.0f) {
if (ry < 2.0f) { expectedNode = 0; }
else if (ry < 4.0f) { expectedNode = 4; }
else { expectedNode = 2; }
}
else {
if (ry < 2.0f) { expectedNode = 0; }
else { expectedNode = 3; }
}
uint32_t nodeByBonds = findClosestNodeByBonds(rpos, actor);
if (nodeByBonds != expectedNode)
{
printf("%.1f %.1f %.1f\n", rx, ry, rz);
}
EXPECT_EQ(expectedNode, nodeByBonds);
uint32_t nodeByChunks = findClosestNodeByChunks(rpos, actor);
if (nodeByChunks != expectedNode)
{
printf("%.1f %.1f %.1f\n", rx, ry, rz);
}
EXPECT_EQ(expectedNode, nodeByChunks);
}
EXPECT_TRUE(NvBlastActorDeactivate(actor, messageLog));
NVBLAST_FREE(family);
NVBLAST_FREE(asset);
}
|
NVIDIA-Omniverse/PhysX/blast/source/test/src/unit/ActorTests.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include "BlastBaseTest.h"
#include "AssetGenerator.h"
#include <map>
#include <random>
#include <algorithm>
#include "NvBlastActor.h"
#include "NvBlastExtDamageShaders.h"
#include "NvBlastExtLlSerialization.h"
#include "NvBlastExtSerialization.h"
static bool chooseRandomGraphNodes(uint32_t* g, uint32_t count, const Nv::Blast::Actor& actor)
{
const uint32_t graphNodeCount = actor.getGraphNodeCount();
if (graphNodeCount < count)
{
return false;
}
std::vector<uint32_t> graphNodeIndices(graphNodeCount);
uint32_t* index = graphNodeIndices.data();
for (Nv::Blast::Actor::GraphNodeIt i = actor; (bool)i ; ++i)
{
*index++ = (uint32_t)i;
}
struct UserDataSorter
{
UserDataSorter(const Nv::Blast::Actor& actor) : m_asset(*actor.getAsset()) {}
bool operator () (uint32_t i0, uint32_t i1) const
{
const uint32_t c0 = m_asset.m_graph.getChunkIndices()[i0];
const uint32_t c1 = m_asset.m_graph.getChunkIndices()[i1];
if (Nv::Blast::isInvalidIndex(c0) || Nv::Blast::isInvalidIndex(c1))
{
return c0 < c1;
}
return m_asset.getChunks()[c0].userData < m_asset.getChunks()[c1].userData;
}
const Nv::Blast::Asset& m_asset;
} userDataSorter(actor);
std::sort(graphNodeIndices.data(), graphNodeIndices.data() + graphNodeCount, userDataSorter);
#if 0
std::vector<uint32_t> descUserData(graphNodeCount);
for (uint32_t i = 0; i < graphNodeCount; ++i)
{
descUserData[i] = actor.getAsset()->m_chunks[actor.getAsset()->m_graph.m_chunkIndices[graphNodeIndices[i]]].userData;
}
#endif
uint32_t t = 0;
uint32_t m = 0;
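// selection sampling (Knuth's Algorithm S): accept candidate t with probability
// (count - m) / (graphNodeCount - t), yielding a uniformly distributed subset of
// 'count' node indices while preserving the sorted order established above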
for (uint32_t i = 0; i < graphNodeCount && m < count; ++i, ++t)
{
NVBLAST_ASSERT(t < graphNodeCount);
if (t >= graphNodeCount)
{
break;
}
const float U = (float)rand()/RAND_MAX; // U is uniform random number in [0,1)
if ((graphNodeCount - t)*U < count - m)
{
g[m++] = graphNodeIndices[i];
}
}
return m == count;
}
static void blast(std::set<NvBlastActor*>& actorsToDamage, GeneratorAsset* testAsset, GeneratorAsset::Vec3 localPos, float minRadius, float maxRadius, float compressiveDamage)
{
std::vector<NvBlastChunkFractureData> chunkEvents; /* capacity: one entry per solver chunk */
std::vector<NvBlastBondFractureData> bondEvents; /* capacity: one entry per solver bond */
chunkEvents.resize(testAsset->solverChunks.size());
bondEvents.resize(testAsset->solverBonds.size());
std::vector<char> splitScratch;
std::vector<NvBlastActor*> newActorsBuffer(testAsset->solverChunks.size());
NvBlastExtRadialDamageDesc damage = {
compressiveDamage,
{ localPos.x, localPos.y, localPos.z },
minRadius,
maxRadius
};
NvBlastExtProgramParams programParams =
{
&damage,
nullptr
};
NvBlastDamageProgram program = {
NvBlastExtFalloffGraphShader,
nullptr
};
size_t totalNewActorsCount = 0;
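// fracture and split each actor in place; new actors accumulate in newActorsBuffer
// and are merged into the set only after the pass, keeping the set iterator valid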
for (std::set<NvBlastActor*>::iterator k = actorsToDamage.begin(); k != actorsToDamage.end();)
{
NvBlastActor* actor = *k;
NvBlastFractureBuffers events = { static_cast<uint32_t>(bondEvents.size()), static_cast<uint32_t>(chunkEvents.size()), bondEvents.data(), chunkEvents.data() };
NvBlastActorGenerateFracture(&events, actor, program, &programParams, nullptr, nullptr);
NvBlastActorApplyFracture(&events, actor, &events, nullptr, nullptr);
const bool isDamaged = NvBlastActorIsSplitRequired(actor, nullptr);
bool removeActor = false;
if (events.bondFractureCount + events.chunkFractureCount > 0)
{
NvBlastActorSplitEvent splitEvent;
splitEvent.newActors = &newActorsBuffer.data()[totalNewActorsCount];
uint32_t newActorSize = (uint32_t)(newActorsBuffer.size() - totalNewActorsCount);
splitScratch.resize((size_t)NvBlastActorGetRequiredScratchForSplit(actor, nullptr));
const size_t newActorsCount = NvBlastActorSplit(&splitEvent, actor, newActorSize, splitScratch.data(), nullptr, nullptr);
EXPECT_TRUE(isDamaged || newActorsCount == 0);
totalNewActorsCount += newActorsCount;
removeActor = splitEvent.deletedActor != NULL;
}
else
{
EXPECT_FALSE(isDamaged);
}
if (removeActor)
{
k = actorsToDamage.erase(k);
}
else
{
++k;
}
}
for (size_t i = 0; i < totalNewActorsCount; ++i)
{
actorsToDamage.insert(newActorsBuffer[i]);
}
}
template<int FailLevel, int Verbosity>
class ActorTest : public BlastBaseTest<FailLevel, Verbosity>
{
public:
ActorTest()
{
}
static void messageLog(int type, const char* msg, const char* file, int line)
{
BlastBaseTest<FailLevel, Verbosity>::messageLog(type, msg, file, line);
}
static void* alloc(size_t size)
{
return BlastBaseTest<FailLevel, Verbosity>::alignedZeroedAlloc(size);
}
static void free(void* mem)
{
BlastBaseTest<FailLevel, Verbosity>::alignedFree(mem);
}
NvBlastAsset* buildAsset(const NvBlastAssetDesc& desc)
{
// fix desc if wrong order or missing coverage first
NvBlastAssetDesc fixedDesc = desc;
std::vector<NvBlastChunkDesc> chunkDescs(desc.chunkDescs, desc.chunkDescs + desc.chunkCount);
std::vector<NvBlastBondDesc> bondDescs(desc.bondDescs, desc.bondDescs + desc.bondCount);
std::vector<uint32_t> chunkReorderMap(desc.chunkCount);
std::vector<char> scratch(desc.chunkCount * sizeof(NvBlastChunkDesc));
NvBlastEnsureAssetExactSupportCoverage(chunkDescs.data(), fixedDesc.chunkCount, scratch.data(), messageLog);
NvBlastReorderAssetDescChunks(chunkDescs.data(), fixedDesc.chunkCount, bondDescs.data(), fixedDesc.bondCount, chunkReorderMap.data(), true, scratch.data(), messageLog);
fixedDesc.chunkDescs = chunkDescs.data();
fixedDesc.bondDescs = bondDescs.empty() ? nullptr : bondDescs.data();
// create asset
m_scratch.resize((size_t)NvBlastGetRequiredScratchForCreateAsset(&fixedDesc, messageLog));
void* mem = alloc(NvBlastGetAssetMemorySize(&fixedDesc, messageLog));
NvBlastAsset* asset = NvBlastCreateAsset(mem, &fixedDesc, m_scratch.data(), messageLog);
EXPECT_TRUE(asset != nullptr);
return asset;
}
void buildAssets()
{
m_assets.resize(getAssetDescCount());
for (uint32_t i = 0; i < m_assets.size(); ++i)
{
m_assets[i] = buildAsset(g_assetDescs[i]);
}
}
NvBlastActor* instanceActor(const NvBlastAsset& asset)
{
NvBlastActorDesc actorDesc;
actorDesc.initialBondHealths = actorDesc.initialSupportChunkHealths = nullptr;
actorDesc.uniformInitialBondHealth = actorDesc.uniformInitialLowerSupportChunkHealth = 1.0f;
void* fmem = alloc(NvBlastAssetGetFamilyMemorySize(&asset, nullptr));
NvBlastFamily* family = NvBlastAssetCreateFamily(fmem, &asset, nullptr);
std::vector<char> scratch((size_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(family, messageLog));
EXPECT_TRUE(scratch.capacity() > 0);
NvBlastActor* actor = NvBlastFamilyCreateFirstActor(family, &actorDesc, scratch.data(), messageLog);
EXPECT_TRUE(actor != nullptr);
return actor;
}
void instanceActors()
{
m_actors.resize(m_assets.size());
for (uint32_t i = 0; i < m_actors.size(); ++i)
{
m_actors[i] = instanceActor(*m_assets[i]);
}
}
void releaseActors()
{
for (uint32_t i = 0; i < m_actors.size(); ++i)
{
NvBlastFamily* family = NvBlastActorGetFamily(m_actors[i], messageLog);
const bool actorReleaseResult = NvBlastActorDeactivate(m_actors[i], messageLog);
EXPECT_TRUE(actorReleaseResult);
free(family);
}
}
void destroyAssets()
{
for (uint32_t i = 0; i < m_assets.size(); ++i)
{
free(m_assets[i]);
}
}
void instanceAndPartitionRecursively
(
const NvBlastAsset& asset,
bool partitionToSubsupport,
void (*preSplitTest)(const Nv::Blast::Actor&, NvBlastLog),
void (*postSplitTest)(const std::vector<Nv::Blast::Actor*>&, uint32_t, uint32_t, bool)
)
{
const Nv::Blast::Asset& solverAsset = *static_cast<const Nv::Blast::Asset*>(&asset);
std::vector<Nv::Blast::Actor*> actors;
std::vector<Nv::Blast::Actor*> buffer(NvBlastAssetGetChunkCount(&asset, messageLog));
// Instance the first actor from the asset
actors.push_back(static_cast<Nv::Blast::Actor*>(instanceActor(asset)));
NvBlastFamily* family = NvBlastActorGetFamily(actors[0], messageLog);
const uint32_t supportChunkCount = NvBlastAssetGetSupportChunkCount(&asset, messageLog);
const uint32_t leafChunkCount = actors[0]->getAsset()->m_leafChunkCount;
// Now randomly partition the actors in the array, and keep going until we're down to single support chunks
bool canFracture = true;
while (canFracture)
{
canFracture = false;
for (uint32_t actorToPartition = 0; actorToPartition < actors.size(); ++actorToPartition)
{
Nv::Blast::Actor* a = (Nv::Blast::Actor*)actors[actorToPartition];
if (a == nullptr)
{
continue;
}
m_scratch.reserve((size_t)NvBlastActorGetRequiredScratchForSplit(a, messageLog));
if (preSplitTest)
{
preSplitTest(*a, nullptr);
}
const bool singleLowerSupportChunk = a->getGraphNodeCount() <= 1;
uint32_t newActorCount = 0;
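// repeatedly sever the bond between two randomly chosen graph nodes until the actor
// partitions into at least two pieces (bounded at 100 attempts); single-chunk actors
// skip the bond damage and partition directly into subsupport children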
for (int damageNum = 0; newActorCount < 2 && damageNum < 100; ++damageNum) // Avoid infinite loops
{
if (!singleLowerSupportChunk)
{
uint32_t g[2];
chooseRandomGraphNodes(g, 2, *a);
const uint32_t bondIndex = solverAsset.m_graph.findBond(g[0], g[1]);
if (bondIndex != Nv::Blast::invalidIndex<uint32_t>())
{
a->damageBond(g[0], g[1], bondIndex, 100.0f);
a->findIslands(m_scratch.data());
}
}
else if (!partitionToSubsupport)
{
continue;
}
// Split actor
newActorCount = a->partition((Nv::Blast::Actor**)&buffer[0], (uint32_t)buffer.size(), messageLog);
if (newActorCount >= 2)
{
actors[actorToPartition] = nullptr;
}
}
if (newActorCount > 1)
{
canFracture = true;
}
for (uint32_t i = 0; i < newActorCount; ++i)
{
actors.push_back(buffer[i]);
buffer[i]->updateVisibleChunksFromGraphNodes();
}
}
}
if (postSplitTest)
{
postSplitTest(actors, leafChunkCount, supportChunkCount, partitionToSubsupport);
}
for (auto actor : actors)
{
if (actor)
actor->release();
}
free(family);
}
static void recursivePartitionPostSplitTestCounts(const std::vector<Nv::Blast::Actor*>& actors, uint32_t leafChunkCount, uint32_t supportChunkCount, bool partitionToSubsupport)
{
// Test to see that all actors are split down to single support chunks
uint32_t remainingActorCount = 0;
for (uint32_t i = 0; i < actors.size(); ++i)
{
Nv::Blast::Actor* a = (Nv::Blast::Actor*)actors[i];
if (a == nullptr)
{
continue;
}
++remainingActorCount;
NVBLAST_ASSERT(1 == a->getVisibleChunkCount() || a->hasExternalBonds());
EXPECT_TRUE(1 == a->getVisibleChunkCount() || a->hasExternalBonds());
if (!partitionToSubsupport)
{
EXPECT_EQ(1, a->getGraphNodeCount());
}
if (0 == a->getVisibleChunkCount())
{
EXPECT_TRUE(a->hasExternalBonds());
EXPECT_EQ(1, a->getGraphNodeCount());
EXPECT_EQ(a->getFamilyHeader()->m_asset->m_graph.m_nodeCount - 1, a->getFirstGraphNodeIndex());
--remainingActorCount; // Do not count this as a remaining actor, to be compared with leaf or support chunk counts later
}
const bool actorReleaseResult = NvBlastActorDeactivate(actors[i], nullptr);
EXPECT_TRUE(actorReleaseResult);
}
if (partitionToSubsupport)
{
EXPECT_EQ(leafChunkCount, remainingActorCount);
}
else
{
EXPECT_EQ(supportChunkCount, remainingActorCount);
}
}
static void testActorVisibleChunks(const Nv::Blast::Actor& actor, NvBlastLog)
{
const Nv::Blast::Asset& asset = *actor.getAsset();
const NvBlastChunk* chunks = asset.getChunks();
if (actor.isSubSupportChunk())
{
EXPECT_EQ(1, actor.getVisibleChunkCount());
const uint32_t firstVisibleChunkIndex = (uint32_t)Nv::Blast::Actor::VisibleChunkIt(actor);
EXPECT_EQ(actor.getIndex() - asset.m_graph.m_nodeCount, firstVisibleChunkIndex - asset.m_firstSubsupportChunkIndex);
// Make sure the visible chunk is subsupport
// Array of support flags
std::vector<bool> isSupport(asset.m_chunkCount, false);
for (uint32_t i = 0; i < asset.m_graph.m_nodeCount; ++i)
{
const uint32_t chunkIndex = asset.m_graph.getChunkIndices()[i];
if (!Nv::Blast::isInvalidIndex(chunkIndex))
{
isSupport[chunkIndex] = true;
}
}
// Climb hierarchy to find support chunk
uint32_t chunkIndex = firstVisibleChunkIndex;
while (chunkIndex != Nv::Blast::invalidIndex<uint32_t>())
{
if (isSupport[chunkIndex])
{
break;
}
chunkIndex = chunks[chunkIndex].parentChunkIndex;
}
EXPECT_FALSE(Nv::Blast::isInvalidIndex(chunkIndex));
}
else
{
// Array of visibility flags
std::vector<bool> isVisible(asset.m_chunkCount, false);
for (Nv::Blast::Actor::VisibleChunkIt i = actor; (bool)i; ++i)
{
isVisible[(uint32_t)i] = true;
}
// Mark visible nodes representing graph chunks
std::vector<bool> visibleChunkFound(asset.m_chunkCount, false);
// Make sure every graph chunk is represented by a visible chunk, or represents the world
for (Nv::Blast::Actor::GraphNodeIt i = actor; (bool)i; ++i)
{
const uint32_t graphNodeIndex = (uint32_t)i;
uint32_t chunkIndex = asset.m_graph.getChunkIndices()[graphNodeIndex];
// Climb hierarchy to find visible chunk
while (chunkIndex != Nv::Blast::invalidIndex<uint32_t>())
{
// Check that chunk owners are accurate
EXPECT_EQ(actor.getIndex(), actor.getFamilyHeader()->getChunkActorIndices()[chunkIndex]);
if (isVisible[chunkIndex])
{
visibleChunkFound[chunkIndex] = true;
break;
}
chunkIndex = chunks[chunkIndex].parentChunkIndex;
}
EXPECT_TRUE(!Nv::Blast::isInvalidIndex(chunkIndex) || (graphNodeIndex == asset.m_graph.m_nodeCount-1 && actor.hasExternalBonds()));
}
// Check that all visible chunks are accounted for
for (uint32_t i = 0; i < asset.m_chunkCount; ++i)
{
EXPECT_EQ(visibleChunkFound[i], isVisible[i]);
}
// Make sure that, if all siblings are intact, they are invisible
for (uint32_t i = 0; i < asset.m_chunkCount; ++i)
{
bool allIntact = true;
bool noneVisible = true;
if (chunks[i].firstChildIndex < asset.getUpperSupportChunkCount()) // Do not check subsupport
{
for (uint32_t j = chunks[i].firstChildIndex; j < chunks[i].childIndexStop; ++j)
{
allIntact = allIntact && actor.getFamilyHeader()->getChunkActorIndices()[j] == actor.getIndex();
noneVisible = noneVisible && !isVisible[j];
}
EXPECT_TRUE(!allIntact || noneVisible);
}
}
}
}
static void recursivePartitionPostSplitTestVisibleChunks(const std::vector<Nv::Blast::Actor*>& actors, uint32_t leafChunkCount, uint32_t supportChunkCount, bool partitionToSubsupport)
{
for (uint32_t i = 0; i < actors.size(); ++i)
{
Nv::Blast::Actor* a = (Nv::Blast::Actor*)actors[i];
if (a == nullptr)
{
continue;
}
testActorVisibleChunks(*a, nullptr);
}
}
void partitionActorsToSupportChunks
(
uint32_t assetDescCount,
const NvBlastAssetDesc* assetDescs,
void(*preSplitTest)(const Nv::Blast::Actor&, NvBlastLog),
void(*postSplitTest)(const std::vector<Nv::Blast::Actor*>&, uint32_t, uint32_t, bool),
bool partitionToSubsupport
)
{
srand(0);
for (uint32_t i = 0; i < assetDescCount; ++i)
{
// Create an asset
NvBlastAsset* asset = buildAsset(assetDescs[i]);
// Perform repeated partitioning
instanceAndPartitionRecursively(*asset, partitionToSubsupport, preSplitTest, postSplitTest);
// Free the asset
free(asset);
}
}
static void compareFamilies(const NvBlastFamily* family1, const NvBlastFamily* family2, NvBlastLog logFn)
{
// first check that the family sizes are the same
// still do the byte comparison even if they aren't equal to make it easier to spot where things went wrong
const uint32_t size1 = NvBlastFamilyGetSize(family1, logFn);
const uint32_t size2 = NvBlastFamilyGetSize(family2, logFn);
const uint32_t size = std::min(size1, size2);
if (size1 != size2)
{
std::ostringstream msg;
msg << "Family deserialization sizes don't match [" << size1 << ", " << size2 << "].";
logFn(NvBlastMessage::Error, msg.str().c_str(), __FILE__, __LINE__);
}
const char* block1 = reinterpret_cast<const char*>(family1);
const char* block2 = reinterpret_cast<const char*>(family2);
#if 0
EXPECT_EQ(0, memcmp(block1, block2, size));
#else
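// locate the first and last differing bytes so the failure message reports the extent
// of the mismatch instead of just failing a flat memcmp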
bool diffFound = false;
size_t startDiff = 0;
for (size_t i = 0; i < size; ++i)
{
if (block1[i] != block2[i])
{
diffFound = true;
startDiff = i;
break;
}
}
if (!diffFound)
{
return;
}
size_t endDiff = startDiff;
for (size_t i = size; i--;)
{
if (block1[i] != block2[i])
{
endDiff = i;
break;
}
}
std::ostringstream msg;
msg << "Family deserialization does not match in range [" << startDiff << ", " << endDiff << "].";
logFn(NvBlastMessage::Error, msg.str().c_str(), __FILE__, __LINE__);
#endif
}
static void testActorBlockSerialize(std::vector<NvBlastActor*>& actors, NvBlastLog logFn)
{
if (actors.size())
{
const NvBlastFamily* family = NvBlastActorGetFamily(actors[0], logFn);
const uint32_t size = NvBlastFamilyGetSize(family, logFn);
s_storage.insert(s_storage.end(), (char*)family, (char*)family + size);
}
}
static void testActorCapnSerialize(std::vector<NvBlastActor*>& actors, NvBlastLog logFn)
{
if (actors.size())
{
const NvBlastFamily* family = NvBlastActorGetFamily(actors[0], logFn);
Nv::Blast::ExtSerialization* ser = NvBlastExtSerializationCreate();
EXPECT_TRUE(ser != nullptr);
EXPECT_TRUE(ser->getSerializationEncoding() == Nv::Blast::ExtSerialization::EncodingID::CapnProtoBinary);
void* serializedFamilyBuffer = nullptr;
const uint64_t serialFamilySize =
ser->serializeIntoBuffer(serializedFamilyBuffer, family, Nv::Blast::LlObjectTypeID::Family);
EXPECT_TRUE(serialFamilySize != 0);
s_storage.insert(s_storage.end(), (char*)&serialFamilySize, (char*)&serialFamilySize + sizeof(uint64_t));
s_storage.insert(s_storage.end(), (char*)serializedFamilyBuffer, (char*)serializedFamilyBuffer + serialFamilySize);
}
}
static void testActorDeserializeCommon(const NvBlastFamily* family, std::vector<NvBlastActor*>& actors, uint32_t size, NvBlastLog logFn)
{
EXPECT_LT(s_curr, s_storage.size());
EXPECT_TRUE(size > 0);
EXPECT_LE(s_curr + size, s_storage.size());
s_curr += size;
const NvBlastFamily* actorFamily = NvBlastActorGetFamily(actors[0], logFn);
// Family may contain different assets pointers, copy into new family block and set the same asset before comparing
Nv::Blast::Actor& a = *static_cast<Nv::Blast::Actor*>(actors[0]);
const Nv::Blast::Asset* solverAsset = a.getAsset();
const uint32_t familySize = NvBlastFamilyGetSize(family, logFn);
std::vector<char> storageFamilyCopy((char*)family, (char*)family + familySize);
NvBlastFamily* storageFamily = reinterpret_cast<NvBlastFamily*>(storageFamilyCopy.data());
NvBlastFamilySetAsset(storageFamily, solverAsset, logFn);
{
const uint32_t actorCountExpected = NvBlastFamilyGetActorCount(storageFamily, logFn);
std::vector<NvBlastActor*> blockActors(actorCountExpected);
const uint32_t actorCountReturned = NvBlastFamilyGetActors(blockActors.data(), actorCountExpected, storageFamily, logFn);
EXPECT_EQ(actorCountExpected, actorCountReturned);
}
compareFamilies(storageFamily, actorFamily, logFn);
}
static void testActorBlockDeserialize(std::vector<NvBlastActor*>& actors, NvBlastLog logFn)
{
if (actors.size())
{
const NvBlastFamily* family = reinterpret_cast<NvBlastFamily*>(&s_storage[s_curr]);
const uint32_t size = NvBlastFamilyGetSize(family, logFn);
testActorDeserializeCommon(family, actors, size, logFn);
}
}
static void testActorCapnDeserialize(std::vector<NvBlastActor*>& actors, NvBlastLog logFn)
{
if (actors.size())
{
Nv::Blast::ExtSerialization* ser = NvBlastExtSerializationCreate();
EXPECT_TRUE(ser->getSerializationEncoding() == Nv::Blast::ExtSerialization::EncodingID::CapnProtoBinary);
// the serialized size is stored in the stream right before the data itself, pull it out first
uint32_t objTypeId;
const uint64_t& size = *reinterpret_cast<const uint64_t*>(&s_storage[s_curr]);
s_curr += sizeof(uint64_t);
EXPECT_LE(size, UINT32_MAX);
// now read the buffer itself
void* object = ser->deserializeFromBuffer(&s_storage[s_curr], size, &objTypeId);
EXPECT_TRUE(object != nullptr);
EXPECT_TRUE(objTypeId == Nv::Blast::LlObjectTypeID::Family);
// finally compare it with the original family
const NvBlastFamily* family = reinterpret_cast<NvBlastFamily*>(object);
testActorDeserializeCommon(family, actors, (uint32_t)size, logFn);
}
}
// Serialize all actors and then deserialize back into a new family in a random order, and compare with the original family
static void testActorSerializationNewFamily(std::vector<NvBlastActor*>& actors, NvBlastLog logFn)
{
if (actors.size() == 0)
{
return;
}
Nv::Blast::Actor& a = *static_cast<Nv::Blast::Actor*>(actors[0]);
const Nv::Blast::Asset* solverAsset = a.getAsset();
const uint32_t serSizeBound = NvBlastAssetGetActorSerializationSizeUpperBound(solverAsset, logFn);
std::vector< std::vector<char> > streams(actors.size());
for (size_t i = 0; i < actors.size(); ++i)
{
const uint32_t serSize = NvBlastActorGetSerializationSize(actors[i], logFn);
EXPECT_GE(serSizeBound, serSize);
std::vector<char>& stream = streams[i];
stream.resize(serSize);
const uint32_t bytesWritten = NvBlastActorSerialize(stream.data(), serSize, actors[i], logFn);
EXPECT_EQ(serSize, bytesWritten);
}
void* fmem = alloc(NvBlastAssetGetFamilyMemorySize(solverAsset, logFn));
NvBlastFamily* newFamily = NvBlastAssetCreateFamily(fmem, solverAsset, logFn);
std::vector<size_t> order(actors.size());
for (size_t i = 0; i < order.size(); ++i)
{
order[i] = i;
}
std::random_device rd;
std::mt19937 g(rd());
std::shuffle(order.begin(), order.end(), g);
for (size_t i = 0; i < actors.size(); ++i)
{
NvBlastActor* newActor = NvBlastFamilyDeserializeActor(newFamily, streams[order[i]].data(), logFn);
EXPECT_TRUE(newActor != nullptr);
}
const NvBlastFamily* oldFamily = NvBlastActorGetFamily(&a, logFn);
// Allow there to be differences with invalid actors
const Nv::Blast::FamilyHeader* f1 = reinterpret_cast<const Nv::Blast::FamilyHeader*>(oldFamily);
const Nv::Blast::FamilyHeader* f2 = reinterpret_cast<const Nv::Blast::FamilyHeader*>(newFamily);
for (uint32_t actorN = 0; actorN < f1->getActorsArraySize(); ++actorN)
{
const Nv::Blast::Actor* a1 = f1->getActors() + actorN;
Nv::Blast::Actor* a2 = const_cast<Nv::Blast::Actor*>(f2->getActors() + actorN);
EXPECT_EQ(a1->isActive(), a2->isActive());
if (!a1->isActive())
{
*a2 = *a1; // Actual data does not matter, setting equal to pass comparison
}
}
compareFamilies(oldFamily, newFamily, logFn);
free(newFamily);
}
// Copy the family and then serialize some subset of actors, deleting them afterwards.
// Then, deserialize back into the block and compare the original and new families.
static void testActorSerializationPartialBlock(std::vector<NvBlastActor*>& actors, NvBlastLog logFn)
{
if (actors.size() <= 1)
{
return;
}
Nv::Blast::Actor& a = *static_cast<Nv::Blast::Actor*>(actors[0]);
const Nv::Blast::Asset* solverAsset = a.getAsset();
const NvBlastFamily* oldFamily = NvBlastActorGetFamily(&a, logFn);
const uint32_t size = NvBlastFamilyGetSize(oldFamily, logFn);
std::vector<char> buffer((char*)oldFamily, (char*)oldFamily + size);
NvBlastFamily* familyCopy = reinterpret_cast<NvBlastFamily*>(buffer.data());
const uint32_t serCount = 1 + rand() % (uint32_t)(actors.size() - 1); // serialize 1 to actorCount - 1 actors
const uint32_t actorCount = NvBlastFamilyGetActorCount(familyCopy, logFn);
std::vector<NvBlastActor*> actorsRemaining(actorCount);
const uint32_t actorsInFamily = NvBlastFamilyGetActors(&actorsRemaining[0], actorCount, familyCopy, logFn);
EXPECT_EQ(actorCount, actorsInFamily);
const uint32_t serSizeBound = NvBlastAssetGetActorSerializationSizeUpperBound(solverAsset, logFn);
std::vector< std::vector<char> > streams(serCount);
for (uint32_t i = 0; i < serCount; ++i)
{
std::vector<char>& stream = streams[i];
const uint32_t indexToStream = rand() % actorsRemaining.size();
NvBlastActor* actorToStream = actorsRemaining[indexToStream];
std::swap(actorsRemaining[indexToStream], actorsRemaining[actorsRemaining.size() - 1]);
actorsRemaining.pop_back();
const uint32_t serSize = NvBlastActorGetSerializationSize(actorToStream, logFn);
EXPECT_GE(serSizeBound, serSize);
stream.resize(serSize);
const uint32_t bytesWritten = NvBlastActorSerialize(&stream[0], serSize, actorToStream, logFn);
EXPECT_EQ(serSize, bytesWritten);
NvBlastActorDeactivate(actorToStream, logFn);
}
for (uint32_t i = 0; i < serCount; ++i)
{
NvBlastActor* newActor = NvBlastFamilyDeserializeActor(familyCopy, streams[i].data(), logFn);
EXPECT_TRUE(newActor != nullptr);
}
compareFamilies(oldFamily, familyCopy, logFn);
}
void damageLeafSupportActors
(
uint32_t assetCount,
uint32_t familyCount,
uint32_t damageCount,
bool simple,
void (*actorTest)(const Nv::Blast::Actor&, NvBlastLog),
void (*postDamageTest)(std::vector<NvBlastActor*>&, NvBlastLog),
CubeAssetGenerator::BondFlags bondFlags = CubeAssetGenerator::BondFlags::ALL_INTERNAL_BONDS
)
{
const float relativeDamageRadius = simple ? 0.75f : 0.2f;
const float compressiveDamage = 1.0f;
const uint32_t minChunkCount = simple ? 9 : 100;
const uint32_t maxChunkCount = simple ? 9 : 10000;
const bool printActorCount = false;
srand(0);
std::cout << "Asset # (out of " << assetCount << "): ";
for (uint32_t assetNum = 0; assetNum < assetCount; ++assetNum)
{
std::cout << assetNum + 1 << ".. ";
CubeAssetGenerator::Settings settings;
settings.extents = GeneratorAsset::Vec3(1, 1, 1);
settings.bondFlags = bondFlags;
CubeAssetGenerator::DepthInfo depthInfo;
depthInfo.slicesPerAxis = GeneratorAsset::Vec3(1, 1, 1);
depthInfo.flag = NvBlastChunkDesc::Flags::NoFlags;
settings.depths.push_back(depthInfo);
uint32_t chunkCount = 1;
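// multiply in randomly sized slicing depths until the product of slices per axis
// reaches minChunkCount, stopping early if a depth would exceed maxChunkCount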
while (chunkCount < minChunkCount)
{
uint32_t chunkMul;
do
{
depthInfo.slicesPerAxis = simple ? GeneratorAsset::Vec3(2, 2, 2) : GeneratorAsset::Vec3((float)(1 + rand() % 4), (float)(1 + rand() % 4), (float)(1 + rand() % 4));
chunkMul = (uint32_t)(depthInfo.slicesPerAxis.x * depthInfo.slicesPerAxis.y * depthInfo.slicesPerAxis.z);
} while (chunkMul == 1);
if (chunkCount*chunkMul > maxChunkCount)
{
break;
}
chunkCount *= chunkMul;
settings.depths.push_back(depthInfo);
settings.extents = settings.extents * depthInfo.slicesPerAxis;
}
settings.depths.back().flag = NvBlastChunkDesc::SupportFlag; // Leaves are support
// Make largest direction unit size
settings.extents = settings.extents * (1.0f / std::max(settings.extents.x, std::max(settings.extents.y, settings.extents.z)));
// Create asset
GeneratorAsset testAsset;
CubeAssetGenerator::generate(testAsset, settings);
NvBlastAssetDesc desc;
desc.chunkDescs = testAsset.solverChunks.data();
desc.chunkCount = (uint32_t)testAsset.solverChunks.size();
desc.bondDescs = testAsset.solverBonds.data();
desc.bondCount = (uint32_t)testAsset.solverBonds.size();
NvBlastAsset* asset = buildAsset(desc);
NvBlastID assetID = NvBlastAssetGetID(asset, messageLog);
// copy asset (for setAsset testing)
const char* data = (const char*)asset;
const uint32_t dataSize = NvBlastAssetGetSize(asset, messageLog);
char* duplicateData = (char*)alloc(dataSize);
memcpy(duplicateData, data, dataSize);
NvBlastAsset* assetDuplicate = (NvBlastAsset*)duplicateData;
// Generate families
for (uint32_t familyNum = 0; familyNum < familyCount; ++familyNum)
{
// family
void* fmem = alloc(NvBlastAssetGetFamilyMemorySize(asset, messageLog));
NvBlastFamily* family = NvBlastAssetCreateFamily(fmem, asset, messageLog); // Using zeroingAlloc in case actorTest compares memory blocks
NvBlastID id = NvBlastFamilyGetAssetID(family, messageLog);
EXPECT_TRUE(!memcmp(&assetID, &id, sizeof(NvBlastID)));
if (rand() % 2 == 0)
{
// replace asset with duplicate in half of cases to test setAsset
NvBlastFamilySetAsset(family, assetDuplicate, messageLog);
NvBlastID id2 = NvBlastFamilyGetAssetID(family, messageLog);
EXPECT_TRUE(!memcmp(&assetID, &id2, sizeof(NvBlastID)));
}
// actor
NvBlastActorDesc actorDesc;
actorDesc.initialBondHealths = actorDesc.initialSupportChunkHealths = nullptr;
actorDesc.uniformInitialBondHealth = actorDesc.uniformInitialLowerSupportChunkHealth = 1.0f;
m_scratch.resize((size_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(family, messageLog));
NvBlastActor* actor = NvBlastFamilyCreateFirstActor(family, &actorDesc, m_scratch.data(), messageLog);
EXPECT_TRUE(actor != nullptr);
// Generate damage
std::set<NvBlastActor*> actors;
actors.insert(actor);
if (printActorCount) std::cout << "Actors: 1.. ";
for (uint32_t damageNum = 0; damageNum < damageCount; ++damageNum)
{
GeneratorAsset::Vec3 localPos = settings.extents*GeneratorAsset::Vec3((float)rand() / RAND_MAX - 0.5f, (float)rand() / RAND_MAX - 0.5f, (float)rand() / RAND_MAX - 0.5f);
blast(actors, &testAsset, localPos, relativeDamageRadius, relativeDamageRadius*1.2f, compressiveDamage);
if (printActorCount) std::cout << actors.size() << ".. ";
if (actors.size() > 0)
{
const NvBlastFamily* family = NvBlastActorGetFamily(*actors.begin(), messageLog);
const uint32_t actorCount = NvBlastFamilyGetActorCount(family, messageLog);
EXPECT_EQ((uint32_t)actors.size(), actorCount);
if ((uint32_t)actors.size() == actorCount)
{
std::vector<NvBlastActor*> buffer1(actorCount);
const uint32_t actorsWritten = NvBlastFamilyGetActors(&buffer1[0], actorCount, family, messageLog);
EXPECT_EQ(actorsWritten, actorCount);
std::vector<NvBlastActor*> buffer2(actors.begin(), actors.end());
EXPECT_EQ(0, memcmp(&buffer1[0], buffer2.data(), actorCount*sizeof(NvBlastActor*)));
}
}
// Test individual actors
if (actorTest != nullptr)
{
for (std::set<NvBlastActor*>::iterator k = actors.begin(); k != actors.end(); ++k)
{
actorTest(*static_cast<Nv::Blast::Actor*>(*k), messageLog);
}
}
}
if (printActorCount) std::cout << "\n";
// Test fractured actor set
if (postDamageTest)
{
std::vector<NvBlastActor*> actorArray(actors.begin(), actors.end());
postDamageTest(actorArray, messageLog);
}
// Release remaining actors
for (std::set<NvBlastActor*>::iterator k = actors.begin(); k != actors.end(); ++k)
{
NvBlastActorDeactivate(*k, messageLog);
}
actors.clear();
free(family);
}
// Release asset data
free(asset);
free(assetDuplicate);
}
std::cout << "done.\n";
}
std::vector<NvBlastAsset*> m_assets;
std::vector<NvBlastActor*> m_actors;
std::vector<char> m_scratch;
static std::vector<char> s_storage;
static size_t s_curr;
};
// Static values
template<int FailLevel, int Verbosity>
std::vector<char> ActorTest<FailLevel, Verbosity>::s_storage;
template<int FailLevel, int Verbosity>
size_t ActorTest<FailLevel, Verbosity>::s_curr;
// Specializations
typedef ActorTest<NvBlastMessage::Error, 1> ActorTestAllowWarnings;
typedef ActorTest<NvBlastMessage::Warning, 1> ActorTestStrict;
// Tests
TEST_F(ActorTestStrict, InstanceActors)
{
// Build assets and instance actors
buildAssets();
instanceActors();
// Release actors and destroy assets
releaseActors();
destroyAssets();
}
TEST_F(ActorTestAllowWarnings, ActorHealthInitialization)
{
// Test all assets
std::vector<NvBlastAssetDesc> assetDescs;
assetDescs.insert(assetDescs.end(), g_assetDescs, g_assetDescs + getAssetDescCount());
assetDescs.insert(assetDescs.end(), g_assetDescsMissingCoverage, g_assetDescsMissingCoverage + getAssetDescMissingCoverageCount());
struct TestMode
{
enum Enum
{
Uniform,
Nonuniform,
Count
};
};
for (auto assetDesc : assetDescs)
{
NvBlastAsset* asset = buildAsset(assetDesc);
EXPECT_TRUE(asset != nullptr);
Nv::Blast::Asset& assetInt = static_cast<Nv::Blast::Asset&>(*asset);
NvBlastSupportGraph graph = NvBlastAssetGetSupportGraph(asset, nullptr);
std::vector<float> supportChunkHealths(graph.nodeCount);
for (size_t i = 0; i < supportChunkHealths.size(); ++i)
{
supportChunkHealths[i] = 1.0f + (float)i;
}
std::vector<float> bondHealths(assetInt.getBondCount());
for (size_t i = 0; i < bondHealths.size(); ++i)
{
bondHealths[i] = 1.5f + (float)i;
}
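// distinct per-entry values let the nonuniform test modes below verify that every
// chunk and bond health lands in its proper slot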
for (int chunkTestMode = 0; chunkTestMode < TestMode::Count; ++chunkTestMode)
{
for (int bondTestMode = 0; bondTestMode < TestMode::Count; ++bondTestMode)
{
NvBlastActorDesc actorDesc;
switch (chunkTestMode)
{
default:
case TestMode::Uniform:
actorDesc.initialSupportChunkHealths = nullptr;
actorDesc.uniformInitialLowerSupportChunkHealth = 1.0f;
break;
case TestMode::Nonuniform:
actorDesc.initialSupportChunkHealths = supportChunkHealths.data();
break;
}
switch (bondTestMode)
{
default:
case TestMode::Uniform:
actorDesc.initialBondHealths = nullptr;
actorDesc.uniformInitialBondHealth = 2.0f;
break;
case TestMode::Nonuniform:
actorDesc.initialBondHealths = bondHealths.data();
break;
}
void* fmem = alloc(NvBlastAssetGetFamilyMemorySize(asset, messageLog));
NvBlastFamily* family = NvBlastAssetCreateFamily(fmem, asset, nullptr);
std::vector<char> scratch((size_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(family, messageLog));
NvBlastActor* actor = NvBlastFamilyCreateFirstActor(family, &actorDesc, scratch.data(), messageLog);
EXPECT_TRUE(actor != nullptr);
Nv::Blast::Actor& actorInt = static_cast<Nv::Blast::Actor&>(*actor);
Nv::Blast::FamilyHeader* header = actorInt.getFamilyHeader();
for (uint32_t i = 0; i < graph.nodeCount; ++i)
{
const uint32_t supportChunkIndex = graph.chunkIndices[i];
for (Nv::Blast::Asset::DepthFirstIt it(assetInt, supportChunkIndex); (bool)it; ++it)
{
const uint32_t chunkIndex = (uint32_t)it;
const uint32_t lowerSupportIndex = assetInt.getContiguousLowerSupportIndex(chunkIndex);
NVBLAST_ASSERT(lowerSupportIndex < assetInt.getLowerSupportChunkCount());
const float health = header->getLowerSupportChunkHealths()[lowerSupportIndex];
switch (chunkTestMode)
{
default:
case TestMode::Uniform:
EXPECT_EQ(1.0f, health);
break;
case TestMode::Nonuniform:
EXPECT_EQ(supportChunkHealths[i], health);
break;
}
}
}
for (uint32_t i = 0; i < assetInt.getBondCount(); ++i)
{
switch (bondTestMode)
{
default:
case TestMode::Uniform:
EXPECT_EQ(2.0f, header->getBondHealths()[i]);
break;
case TestMode::Nonuniform:
EXPECT_EQ(bondHealths[i], header->getBondHealths()[i]);
break;
}
}
NvBlastActorDeactivate(actor, messageLog);
free(family);
}
}
free(asset);
}
}
TEST_F(ActorTestStrict, PartitionActorsToSupportChunksTestCounts)
{
partitionActorsToSupportChunks(getAssetDescCount(), g_assetDescs, nullptr, recursivePartitionPostSplitTestCounts, false);
}
TEST_F(ActorTestAllowWarnings, PartitionActorsFromBadDescriptorsToSupportChunksTestCounts)
{
partitionActorsToSupportChunks(getAssetDescMissingCoverageCount(), g_assetDescsMissingCoverage, nullptr, recursivePartitionPostSplitTestCounts, false);
}
TEST_F(ActorTestStrict, PartitionActorsToLeafChunksTestCounts)
{
partitionActorsToSupportChunks(getAssetDescCount(), g_assetDescs, nullptr, recursivePartitionPostSplitTestCounts, true);
}
TEST_F(ActorTestAllowWarnings, PartitionActorsFromBadDescriptorsToLeafChunksTestCounts)
{
partitionActorsToSupportChunks(getAssetDescMissingCoverageCount(), g_assetDescsMissingCoverage, nullptr, recursivePartitionPostSplitTestCounts, true);
}
TEST_F(ActorTestStrict, PartitionActorsToSupportChunksTestVisibility)
{
partitionActorsToSupportChunks(getAssetDescCount(), g_assetDescs, testActorVisibleChunks, recursivePartitionPostSplitTestVisibleChunks, false);
}
TEST_F(ActorTestAllowWarnings, PartitionActorsFromBadDescriptorsToSupportChunksTestVisibility)
{
partitionActorsToSupportChunks(getAssetDescMissingCoverageCount(), g_assetDescsMissingCoverage, testActorVisibleChunks, recursivePartitionPostSplitTestVisibleChunks, false);
}
TEST_F(ActorTestStrict, PartitionActorsToLeafChunksTestVisibility)
{
partitionActorsToSupportChunks(getAssetDescCount(), g_assetDescs, testActorVisibleChunks, recursivePartitionPostSplitTestVisibleChunks, true);
}
TEST_F(ActorTestAllowWarnings, PartitionActorsFromBadDescriptorsToLeafChunksTestVisibility)
{
partitionActorsToSupportChunks(getAssetDescMissingCoverageCount(), g_assetDescsMissingCoverage, testActorVisibleChunks, recursivePartitionPostSplitTestVisibleChunks, true);
}
TEST_F(ActorTestStrict, DamageLeafSupportActorsTestVisibility)
{
damageLeafSupportActors(4, 4, 5, false, testActorVisibleChunks, nullptr);
}
TEST_F(ActorTestStrict, DamageLeafSupportActorTestBlockSerialization)
{
typedef CubeAssetGenerator::BondFlags BF;
s_storage.resize(0);
damageLeafSupportActors(4, 4, 5, false, nullptr, testActorBlockSerialize);
s_curr = 0;
damageLeafSupportActors(4, 4, 5, false, nullptr, testActorBlockDeserialize);
s_storage.resize(0);
damageLeafSupportActors(4, 4, 5, false, nullptr, testActorBlockSerialize, BF::ALL_INTERNAL_BONDS | BF::Z_MINUS_WORLD_BONDS);
s_curr = 0;
damageLeafSupportActors(4, 4, 5, false, nullptr, testActorBlockDeserialize, BF::ALL_INTERNAL_BONDS | BF::Z_MINUS_WORLD_BONDS);
s_storage.resize(0);
damageLeafSupportActors(4, 4, 5, false, nullptr, testActorCapnSerialize);
s_curr = 0;
damageLeafSupportActors(4, 4, 5, false, nullptr, testActorCapnDeserialize);
s_storage.resize(0);
damageLeafSupportActors(4, 4, 5, false, nullptr, testActorCapnSerialize, BF::ALL_INTERNAL_BONDS | BF::Z_MINUS_WORLD_BONDS);
s_curr = 0;
damageLeafSupportActors(4, 4, 5, false, nullptr, testActorCapnDeserialize, BF::ALL_INTERNAL_BONDS | BF::Z_MINUS_WORLD_BONDS);
s_storage.resize(0);
}
TEST_F(ActorTestStrict, DISABLED_DamageSimpleLeafSupportActorTestActorSerializationNewFamily)
{
typedef CubeAssetGenerator::BondFlags BF;
damageLeafSupportActors(1, 1, 4, true, nullptr, testActorSerializationNewFamily);
damageLeafSupportActors(1, 1, 4, true, nullptr, testActorSerializationNewFamily, BF::ALL_INTERNAL_BONDS | BF::Z_MINUS_WORLD_BONDS);
}
TEST_F(ActorTestStrict, DamageSimpleLeafSupportActorTestActorSerializationPartialBlock)
{
typedef CubeAssetGenerator::BondFlags BF;
damageLeafSupportActors(1, 1, 4, true, nullptr, testActorSerializationPartialBlock);
damageLeafSupportActors(1, 1, 4, true, nullptr, testActorSerializationPartialBlock, BF::ALL_INTERNAL_BONDS | BF::Z_MINUS_WORLD_BONDS);
}
TEST_F(ActorTestStrict, DISABLED_DamageLeafSupportActorTestActorSerializationNewFamily)
{
typedef CubeAssetGenerator::BondFlags BF;
damageLeafSupportActors(4, 4, 4, false, nullptr, testActorSerializationNewFamily);
damageLeafSupportActors(4, 4, 4, false, nullptr, testActorSerializationNewFamily, BF::ALL_INTERNAL_BONDS | BF::Z_MINUS_WORLD_BONDS);
}
TEST_F(ActorTestStrict, DamageLeafSupportActorTestActorSerializationPartialBlock)
{
typedef CubeAssetGenerator::BondFlags BF;
damageLeafSupportActors(4, 4, 4, false, nullptr, testActorSerializationPartialBlock);
damageLeafSupportActors(4, 4, 4, false, nullptr, testActorSerializationPartialBlock, BF::ALL_INTERNAL_BONDS | BF::Z_MINUS_WORLD_BONDS);
}
TEST_F(ActorTestStrict, DamageMultipleIslandLeafSupportActorsTestVisibility)
{
typedef CubeAssetGenerator::BondFlags BF;
damageLeafSupportActors(4, 4, 5, false, testActorVisibleChunks, nullptr, BF::Y_BONDS | BF::Z_BONDS); // Only connect y-z plane islands
damageLeafSupportActors(4, 4, 5, false, testActorVisibleChunks, nullptr, BF::Z_BONDS); // Only connect z-direction islands
damageLeafSupportActors(4, 4, 5, false, testActorVisibleChunks, nullptr, BF::NO_BONDS); // All support chunks disconnected (single-chunk islands)
}
TEST_F(ActorTestStrict, DamageBoundToWorldLeafSupportActorsTestVisibility)
{
typedef CubeAssetGenerator::BondFlags BF;
damageLeafSupportActors(4, 4, 5, false, testActorVisibleChunks, nullptr, BF::ALL_INTERNAL_BONDS | BF::X_MINUS_WORLD_BONDS);
damageLeafSupportActors(4, 4, 5, false, testActorVisibleChunks, nullptr, BF::ALL_INTERNAL_BONDS | BF::Y_PLUS_WORLD_BONDS);
damageLeafSupportActors(4, 4, 5, false, testActorVisibleChunks, nullptr, BF::ALL_INTERNAL_BONDS | BF::Z_MINUS_WORLD_BONDS);
damageLeafSupportActors(4, 4, 5, false, testActorVisibleChunks, nullptr, BF::ALL_INTERNAL_BONDS | BF::X_PLUS_WORLD_BONDS | BF::Y_MINUS_WORLD_BONDS);
damageLeafSupportActors(4, 4, 5, false, testActorVisibleChunks, nullptr, BF::ALL_INTERNAL_BONDS | BF::X_PLUS_WORLD_BONDS | BF::X_MINUS_WORLD_BONDS
| BF::Y_PLUS_WORLD_BONDS | BF::Y_MINUS_WORLD_BONDS
| BF::Z_PLUS_WORLD_BONDS | BF::Z_MINUS_WORLD_BONDS);
}
|
NVIDIA-Omniverse/PhysX/blast/source/test/src/unit/AssetTests.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include "NvBlastAsset.h"
#include "NvBlastMath.h"
#include "BlastBaseTest.h"
#include "NvBlastTkFramework.h"
#include <random>
#include <algorithm>
// All supported platforms now provide serialization;
// keep the define for future platforms that might not.
#define ENABLE_SERIALIZATION_TESTS 1
#pragma warning( push )
#pragma warning( disable : 4267 )
// NOTE: Instead of excluding serialization and its tests on VC12, the tests should be broken out into a separate C++ file.
#if ENABLE_SERIALIZATION_TESTS
#include "NvBlastExtSerialization.h"
#include "NvBlastExtLlSerialization.h"
#include "NvBlastExtSerializationInternal.h"
#endif
#include "NvBlastExtAssetUtils.h"
#pragma warning( pop )
#include <fstream>
#include <iosfwd>
#ifdef WIN32
#include <windows.h>
#endif
template<int FailLevel, int Verbosity>
class AssetTest : public BlastBaseTest<FailLevel, Verbosity>
{
public:
AssetTest()
{
NvBlastTkFrameworkCreate();
}
~AssetTest()
{
NvBlastTkFrameworkGet()->release();
}
static void messageLog(int type, const char* msg, const char* file, int line)
{
BlastBaseTest<FailLevel, Verbosity>::messageLog(type, msg, file, line);
}
static void* alloc(size_t size)
{
return BlastBaseTest<FailLevel, Verbosity>::alignedZeroedAlloc(size);
}
static void free(void* mem)
{
BlastBaseTest<FailLevel, Verbosity>::alignedFree(mem);
}
void testSubtreeLeafChunkCounts(const Nv::Blast::Asset& a)
{
const NvBlastChunk* chunks = a.getChunks();
const uint32_t* subtreeLeafChunkCounts = a.getSubtreeLeafChunkCounts();
uint32_t totalLeafChunkCount = 0;
for (uint32_t chunkIndex = 0; chunkIndex < a.m_chunkCount; ++chunkIndex)
{
const NvBlastChunk& chunk = chunks[chunkIndex];
if (Nv::Blast::isInvalidIndex(chunk.parentChunkIndex))
{
totalLeafChunkCount += subtreeLeafChunkCounts[chunkIndex];
}
const bool isLeafChunk = chunk.firstChildIndex >= chunk.childIndexStop;
uint32_t subtreeLeafChunkCount = isLeafChunk ? 1 : 0;
for (uint32_t childIndex = chunk.firstChildIndex; childIndex < chunk.childIndexStop; ++childIndex)
{
subtreeLeafChunkCount += subtreeLeafChunkCounts[childIndex];
}
EXPECT_EQ(subtreeLeafChunkCount, subtreeLeafChunkCounts[chunkIndex]);
}
EXPECT_EQ(totalLeafChunkCount, a.m_leafChunkCount);
}
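// Invariant verified above: a leaf chunk's subtree leaf count is 1, an interior
// chunk's is the sum over its children, and the root chunks' counts sum to
// m_leafChunkCount.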
void testChunkToNodeMap(const Nv::Blast::Asset& a)
{
for (uint32_t chunkIndex = 0; chunkIndex < a.m_chunkCount; ++chunkIndex)
{
const uint32_t nodeIndex = a.getChunkToGraphNodeMap()[chunkIndex];
if (!Nv::Blast::isInvalidIndex(nodeIndex))
{
EXPECT_LT(nodeIndex, a.m_graph.m_nodeCount);
EXPECT_EQ(chunkIndex, a.m_graph.getChunkIndices()[nodeIndex]);
}
else
{
const uint32_t* chunkIndexStop = a.m_graph.getChunkIndices() + a.m_graph.m_nodeCount;
const uint32_t* it = std::find<const uint32_t*, uint32_t>(a.m_graph.getChunkIndices(), chunkIndexStop, chunkIndex);
EXPECT_EQ(chunkIndexStop, it);
}
}
}
NvBlastAsset* buildAsset(const ExpectedAssetValues& expected, const NvBlastAssetDesc* desc)
{
std::vector<char> scratch;
scratch.resize((size_t)NvBlastGetRequiredScratchForCreateAsset(desc, messageLog));
void* mem = alloc(NvBlastGetAssetMemorySize(desc, messageLog));
NvBlastAsset* asset = NvBlastCreateAsset(mem, desc, scratch.data(), messageLog);
EXPECT_TRUE(asset != nullptr);
if (asset == nullptr)
{
free(mem);
return nullptr;
}
Nv::Blast::Asset& a = *(Nv::Blast::Asset*)asset;
EXPECT_EQ(expected.totalChunkCount, a.m_chunkCount);
EXPECT_EQ(expected.graphNodeCount, a.m_graph.m_nodeCount);
EXPECT_EQ(expected.bondCount, a.m_graph.getAdjacencyPartition()[a.m_graph.m_nodeCount] / 2);
EXPECT_EQ(expected.leafChunkCount, a.m_leafChunkCount);
EXPECT_EQ(expected.subsupportChunkCount, a.m_chunkCount - a.m_firstSubsupportChunkIndex);
testSubtreeLeafChunkCounts(a);
testChunkToNodeMap(a);
return asset;
}
void checkAssetsExpected(Nv::Blast::Asset& asset, const ExpectedAssetValues& expected)
{
EXPECT_EQ(expected.totalChunkCount, asset.m_chunkCount);
EXPECT_EQ(expected.graphNodeCount, asset.m_graph.m_nodeCount);
EXPECT_EQ(expected.bondCount, asset.m_graph.getAdjacencyPartition()[asset.m_graph.m_nodeCount] / 2);
EXPECT_EQ(expected.leafChunkCount, asset.m_leafChunkCount);
EXPECT_EQ(expected.subsupportChunkCount, asset.m_chunkCount - asset.m_firstSubsupportChunkIndex);
testSubtreeLeafChunkCounts(asset);
testChunkToNodeMap(asset);
}
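// Illustrative sketch (not called by the tests): the undirected bond count checked
// above can be derived from the support graph's adjacency partition, since each
// bond appears once per adjacent node and is therefore counted twice.
static uint32_t bondCountFromAdjacency(Nv::Blast::Asset& a)
{
return a.m_graph.getAdjacencyPartition()[a.m_graph.m_nodeCount] / 2;
}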
// Expects that the bond normal points from the lower-indexed chunk to the
// higher-indexed chunk (using chunk.centroid). This convention is required
// by findClosestNode.
void checkNormalDir(NvBlastChunkDesc* chunkDescs, size_t chunkDescCount, NvBlastBondDesc* bondDescs, size_t bondDescCount)
{
for (size_t bondIndex = 0; bondIndex < bondDescCount; ++bondIndex)
{
NvBlastBondDesc& bond = bondDescs[bondIndex];
uint32_t chunkIndex0 = bond.chunkIndices[0];
uint32_t chunkIndex1 = bond.chunkIndices[1];
bool swap = chunkIndex0 > chunkIndex1;
uint32_t testIndex0 = swap ? chunkIndex1 : chunkIndex0;
uint32_t testIndex1 = swap ? chunkIndex0 : chunkIndex1;
EXPECT_TRUE(testIndex0 < testIndex1);
// no convention for world chunks
if (!Nv::Blast::isInvalidIndex(testIndex0) && !Nv::Blast::isInvalidIndex(testIndex1))
{
NvBlastChunkDesc& chunk0 = chunkDescs[testIndex0];
NvBlastChunkDesc& chunk1 = chunkDescs[testIndex1];
float dir[3];
Nv::Blast::VecMath::sub(chunk1.centroid, chunk0.centroid, dir);
bool meetsConvention = Nv::Blast::VecMath::dot(bond.bond.normal, dir) > 0;
EXPECT_TRUE(meetsConvention);
if (!meetsConvention)
{
printf("bond %zd chunks(%d,%d): %.2f %.2f %.2f %.2f %.2f %.2f %d\n",
bondIndex, chunkIndex0, chunkIndex1,
bond.bond.normal[0], bond.bond.normal[1], bond.bond.normal[2],
dir[0], dir[1], dir[2],
Nv::Blast::VecMath::dot(bond.bond.normal, dir) > 0);
}
}
}
}
// Expects that the bond normal points from the lower-indexed node to the
// higher-indexed node (using chunk.centroid). This convention is required
// by findClosestNode.
void checkNormalDir(const NvBlastSupportGraph graph, const NvBlastChunk* assetChunks, const NvBlastBond* assetBonds)
{
for (uint32_t nodeIndex = 0; nodeIndex < graph.nodeCount; nodeIndex++)
{
uint32_t adjStart = graph.adjacencyPartition[nodeIndex];
uint32_t adjStop = graph.adjacencyPartition[nodeIndex + 1];
for (uint32_t adj = adjStart; adj < adjStop; ++adj)
{
uint32_t adjNodeIndex = graph.adjacentNodeIndices[adj];
bool swap = nodeIndex > adjNodeIndex;
uint32_t testIndex0 = swap ? adjNodeIndex : nodeIndex;
uint32_t testIndex1 = swap ? nodeIndex : adjNodeIndex;
// no convention for world chunks
if (!Nv::Blast::isInvalidIndex(graph.chunkIndices[testIndex0]) && !Nv::Blast::isInvalidIndex(graph.chunkIndices[testIndex1]))
{
const NvBlastChunk& chunk0 = assetChunks[graph.chunkIndices[testIndex0]];
const NvBlastChunk& chunk1 = assetChunks[graph.chunkIndices[testIndex1]];
uint32_t bondIndex = graph.adjacentBondIndices[adj];
const NvBlastBond& bond = assetBonds[bondIndex];
float dir[3];
Nv::Blast::VecMath::sub(chunk1.centroid, chunk0.centroid, dir);
bool meetsConvention = Nv::Blast::VecMath::dot(bond.normal, dir) > 0;
EXPECT_TRUE(meetsConvention);
if (!meetsConvention)
{
printf("bond %d nodes(%d,%d): %.2f %.2f %.2f %.2f %.2f %.2f %d\n",
bondIndex, nodeIndex, adjNodeIndex,
bond.normal[0], bond.normal[1], bond.normal[2],
dir[0], dir[1], dir[2],
Nv::Blast::VecMath::dot(bond.normal, dir) > 0);
}
}
}
}
}
void checkNormalDir(const NvBlastAsset* asset)
{
const NvBlastChunk* assetChunks = NvBlastAssetGetChunks(asset, nullptr);
const NvBlastBond* assetBonds = NvBlastAssetGetBonds(asset, nullptr);
const NvBlastSupportGraph graph = NvBlastAssetGetSupportGraph(asset, nullptr);
checkNormalDir(graph, assetChunks, assetBonds);
}
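// Minimal sketch of the convention checked by the overloads above, assuming the
// VecMath::sub/dot array signatures used elsewhere in this file: the bond normal
// must have a positive dot product with the vector from the lower-indexed
// centroid to the higher-indexed one.
static bool normalMeetsConvention(const float lowerCentroid[3], const float higherCentroid[3], const float normal[3])
{
float dir[3];
Nv::Blast::VecMath::sub(higherCentroid, lowerCentroid, dir);
return Nv::Blast::VecMath::dot(normal, dir) > 0.0f;
}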
void buildAssetShufflingDescriptors(const NvBlastAssetDesc* desc, const ExpectedAssetValues& expected, uint32_t shuffleCount, bool useTk)
{
NvBlastAssetDesc shuffledDesc = *desc;
std::vector<NvBlastChunkDesc> chunkDescs(desc->chunkDescs, desc->chunkDescs + desc->chunkCount);
shuffledDesc.chunkDescs = chunkDescs.data();
std::vector<NvBlastBondDesc> bondDescs(desc->bondDescs, desc->bondDescs + desc->bondCount);
shuffledDesc.bondDescs = bondDescs.data();
if (!useTk)
{
std::vector<char> scratch(desc->chunkCount);
NvBlastEnsureAssetExactSupportCoverage(chunkDescs.data(), desc->chunkCount, scratch.data(), messageLog);
}
else
{
NvBlastTkFrameworkGet()->ensureAssetExactSupportCoverage(chunkDescs.data(), desc->chunkCount);
}
for (uint32_t i = 0; i < shuffleCount; ++i)
{
checkNormalDir(chunkDescs.data(), chunkDescs.size(), bondDescs.data(), bondDescs.size());
shuffleAndFixChunkDescs(chunkDescs.data(), desc->chunkCount, bondDescs.data(), desc->bondCount, useTk);
checkNormalDir(chunkDescs.data(), chunkDescs.size(), bondDescs.data(), bondDescs.size());
NvBlastAsset* asset = buildAsset(expected, &shuffledDesc);
EXPECT_TRUE(asset != nullptr);
checkNormalDir(asset);
if (asset)
{
free(asset);
}
}
}
void shuffleAndFixChunkDescs(NvBlastChunkDesc* chunkDescs, uint32_t chunkDescCount, NvBlastBondDesc* bondDescs, uint32_t bondDescCount, bool useTk)
{
// Create reorder array and fill with identity map
std::vector<uint32_t> shuffledOrder(chunkDescCount);
for (uint32_t i = 0; i < chunkDescCount; ++i)
{
shuffledOrder[i] = i;
}
// An array into which to copy the reordered descs
std::vector<NvBlastChunkDesc> shuffledChunkDescs(chunkDescCount);
std::random_device rd;
std::mt19937 g(rd());
std::vector<char> scratch;
const uint32_t trials = 30;
uint32_t attempt = 0;
while(1)
{
// Shuffle the reorder array
std::shuffle(shuffledOrder.begin(), shuffledOrder.end(), g);
// Save initial bonds
std::vector<NvBlastBondDesc> savedBondDescs(bondDescs, bondDescs + bondDescCount);
// Shuffle chunks and bonds
NvBlastApplyAssetDescChunkReorderMap(shuffledChunkDescs.data(), chunkDescs, chunkDescCount, bondDescs, bondDescCount, shuffledOrder.data(), true, nullptr);
// All normals should still point in the expected direction (the reorder swapped them where the chunk index order flipped)
checkNormalDir(shuffledChunkDescs.data(), chunkDescCount, bondDescs, bondDescCount);
checkNormalDir(chunkDescs, chunkDescCount, savedBondDescs.data(), bondDescCount);
// Check the results
for (uint32_t i = 0; i < chunkDescCount; ++i)
{
EXPECT_EQ(chunkDescs[i].userData, shuffledChunkDescs[shuffledOrder[i]].userData);
EXPECT_TRUE(chunkDescs[i].parentChunkDescIndex > chunkDescCount || shuffledChunkDescs[shuffledOrder[i]].parentChunkDescIndex == shuffledOrder[chunkDescs[i].parentChunkDescIndex]);
}
for (uint32_t i = 0; i < bondDescCount; ++i)
{
for (uint32_t k = 0; k < 2; ++k)
{
if (!Nv::Blast::isInvalidIndex(savedBondDescs[i].chunkIndices[k]))
{
EXPECT_EQ(shuffledOrder[savedBondDescs[i].chunkIndices[k]], bondDescs[i].chunkIndices[k]);
}
}
}
// Try creating asset, usually it should fail (otherwise make another attempt)
NvBlastAssetDesc desc = { chunkDescCount, shuffledChunkDescs.data(), bondDescCount, bondDescs };
scratch.resize((size_t)NvBlastGetRequiredScratchForCreateAsset(&desc, nullptr));
void* mem = alloc(NvBlastGetAssetMemorySize(&desc, nullptr));
NvBlastAsset* asset = NvBlastCreateAsset(mem, &desc, scratch.data(), nullptr);
if (asset == nullptr)
{
free(mem);
break;
}
else
{
free(asset);
memcpy(bondDescs, savedBondDescs.data(), sizeof(NvBlastBondDesc) * bondDescCount);
attempt++;
if (attempt >= trials)
{
GTEST_NONFATAL_FAILURE_("Shuffled chunk descs should fail asset creation (most of the time).");
break;
}
}
}
// Now we want to fix that order
if (!useTk)
{
std::vector<uint32_t> chunkReorderMap(chunkDescCount);
std::vector<char> scratch2(3 * chunkDescCount * sizeof(uint32_t));
const bool isIdentity = NvBlastBuildAssetDescChunkReorderMap(chunkReorderMap.data(), shuffledChunkDescs.data(), chunkDescCount, scratch2.data(), messageLog);
EXPECT_FALSE(isIdentity);
NvBlastApplyAssetDescChunkReorderMap(chunkDescs, shuffledChunkDescs.data(), chunkDescCount, bondDescs, bondDescCount, chunkReorderMap.data(), true, messageLog);
}
else
{
memcpy(chunkDescs, shuffledChunkDescs.data(), chunkDescCount * sizeof(NvBlastChunkDesc));
const bool isIdentity = NvBlastTkFrameworkGet()->reorderAssetDescChunks(chunkDescs, chunkDescCount, bondDescs, bondDescCount, nullptr, true);
EXPECT_FALSE(isIdentity);
}
}
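// The round trip above works because NvBlastBuildAssetDescChunkReorderMap computes
// a map that restores a parent-before-child chunk ordering, and
// NvBlastApplyAssetDescChunkReorderMap remaps chunk descs and bond chunk indices
// under that map (the boolean argument appears to keep bond normals consistent
// with the reordered chunk indices).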
void mergeAssetTest(const NvBlastAssetDesc& desc, bool fail)
{
std::vector<char> scratch;
scratch.resize((size_t)NvBlastGetRequiredScratchForCreateAsset(&desc, messageLog));
void* mem = NVBLAST_ALLOC(NvBlastGetAssetMemorySize(&desc, messageLog));
NvBlastAsset* asset = NvBlastCreateAsset(mem, &desc, scratch.data(), messageLog);
EXPECT_TRUE(asset != nullptr);
if (asset == nullptr)
{
free(mem);
return;
}
// Merge two copies of this asset together
const NvBlastAsset* components[2] = { asset, asset };
const NvcVec3 translations[2] = { { 0, 0, 0 },{ 2, 0, 0 } };
const NvBlastBond bond = { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 1.0f, 0.0f, 0.0f }, 0 };
NvBlastExtAssetUtilsBondDesc newBondDescs[4];
for (int i = 0; i < 4; ++i)
{
newBondDescs[i].bond = bond;
newBondDescs[i].chunkIndices[0] = 2 * (i + 1);
newBondDescs[i].chunkIndices[1] = 2 * i + 1;
newBondDescs[i].componentIndices[0] = 0;
newBondDescs[i].componentIndices[1] = 1;
}
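// Note: chunkIndices in NvBlastExtAssetUtilsBondDesc are local to the component
// chosen by componentIndices; the merge remaps them into the combined asset's
// chunk numbering.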
// Create a merged descriptor
std::vector<uint32_t> chunkIndexOffsets(2);
std::vector<uint32_t> chunkReorderMap(2 * desc.chunkCount);
NvBlastAssetDesc mergedDesc = NvBlastExtAssetUtilsMergeAssets(components, nullptr, nullptr, translations, 2, newBondDescs, 4, chunkIndexOffsets.data(), chunkReorderMap.data(), 2 * desc.chunkCount);
EXPECT_EQ(2 * desc.bondCount + 4, mergedDesc.bondCount);
EXPECT_EQ(2 * desc.chunkCount, mergedDesc.chunkCount);
for (uint32_t i = 0; i < 2 * desc.chunkCount; ++i)
{
EXPECT_LT(chunkReorderMap[i], 2 * desc.chunkCount);
}
EXPECT_EQ(0, chunkIndexOffsets[0]);
EXPECT_EQ(desc.chunkCount, chunkIndexOffsets[1]);
scratch.resize((size_t)NvBlastGetRequiredScratchForCreateAsset(&mergedDesc, messageLog));
mem = NVBLAST_ALLOC(NvBlastGetAssetMemorySize(&mergedDesc, messageLog));
NvBlastAsset* mergedAsset = NvBlastCreateAsset(mem, &mergedDesc, scratch.data(), messageLog);
EXPECT_TRUE(mergedAsset != nullptr);
if (mergedAsset == nullptr)
{
free(mem);
return;
}
NVBLAST_FREE(const_cast<NvBlastBondDesc*>(mergedDesc.bondDescs));
NVBLAST_FREE(const_cast<NvBlastChunkDesc*>(mergedDesc.chunkDescs));
NVBLAST_FREE(mergedAsset);
if (!fail)
{
mergedDesc = NvBlastExtAssetUtilsMergeAssets(components, nullptr, nullptr, translations, 2, newBondDescs, 4, nullptr, chunkReorderMap.data(), 2 * desc.chunkCount);
EXPECT_EQ(2 * desc.bondCount + 4, mergedDesc.bondCount);
EXPECT_EQ(2 * desc.chunkCount, mergedDesc.chunkCount);
for (uint32_t i = 0; i < 2 * desc.chunkCount; ++i)
{
EXPECT_LT(chunkReorderMap[i], 2 * desc.chunkCount);
}
scratch.resize((size_t)NvBlastGetRequiredScratchForCreateAsset(&mergedDesc, messageLog));
mem = NVBLAST_ALLOC(NvBlastGetAssetMemorySize(&mergedDesc, messageLog));
mergedAsset = NvBlastCreateAsset(mem, &mergedDesc, scratch.data(), messageLog);
EXPECT_TRUE(mergedAsset != nullptr);
free(mem);
NVBLAST_FREE(const_cast<NvBlastBondDesc*>(mergedDesc.bondDescs));
NVBLAST_FREE(const_cast<NvBlastChunkDesc*>(mergedDesc.chunkDescs));
}
else
{
// We don't pass in a valid chunkReorderMap so asset creation should fail
mergedDesc = NvBlastExtAssetUtilsMergeAssets(components, nullptr, nullptr, translations, 2, newBondDescs, 4, chunkIndexOffsets.data(), nullptr, 0);
EXPECT_EQ(2 * desc.bondCount + 4, mergedDesc.bondCount);
EXPECT_EQ(2 * desc.chunkCount, mergedDesc.chunkCount);
EXPECT_EQ(0, chunkIndexOffsets[0]);
EXPECT_EQ(desc.chunkCount, chunkIndexOffsets[1]);
scratch.resize((size_t)NvBlastGetRequiredScratchForCreateAsset(&mergedDesc, messageLog));
mem = NVBLAST_ALLOC(NvBlastGetAssetMemorySize(&mergedDesc, messageLog));
mergedAsset = NvBlastCreateAsset(mem, &mergedDesc, scratch.data(), messageLog);
EXPECT_TRUE(mergedAsset == nullptr);
free(mem);
NVBLAST_FREE(const_cast<NvBlastBondDesc*>(mergedDesc.bondDescs));
NVBLAST_FREE(const_cast<NvBlastChunkDesc*>(mergedDesc.chunkDescs));
mergedDesc = NvBlastExtAssetUtilsMergeAssets(components, nullptr, nullptr, translations, 2, newBondDescs, 4, nullptr, nullptr, 0);
EXPECT_EQ(2 * desc.bondCount + 4, mergedDesc.bondCount);
EXPECT_EQ(2 * desc.chunkCount, mergedDesc.chunkCount);
scratch.resize((size_t)NvBlastGetRequiredScratchForCreateAsset(&mergedDesc, messageLog));
mem = NVBLAST_ALLOC(NvBlastGetAssetMemorySize(&mergedDesc, messageLog));
mergedAsset = NvBlastCreateAsset(mem, &mergedDesc, scratch.data(), messageLog);
EXPECT_TRUE(mergedAsset == nullptr);
free(mem);
NVBLAST_FREE(const_cast<NvBlastBondDesc*>(mergedDesc.bondDescs));
NVBLAST_FREE(const_cast<NvBlastChunkDesc*>(mergedDesc.chunkDescs));
// We lie and say the chunkReorderMap is not large enough. It should be filled with 0xFFFFFFFF up to the size we gave
mergedDesc = NvBlastExtAssetUtilsMergeAssets(components, nullptr, nullptr, translations, 2, newBondDescs, 4, nullptr, chunkReorderMap.data(), desc.chunkCount);
EXPECT_EQ(2 * desc.bondCount + 4, mergedDesc.bondCount);
EXPECT_EQ(2 * desc.chunkCount, mergedDesc.chunkCount);
for (uint32_t i = 0; i < desc.chunkCount; ++i)
{
EXPECT_TRUE(Nv::Blast::isInvalidIndex(chunkReorderMap[i]));
}
scratch.resize((size_t)NvBlastGetRequiredScratchForCreateAsset(&mergedDesc, messageLog));
mem = NVBLAST_ALLOC(NvBlastGetAssetMemorySize(&mergedDesc, messageLog));
mergedAsset = NvBlastCreateAsset(mem, &mergedDesc, scratch.data(), messageLog);
EXPECT_TRUE(mergedAsset == nullptr);
free(mem);
NVBLAST_FREE(const_cast<NvBlastBondDesc*>(mergedDesc.bondDescs));
NVBLAST_FREE(const_cast<NvBlastChunkDesc*>(mergedDesc.chunkDescs));
mergedDesc = NvBlastExtAssetUtilsMergeAssets(components, nullptr, nullptr, translations, 2, newBondDescs, 4, chunkIndexOffsets.data(), chunkReorderMap.data(), desc.chunkCount);
EXPECT_EQ(2 * desc.bondCount + 4, mergedDesc.bondCount);
EXPECT_EQ(2 * desc.chunkCount, mergedDesc.chunkCount);
for (uint32_t i = 0; i < desc.chunkCount; ++i)
{
EXPECT_TRUE(Nv::Blast::isInvalidIndex(chunkReorderMap[i]));
}
EXPECT_EQ(0, chunkIndexOffsets[0]);
EXPECT_EQ(desc.chunkCount, chunkIndexOffsets[1]);
scratch.resize((size_t)NvBlastGetRequiredScratchForCreateAsset(&mergedDesc, messageLog));
mem = NVBLAST_ALLOC(NvBlastGetAssetMemorySize(&mergedDesc, messageLog));
mergedAsset = NvBlastCreateAsset(mem, &mergedDesc, scratch.data(), messageLog);
EXPECT_TRUE(mergedAsset == nullptr);
free(mem);
NVBLAST_FREE(const_cast<NvBlastBondDesc*>(mergedDesc.bondDescs));
NVBLAST_FREE(const_cast<NvBlastChunkDesc*>(mergedDesc.chunkDescs));
}
// Finally free the original asset
NVBLAST_FREE(asset);
}
};
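// Fixture specializations. Judging by the names, FailLevel appears to be the most
// severe message type tolerated before the test fails (-1 tolerating everything),
// and Verbosity appears to control message logging.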
typedef AssetTest<-1, 0> AssetTestAllowErrorsSilently;
typedef AssetTest<NvBlastMessage::Error, 0> AssetTestAllowWarningsSilently;
typedef AssetTest<NvBlastMessage::Error, 1> AssetTestAllowWarnings;
typedef AssetTest<NvBlastMessage::Warning, 1> AssetTestStrict;
TEST_F(AssetTestStrict, BuildAssets)
{
const uint32_t assetDescCount = sizeof(g_assetDescs) / sizeof(g_assetDescs[0]);
std::vector<NvBlastAsset*> assets(assetDescCount);
// Build
for (uint32_t i = 0; i < assetDescCount; ++i)
{
assets[i] = buildAsset(g_assetExpectedValues[i], &g_assetDescs[i]);
}
// Destroy
for (uint32_t i = 0; i < assetDescCount; ++i)
{
if (assets[i])
{
free(assets[i]);
}
}
}
#if ENABLE_SERIALIZATION_TESTS
TEST_F(AssetTestStrict, SerializeAssets)
{
Nv::Blast::ExtSerialization* ser = NvBlastExtSerializationCreate();
EXPECT_TRUE(ser != nullptr);
const uint32_t assetDescCount = sizeof(g_assetDescs) / sizeof(g_assetDescs[0]);
std::vector<Nv::Blast::Asset*> assets(assetDescCount);
// Build
for (uint32_t i = 0; i < assetDescCount; ++i)
{
assets[i] = reinterpret_cast<Nv::Blast::Asset*>(buildAsset(g_assetExpectedValues[i], &g_assetDescs[i]));
}
// Serialize them
for (Nv::Blast::Asset* asset : assets)
{
void* buffer;
const uint64_t size = NvBlastExtSerializationSerializeAssetIntoBuffer(buffer, *ser, asset);
EXPECT_TRUE(size != 0);
uint32_t objectTypeID;
uint32_t encodingID;
uint64_t dataSize = 0;
EXPECT_TRUE(ser->peekHeader(&objectTypeID, &encodingID, &dataSize, buffer, size));
EXPECT_EQ(objectTypeID, Nv::Blast::LlObjectTypeID::Asset);
EXPECT_EQ(encodingID, ser->getSerializationEncoding());
EXPECT_EQ(dataSize + Nv::Blast::ExtSerializationInternal::HeaderSize, size);
}
// Destroy
for (uint32_t i = 0; i < assetDescCount; ++i)
{
if (assets[i])
{
free(assets[i]);
}
}
ser->release();
}
TEST_F(AssetTestStrict, SerializeAssetsRoundTrip)
{
Nv::Blast::ExtSerialization* ser = NvBlastExtSerializationCreate();
EXPECT_TRUE(ser != nullptr);
const uint32_t assetDescCount = sizeof(g_assetDescs) / sizeof(g_assetDescs[0]);
std::vector<Nv::Blast::Asset*> assets(assetDescCount);
// Build
for (uint32_t i = 0; i < assetDescCount; ++i)
{
assets[i] = reinterpret_cast<Nv::Blast::Asset*>(buildAsset(g_assetExpectedValues[i], &g_assetDescs[i]));
}
const uint32_t encodings[] =
{
Nv::Blast::ExtSerialization::EncodingID::CapnProtoBinary,
Nv::Blast::ExtSerialization::EncodingID::RawBinary
};
for (auto encoding : encodings)
{
ser->setSerializationEncoding(encoding);
// Serialize them
for (uint32_t i = 0; i < assetDescCount; ++i)
{
Nv::Blast::Asset* asset = assets[i];
void* buffer;
const uint64_t size = NvBlastExtSerializationSerializeAssetIntoBuffer(buffer, *ser, asset);
EXPECT_TRUE(size != 0);
Nv::Blast::Asset* rtAsset = reinterpret_cast<Nv::Blast::Asset*>(ser->deserializeFromBuffer(buffer, size));
//TODO: Compare assets
checkAssetsExpected(*rtAsset, g_assetExpectedValues[i]);
free(static_cast<void*>(rtAsset));
}
}
// Destroy
for (uint32_t i = 0; i < assetDescCount; ++i)
{
if (assets[i])
{
free(assets[i]);
}
}
ser->release();
}
TEST_F(AssetTestStrict, SerializeAssetsRoundTripWithSkipping)
{
Nv::Blast::ExtSerialization* ser = NvBlastExtSerializationCreate();
EXPECT_TRUE(ser != nullptr);
std::vector<char> stream;
class StreamBufferProvider : public Nv::Blast::ExtSerialization::BufferProvider
{
public:
StreamBufferProvider(std::vector<char>& stream) : m_stream(stream), m_cursor(0) {}
virtual void* requestBuffer(size_t size) override
{
m_stream.resize(m_cursor + size);
void* data = m_stream.data() + m_cursor;
m_cursor += size;
return data;
}
private:
std::vector<char>& m_stream;
size_t m_cursor;
} myStreamProvider(stream);
ser->setBufferProvider(&myStreamProvider);
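// With this provider installed, each serialization call appends its object to the
// single contiguous stream, which the loop below walks with peekHeader()/skipObject().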
const uint32_t assetDescCount = sizeof(g_assetDescs) / sizeof(g_assetDescs[0]);
std::vector<Nv::Blast::Asset*> assets(assetDescCount);
// Build
for (uint32_t i = 0; i < assetDescCount; ++i)
{
assets[i] = reinterpret_cast<Nv::Blast::Asset*>(buildAsset(g_assetExpectedValues[i], &g_assetDescs[i]));
}
const uint32_t encodings[] =
{
Nv::Blast::ExtSerialization::EncodingID::CapnProtoBinary,
Nv::Blast::ExtSerialization::EncodingID::RawBinary
};
for (auto encoding : encodings)
{
ser->setSerializationEncoding(encoding);
// Serialize them
for (uint32_t i = 0; i < assetDescCount; ++i)
{
void* buffer;
const uint64_t size = NvBlastExtSerializationSerializeAssetIntoBuffer(buffer, *ser, assets[i]);
EXPECT_TRUE(size != 0);
}
}
// Deserialize from stream
const void* buffer = stream.data();
uint64_t bufferSize = stream.size();
for (uint32_t assetCount = 0; bufferSize; ++assetCount)
{
uint32_t objectTypeID;
uint32_t encodingID;
const bool peekSuccess = ser->peekHeader(&objectTypeID, &encodingID, nullptr, buffer, bufferSize);
EXPECT_TRUE(peekSuccess);
if (!peekSuccess)
{
break;
}
EXPECT_EQ(Nv::Blast::LlObjectTypeID::Asset, objectTypeID);
if (assetCount < assetDescCount)
{
EXPECT_EQ(Nv::Blast::ExtSerialization::EncodingID::CapnProtoBinary, encodingID);
}
else
{
EXPECT_EQ(Nv::Blast::ExtSerialization::EncodingID::RawBinary, encodingID);
}
const bool skip = (assetCount & 1) != 0;
if (!skip)
{
const uint32_t assetnum = assetCount % assetDescCount;
Nv::Blast::Asset* rtAsset = reinterpret_cast<Nv::Blast::Asset*>(ser->deserializeFromBuffer(buffer, bufferSize));
EXPECT_TRUE(rtAsset != nullptr);
if (rtAsset == nullptr)
{
break;
}
//TODO: Compare assets
checkAssetsExpected(*rtAsset, g_assetExpectedValues[assetnum]);
free(static_cast<void*>(rtAsset));
}
buffer = ser->skipObject(bufferSize, buffer);
}
// Destroy
for (uint32_t i = 0; i < assetDescCount; ++i)
{
if (assets[i])
{
free(assets[i]);
}
}
ser->release();
}
#endif // ENABLE_SERIALIZATION_TESTS
TEST_F(AssetTestAllowWarnings, BuildAssetsMissingCoverage)
{
const uint32_t assetDescCount = sizeof(g_assetDescsMissingCoverage) / sizeof(g_assetDescsMissingCoverage[0]);
std::vector<NvBlastAsset*> assets(assetDescCount);
// Build
for (uint32_t i = 0; i < assetDescCount; ++i)
{
const NvBlastAssetDesc* desc = &g_assetDescsMissingCoverage[i];
NvBlastAssetDesc fixedDesc = *desc;
std::vector<NvBlastChunkDesc> chunkDescs(desc->chunkDescs, desc->chunkDescs + desc->chunkCount);
std::vector<NvBlastBondDesc> bondDescs(desc->bondDescs, desc->bondDescs + desc->bondCount);
std::vector<uint32_t> chunkReorderMap(desc->chunkCount);
std::vector<char> scratch(desc->chunkCount * sizeof(NvBlastChunkDesc));
const bool changedCoverage = !NvBlastEnsureAssetExactSupportCoverage(chunkDescs.data(), fixedDesc.chunkCount, scratch.data(), messageLog);
EXPECT_TRUE(changedCoverage);
NvBlastReorderAssetDescChunks(chunkDescs.data(), fixedDesc.chunkCount, bondDescs.data(), fixedDesc.bondCount, chunkReorderMap.data(), true, scratch.data(), messageLog);
fixedDesc.chunkDescs = chunkDescs.data();
fixedDesc.bondDescs = bondDescs.data();
assets[i] = buildAsset(g_assetsFromMissingCoverageExpectedValues[i], &fixedDesc);
}
// Destroy
for (uint32_t i = 0; i < assetDescCount; ++i)
{
if (assets[i])
{
free(assets[i]);
}
}
}
TEST_F(AssetTestAllowWarningsSilently, BuildAssetsShufflingChunkDescriptors)
{
for (uint32_t i = 0; i < sizeof(g_assetDescs) / sizeof(g_assetDescs[0]); ++i)
{
buildAssetShufflingDescriptors(&g_assetDescs[i], g_assetExpectedValues[i], 10, false);
}
for (uint32_t i = 0; i < sizeof(g_assetDescsMissingCoverage) / sizeof(g_assetDescsMissingCoverage[0]); ++i)
{
buildAssetShufflingDescriptors(&g_assetDescsMissingCoverage[i], g_assetsFromMissingCoverageExpectedValues[i], 10, false);
}
}
TEST_F(AssetTestAllowWarningsSilently, BuildAssetsShufflingChunkDescriptorsUsingTk)
{
for (uint32_t i = 0; i < sizeof(g_assetDescs) / sizeof(g_assetDescs[0]); ++i)
{
buildAssetShufflingDescriptors(&g_assetDescs[i], g_assetExpectedValues[i], 10, true);
}
for (uint32_t i = 0; i < sizeof(g_assetDescsMissingCoverage) / sizeof(g_assetDescsMissingCoverage[0]); ++i)
{
buildAssetShufflingDescriptors(&g_assetDescsMissingCoverage[i], g_assetsFromMissingCoverageExpectedValues[i], 10, true);
}
}
TEST_F(AssetTestStrict, MergeAssetsUpperSupportOnly)
{
mergeAssetTest(g_assetDescs[0], false);
}
TEST_F(AssetTestStrict, MergeAssetsWithSubsupport)
{
mergeAssetTest(g_assetDescs[1], false);
}
TEST_F(AssetTestStrict, MergeAssetsWithWorldBondsUpperSupportOnly)
{
mergeAssetTest(g_assetDescs[3], false);
}
TEST_F(AssetTestStrict, MergeAssetsWithWorldBondsWithSubsupport)
{
mergeAssetTest(g_assetDescs[4], false);
}
TEST_F(AssetTestAllowErrorsSilently, MergeAssetsUpperSupportOnlyExpectFail)
{
mergeAssetTest(g_assetDescs[0], true);
}
TEST_F(AssetTestAllowErrorsSilently, MergeAssetsWithSubsupportExpectFail)
{
mergeAssetTest(g_assetDescs[1], true);
}
TEST_F(AssetTestAllowErrorsSilently, MergeAssetsWithWorldBondsUpperSupportOnlyExpectFail)
{
mergeAssetTest(g_assetDescs[3], true);
}
TEST_F(AssetTestAllowErrorsSilently, MergeAssetsWithWorldBondsWithSubsupportExpectFail)
{
mergeAssetTest(g_assetDescs[4], true);
}
|
NVIDIA-Omniverse/PhysX/blast/source/test/src/unit/MultithreadingTests.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include "BlastBaseTest.h"
#include "AssetGenerator.h"
#include <iostream>
#include <memory>
#include "TaskDispatcher.h"
#include "NvBlastActor.h"
#include "NvBlastExtDamageShaders.h"
typedef std::function<void(const Nv::Blast::Actor&, NvBlastLog)> ActorTestFunction;
typedef std::function<void(std::vector<NvBlastActor*>&, NvBlastLog)> PostDamageTestFunction;
static void blast(std::set<NvBlastActor*>& actorsToDamage, GeneratorAsset* testAsset, GeneratorAsset::Vec3 localPos, float minRadius, float maxRadius, float compressiveDamage)
{
std::vector<NvBlastChunkFractureData> chunkEvents; /* sized to the number of lower-support chunks */
std::vector<NvBlastBondFractureData> bondEvents; /* sized to the number of bonds */
chunkEvents.resize(testAsset->solverChunks.size());
bondEvents.resize(testAsset->solverBonds.size());
NvBlastFractureBuffers events = { static_cast<uint32_t>(bondEvents.size()), static_cast<uint32_t>(chunkEvents.size()), bondEvents.data(), chunkEvents.data() };
std::vector<float> scratch(chunkEvents.size() + bondEvents.size(), 0.0f);
std::vector<char> splitScratch;
std::vector<NvBlastActor*> newActorsBuffer(testAsset->solverChunks.size());
NvBlastExtRadialDamageDesc damage = {
compressiveDamage,
{ localPos.x, localPos.y, localPos.z },
minRadius,
maxRadius
};
NvBlastExtProgramParams programParams =
{
&damage,
nullptr
};
NvBlastDamageProgram program = {
NvBlastExtFalloffGraphShader,
nullptr
};
size_t totalNewActorsCount = 0;
for (std::set<NvBlastActor*>::iterator k = actorsToDamage.begin(); k != actorsToDamage.end();)
{
NvBlastActor* actor = *k;
NvBlastActorGenerateFracture(&events, actor, program, &programParams, nullptr, nullptr);
NvBlastActorApplyFracture(&events, actor, &events, nullptr, nullptr);
bool removeActor = false;
if (events.bondFractureCount + events.chunkFractureCount > 0)
{
NvBlastActorSplitEvent splitEvent;
splitEvent.newActors = &newActorsBuffer.data()[totalNewActorsCount];
uint32_t newActorSize = (uint32_t)(newActorsBuffer.size() - totalNewActorsCount);
splitScratch.resize((size_t)NvBlastActorGetRequiredScratchForSplit(actor, nullptr));
const size_t newActorsCount = NvBlastActorSplit(&splitEvent, actor, newActorSize, splitScratch.data(), nullptr, nullptr);
totalNewActorsCount += newActorsCount;
removeActor = splitEvent.deletedActor != NULL;
}
if (removeActor)
{
k = actorsToDamage.erase(k);
}
else
{
++k;
}
}
for (size_t i = 0; i < totalNewActorsCount; ++i)
{
actorsToDamage.insert(newActorsBuffer[i]);
}
}
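// The helper above follows the standard low-level damage pipeline: generate fracture
// commands, apply them to the actor in place, then split the actor. A split replaces
// the original actor (reported via splitEvent.deletedActor) with its children, which
// are fed back into the damage set.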
template<int FailLevel, int Verbosity>
class MultithreadingTest : public BlastBaseTest<FailLevel, Verbosity>
{
public:
MultithreadingTest()
{
}
static void messageLog(int type, const char* msg, const char* file, int line)
{
BlastBaseTest<FailLevel, Verbosity>::messageLog(type, msg, file, line);
}
static void* alloc(size_t size)
{
return BlastBaseTest<FailLevel, Verbosity>::alignedZeroedAlloc(size);
}
static void free(void* mem)
{
BlastBaseTest<FailLevel, Verbosity>::alignedFree(mem);
}
static void testActorVisibleChunks(const Nv::Blast::Actor& actor, NvBlastLog)
{
const Nv::Blast::Asset& asset = *actor.getAsset();
const NvBlastChunk* chunks = asset.getChunks();
if (actor.isSubSupportChunk())
{
EXPECT_EQ(1, actor.getVisibleChunkCount());
const uint32_t firstVisibleChunkIndex = (uint32_t)Nv::Blast::Actor::VisibleChunkIt(actor);
EXPECT_EQ(actor.getIndex() - asset.m_graph.m_nodeCount, firstVisibleChunkIndex - asset.m_firstSubsupportChunkIndex);
// Make sure the visible chunk is subsupport
// Array of support flags
std::vector<bool> isSupport(asset.m_chunkCount, false);
for (uint32_t i = 0; i < asset.m_graph.m_nodeCount; ++i)
{
isSupport[asset.m_graph.getChunkIndices()[i]] = true;
}
// Climb hierarchy to find support chunk
uint32_t chunkIndex = firstVisibleChunkIndex;
while (chunkIndex != Nv::Blast::invalidIndex<uint32_t>())
{
if (isSupport[chunkIndex])
{
break;
}
chunkIndex = chunks[chunkIndex].parentChunkIndex;
}
EXPECT_FALSE(Nv::Blast::isInvalidIndex(chunkIndex));
}
else
{
// Array of visibility flags
std::vector<bool> isVisible(asset.m_chunkCount, false);
for (Nv::Blast::Actor::VisibleChunkIt i = actor; (bool)i; ++i)
{
isVisible[(uint32_t)i] = true;
}
// Mark visible nodes representing graph chunks
std::vector<bool> visibleChunkFound(asset.m_chunkCount, false);
// Make sure every graph chunk is represented by a visible chunk
for (Nv::Blast::Actor::GraphNodeIt i = actor; (bool)i; ++i)
{
const uint32_t graphNodeIndex = (uint32_t)i;
uint32_t chunkIndex = asset.m_graph.getChunkIndices()[graphNodeIndex];
// Climb hierarchy to find visible chunk
while (chunkIndex != Nv::Blast::invalidIndex<uint32_t>())
{
// Check that chunk owners are accurate
EXPECT_EQ(actor.getIndex(), actor.getFamilyHeader()->getChunkActorIndices()[chunkIndex]);
if (isVisible[chunkIndex])
{
visibleChunkFound[chunkIndex] = true;
break;
}
chunkIndex = chunks[chunkIndex].parentChunkIndex;
}
EXPECT_FALSE(Nv::Blast::isInvalidIndex(chunkIndex));
}
// Check that all visible chunks are accounted for
for (uint32_t i = 0; i < asset.m_chunkCount; ++i)
{
EXPECT_EQ(visibleChunkFound[i], isVisible[i]);
}
// Make sure that, if all siblings are intact, they are invisible
for (uint32_t i = 0; i < asset.m_chunkCount; ++i)
{
bool allIntact = true;
bool noneVisible = true;
if (chunks[i].firstChildIndex < asset.getUpperSupportChunkCount()) // Do not check subsupport
{
for (uint32_t j = chunks[i].firstChildIndex; j < chunks[i].childIndexStop; ++j)
{
allIntact = allIntact && actor.getFamilyHeader()->getChunkActorIndices()[j] == actor.getIndex();
noneVisible = noneVisible && !isVisible[j];
}
EXPECT_TRUE(!allIntact || noneVisible);
}
}
}
}
class DamageActorTask : public TaskDispatcher::Task
{
public:
DamageActorTask(NvBlastActor* actor, GeneratorAsset* asset, GeneratorAsset::Vec3 localPos, float minRadius, float maxRadius, float compressiveDamage, ActorTestFunction testFunction)
: m_asset(asset)
, m_localPos(localPos)
, m_minRadius(minRadius)
, m_maxRadius(maxRadius)
, m_compressiveDamage(compressiveDamage)
, m_testFunction(testFunction)
{
m_actors.insert(actor);
}
virtual void process()
{
blast(m_actors, m_asset, m_localPos, m_minRadius, m_maxRadius, m_compressiveDamage);
// Test individual actors
if (m_testFunction != nullptr)
{
for (std::set<NvBlastActor*>::iterator k = m_actors.begin(); k != m_actors.end(); ++k)
{
m_testFunction(*static_cast<Nv::Blast::Actor*>(*k), messageLog);
}
}
}
const std::set<NvBlastActor*>& getResult() const { return m_actors; }
private:
std::set<NvBlastActor*> m_actors;
GeneratorAsset* m_asset;
GeneratorAsset::Vec3 m_localPos;
float m_minRadius;
float m_maxRadius;
float m_compressiveDamage;
ActorTestFunction m_testFunction;
std::vector<NvBlastActor*> m_resultActors;
};
void damageLeafSupportActorsParallelized
(
uint32_t assetCount,
uint32_t minChunkCount,
uint32_t damageCount,
uint32_t threadCount,
ActorTestFunction actorTestFunction,
PostDamageTestFunction postDamageTestFunction
)
{
const float relativeDamageRadius = 0.05f;
const float compressiveDamage = 1.0f;
srand(0);
std::cout << "Asset # (out of " << assetCount << "): ";
for (uint32_t assetNum = 0; assetNum < assetCount; ++assetNum)
{
std::cout << assetNum + 1 << ".. ";
CubeAssetGenerator::Settings settings;
settings.extents = GeneratorAsset::Vec3(1, 1, 1);
CubeAssetGenerator::DepthInfo depthInfo;
depthInfo.slicesPerAxis = GeneratorAsset::Vec3(1, 1, 1);
depthInfo.flag = NvBlastChunkDesc::Flags::NoFlags;
settings.depths.push_back(depthInfo);
uint32_t chunkCount = 1;
while (chunkCount < minChunkCount)
{
uint32_t chunkMul;
do
{
depthInfo.slicesPerAxis = GeneratorAsset::Vec3((float)(1 + rand() % 4), (float)(1 + rand() % 4), (float)(1 + rand() % 4));
chunkMul = (uint32_t)(depthInfo.slicesPerAxis.x * depthInfo.slicesPerAxis.y * depthInfo.slicesPerAxis.z);
} while (chunkMul == 1);
chunkCount *= chunkMul;
settings.depths.push_back(depthInfo);
settings.extents = settings.extents * depthInfo.slicesPerAxis;
}
settings.depths.back().flag = NvBlastChunkDesc::SupportFlag; // Leaves are support
// Make largest direction unit size
settings.extents = settings.extents * (1.0f / std::max(settings.extents.x, std::max(settings.extents.y, settings.extents.z)));
// Create asset
GeneratorAsset testAsset;
CubeAssetGenerator::generate(testAsset, settings);
NvBlastAssetDesc desc;
desc.chunkDescs = &testAsset.solverChunks[0];
desc.chunkCount = (uint32_t)testAsset.solverChunks.size();
desc.bondDescs = testAsset.solverBonds.data();
desc.bondCount = (uint32_t)testAsset.solverBonds.size();
std::vector<char> scratch;
scratch.resize((size_t)NvBlastGetRequiredScratchForCreateAsset(&desc, messageLog));
void* mem = alloc(NvBlastGetAssetMemorySize(&desc, messageLog));
NvBlastAsset* asset = NvBlastCreateAsset(mem, &desc, scratch.data(), messageLog);
EXPECT_TRUE(asset != nullptr);
NvBlastActorDesc actorDesc;
actorDesc.initialBondHealths = actorDesc.initialSupportChunkHealths = nullptr;
actorDesc.uniformInitialBondHealth = actorDesc.uniformInitialLowerSupportChunkHealth = 1.0f;
void* fmem = alloc(NvBlastAssetGetFamilyMemorySize(asset, messageLog));
NvBlastFamily* family = NvBlastAssetCreateFamily(fmem, asset, nullptr); // Using zeroingAlloc in case actorTest compares memory blocks
scratch.resize((size_t)NvBlastFamilyGetRequiredScratchForCreateFirstActor(family, messageLog));
NvBlastActor* actor = NvBlastFamilyCreateFirstActor(family, &actorDesc, scratch.data(), messageLog);
EXPECT_TRUE(actor != nullptr);
// Run parallelized damage through TaskDispatcher
std::set<NvBlastActor*> resultActors;
{
uint32_t damageNum = 0;
// Helper function: create a DamageActorTask and add it to the dispatcher
auto addDamageTaskFunction = [&](TaskDispatcher& dispatcher, NvBlastActor* actor)
{
GeneratorAsset::Vec3 localPos = settings.extents*GeneratorAsset::Vec3((float)rand() / RAND_MAX - 0.5f, (float)rand() / RAND_MAX - 0.5f, (float)rand() / RAND_MAX - 0.5f);
auto newTask = std::unique_ptr<DamageActorTask>(new DamageActorTask(actor, &testAsset, localPos, relativeDamageRadius, relativeDamageRadius*1.2f, compressiveDamage, actorTestFunction));
dispatcher.addTask(std::move(newTask));
};
// on task finished function for dispatcher (main thread)
TaskDispatcher::OnTaskFinishedFunction onTaskFinishedFunction = [&](TaskDispatcher& dispatcher, std::unique_ptr<TaskDispatcher::Task> task) {
const DamageActorTask* damageTask = static_cast<const DamageActorTask*>(task.get());
const std::set<NvBlastActor*>& actors = damageTask->getResult();
for (NvBlastActor* actor : actors)
{
if (damageNum >= damageCount)
{
resultActors.insert(actor);
}
else
{
damageNum++;
addDamageTaskFunction(dispatcher, actor);
}
}
};
// create dispatcher, add first task and run
TaskDispatcher dispatcher(threadCount, onTaskFinishedFunction);
addDamageTaskFunction(dispatcher, actor);
dispatcher.process();
}
// Test fractured actor set
if (postDamageTestFunction)
{
std::vector<NvBlastActor*> actorArray(resultActors.begin(), resultActors.end());
postDamageTestFunction(actorArray, messageLog);
}
// Release remaining actors
for (std::set<NvBlastActor*>::iterator k = resultActors.begin(); k != resultActors.end(); ++k)
{
NvBlastActorDeactivate(*k, messageLog);
}
resultActors.clear();
const uint32_t actorCount = NvBlastFamilyGetActorCount(family, messageLog);
EXPECT_TRUE(actorCount == 0);
free(family);
// Release asset data
free(asset);
}
std::cout << "done.\n";
}
};
// Specializations
typedef MultithreadingTest<NvBlastMessage::Error, 1> MultithreadingTestAllowWarnings;
typedef MultithreadingTest<NvBlastMessage::Warning, 1> MultithreadingTestStrict;
TEST_F(MultithreadingTestStrict, MultithreadingTestDamageLeafSupportActorsTestVisibility)
{
damageLeafSupportActorsParallelized(1, 1000, 50, 4, testActorVisibleChunks, nullptr);
}
TEST_F(MultithreadingTestStrict, MultithreadingTestDamageLeafSupportActors)
{
damageLeafSupportActorsParallelized(1, 3000, 1000, 4, nullptr, nullptr);
}
|
NVIDIA-Omniverse/PhysX/blast/source/test/src/unit/TkCompositeTests.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include "TkBaseTest.h"
#include <map>
#include <random>
#include <algorithm>
#include "NsMemoryBuffer.h"
#include "NvBlastTime.h"
/*
Composite and joint tests:
0) Test serialization of composites and assemblies
1) Create assembly, actors and joints should be created automatically
2) Create an actor with internal joints. Splitting the actor should cause joint create events to be dispatched
3) Joint update events should be fired when attached actors change
4) Joint delete events should be fired when at least one attached actor is deleted
5) Creating a composite from assets with internal joints should have expected behaviors (1-4) above
*/
struct Composite
{
std::vector<TkActorDesc> m_actorDescs;
std::vector<nvidia::NvTransform> m_relTMs;
std::vector<TkJointDesc> m_jointDescs;
};
template<int FailLevel, int Verbosity>
class TkCompositeTest : public TkBaseTest<FailLevel, Verbosity>
{
public:
// Composite/joint tests
void createAssembly(std::vector<TkActor*>& actors, std::vector<TkJoint*>& joints, bool createNRFJoints)
{
TkFramework* fw = NvBlastTkFrameworkGet();
actors.resize(4, nullptr);
actors[0] = fw->createActor(TkActorDesc(testAssets[0]));
actors[1] = fw->createActor(TkActorDesc(testAssets[0]));
actors[2] = fw->createActor(TkActorDesc(testAssets[1]));
actors[3] = fw->createActor(TkActorDesc(testAssets[1]));
std::vector<TkFamily*> families(4);
families[0] = &actors[0]->getFamily();
families[1] = &actors[1]->getFamily();
families[2] = &actors[2]->getFamily();
families[3] = &actors[3]->getFamily();
EXPECT_FALSE(actors[0] == nullptr);
EXPECT_FALSE(actors[1] == nullptr);
EXPECT_FALSE(actors[2] == nullptr);
EXPECT_FALSE(actors[3] == nullptr);
const TkJointDesc jointDescsNoNRF[8] =
{
// Actor indices, chunk indices, attach position in the composite frame
{ { families[0], families[1] }, { 6, 5 }, { NvVec3(0.0f, -1.5f, 0.5f), NvVec3(0.0f, -1.5f, 0.5f) } },
{ { families[0], families[1] }, { 4, 3 }, { NvVec3(0.0f, -0.5f, -0.5f), NvVec3(0.0f, -0.5f, -0.5f) } },
{ { families[0], families[2] }, { 8, 6 }, { NvVec3(-0.5f, 0.0f, 0.5f), NvVec3(-0.5f, 0.0f, 0.5f) } },
{ { families[0], families[2] }, { 3, 1 }, { NvVec3(-1.5f, 0.0f, -0.5f), NvVec3(-1.5f, 0.0f, -0.5f) } },
{ { families[1], families[3] }, { 7, 5 }, { NvVec3(0.5f, 0.0f, 0.5f), NvVec3(0.5f, 0.0f, 0.5f) } },
{ { families[1], families[3] }, { 4, 2 }, { NvVec3(1.0f, 0.0f, -0.5f), NvVec3(1.0f, 0.0f, -0.5f) } },
{ { families[2], families[3] }, { 8, 7 }, { NvVec3(0.0f, 1.5f, 0.5f), NvVec3(0.0f, 1.5f, 0.5f) } },
{ { families[2], families[3] }, { 2, 1 }, { NvVec3(0.0f, 0.5f, -0.5f), NvVec3(0.0f, 0.5f, -0.5f) } }
};
const TkJointDesc jointDescsWithNRF[12] =
{
// Actor indices, chunk indices, attach position in the composite frame
{ { families[0], families[1] }, { 6, 5 }, { NvVec3(0.0f, -1.5f, 0.5f), NvVec3(0.0f, -1.5f, 0.5f) } },
{ { families[0], families[1] }, { 4, 3 }, { NvVec3(0.0f, -0.5f, -0.5f), NvVec3(0.0f, -0.5f, -0.5f) } },
{ { families[0], nullptr }, { 8, 0xFFFFFFFF }, { NvVec3(-0.5f, 0.0f, 0.5f), NvVec3(-0.5f, 0.0f, 0.5f) } },
{ { families[0], nullptr }, { 3, 0xFFFFFFFF }, { NvVec3(-1.5f, 0.0f, -0.5f), NvVec3(-1.5f, 0.0f, -0.5f) } },
{ { nullptr, families[2] }, { 0xFFFFFFFF, 6 }, { NvVec3(-0.5f, 0.0f, 0.5f), NvVec3(-0.5f, 0.0f, 0.5f) } },
{ { nullptr, families[2] }, { 0xFFFFFFFF, 1 }, { NvVec3(-1.5f, 0.0f, -0.5f), NvVec3(-1.5f, 0.0f, -0.5f) } },
{ { families[1], nullptr }, { 7, 0xFFFFFFFF }, { NvVec3(0.5f, 0.0f, 0.5f), NvVec3(0.5f, 0.0f, 0.5f) } },
{ { families[1], nullptr }, { 4, 0xFFFFFFFF }, { NvVec3(1.0f, 0.0f, -0.5f), NvVec3(1.0f, 0.0f, -0.5f) } },
{ { nullptr, families[3] }, { 0xFFFFFFFF, 5 }, { NvVec3(0.5f, 0.0f, 0.5f), NvVec3(0.5f, 0.0f, 0.5f) } },
{ { nullptr, families[3] }, { 0xFFFFFFFF, 2 }, { NvVec3(1.0f, 0.0f, -0.5f), NvVec3(1.0f, 0.0f, -0.5f) } },
{ { families[2], families[3] }, { 8, 7 }, { NvVec3(0.0f, 1.5f, 0.5f), NvVec3(0.0f, 1.5f, 0.5f) } },
{ { families[2], families[3] }, { 2, 1 }, { NvVec3(0.0f, 0.5f, -0.5f), NvVec3(0.0f, 0.5f, -0.5f) } }
};
const TkJointDesc* jointDescs = createNRFJoints ? jointDescsWithNRF : jointDescsNoNRF;
const int jointCount = createNRFJoints ? 12 : 8;
joints.resize(jointCount, nullptr);
for (int i = 0; i < jointCount; ++i)
{
joints[i] = fw->createJoint(jointDescs[i]);
EXPECT_FALSE(joints[i] == nullptr);
}
}
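// Note: in the NRF variant, a nullptr family paired with chunk index 0xFFFFFFFF
// appears to denote an unattached (external/world) side of the joint, so those
// joints span only one actor.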
void familySerialization(std::vector<TkFamily*>& families, TestFamilyTracker& tracker)
{
#if 1
NV_UNUSED(families);
NV_UNUSED(tracker);
#else
TkFramework* fw = NvBlastTkFrameworkGet();
PsMemoryBuffer* membuf = NVBLAST_NEW(PsMemoryBuffer);
EXPECT_TRUE(membuf != nullptr);
if (membuf == nullptr)
{
return;
}
std::vector<TkFamily*> oldFamilies = families;
for (size_t familyNum = 0; familyNum < families.size(); ++familyNum)
{
GTEST_FATAL_FAILURE_("Serialization of families needs to be put into extensions.");
// families[familyNum]->serialize(*membuf);
}
for (size_t familyNum = 0; familyNum < families.size(); ++familyNum)
{
TkFamily* f = families[familyNum];
std::vector<TkActor*> actors(f->getActorCount());
f->getActors(actors.data(), static_cast<uint32_t>(actors.size()));
for (auto a : actors)
{
tracker.eraseActor(a);
}
f->release();
families[familyNum] = nullptr;
}
for (size_t familyNum = 0; familyNum < families.size(); ++familyNum)
{
GTEST_FATAL_FAILURE_("Deserialization of families needs to be put into extensions.");
// TkFamily* f = reinterpret_cast<TkFamily*>(fw->deserialize(*membuf));
// f->addListener(tracker);
// families[familyNum] = f;
}
for (size_t familyNum = 0; familyNum < families.size(); ++familyNum)
{
TkFamily* f = families[familyNum];
std::vector<TkActor*> actors(f->getActorCount());
f->getActors(actors.data(), static_cast<uint32_t>(actors.size()));
for (auto a : actors)
{
tracker.insertActor(a);
std::vector<TkJoint*> joints(a->getJointCount());
a->getJoints(joints.data(), (uint32_t)joints.size());
for (auto j : joints)
{
const TkJointData jd = j->getData();
if (jd.actors[0] != jd.actors[1])
{
tracker.joints.insert(j);
}
}
}
}
membuf->release();
#endif
}
void recollectActors(std::vector<TkFamily*>& families, std::vector<TkActor*>& actors)
{
uint32_t totalActorCount = 0;
for (auto family : families)
{
EXPECT_LE(family->getActorCount() + totalActorCount, actors.size());
totalActorCount += family->getActors(actors.data() + totalActorCount, static_cast<uint32_t>(actors.size()) - totalActorCount);
}
}
void assemblyCreateAndRelease(bool createNRFJoints, bool serializationTest)
{
createFramework();
createTestAssets();
TkFramework* fw = NvBlastTkFrameworkGet();
const TkType* familyType = fw->getType(TkTypeIndex::Family);
EXPECT_TRUE(familyType != nullptr);
TestFamilyTracker tracker;
std::vector<TkFamily*> families1;
std::vector<TkFamily*> families2;
// Create one assembly
std::vector<TkActor*> actors1;
std::vector<TkJoint*> joints1;
createAssembly(actors1, joints1, createNRFJoints);
tracker.joints.insert(joints1.begin(), joints1.end());
// Create another assembly
std::vector<TkActor*> actors2;
std::vector<TkJoint*> joints2;
createAssembly(actors2, joints2, createNRFJoints);
tracker.joints.insert(joints2.begin(), joints2.end());
// Store families and fill group
for (size_t actorNum = 0; actorNum < actors1.size(); ++actorNum)
{
TkFamily& family = actors1[actorNum]->getFamily();
families1.push_back(&family);
family.addListener(tracker);
}
for (size_t actorNum = 0; actorNum < actors2.size(); ++actorNum)
{
TkFamily& family = actors2[actorNum]->getFamily();
families2.push_back(&family);
family.addListener(tracker);
}
if (serializationTest)
{
familySerialization(families1, tracker);
recollectActors(families1, actors1);
familySerialization(families2, tracker);
recollectActors(families2, actors2);
}
EXPECT_EQ(joints1.size() + joints2.size(), tracker.joints.size());
// Release 1st assembly's actors
for (size_t actorNum = 0; actorNum < actors1.size(); ++actorNum)
{
actors1[actorNum]->release();
}
if (serializationTest)
{
familySerialization(families2, tracker);
recollectActors(families2, actors2);
}
EXPECT_EQ(joints2.size(), tracker.joints.size());
// Release 2nd assembly's actors
for (size_t actorNum = 0; actorNum < actors2.size(); ++actorNum)
{
actors2[actorNum]->release();
}
EXPECT_EQ(0, tracker.joints.size());
releaseTestAssets();
releaseFramework();
}
void assemblyInternalJoints(bool testAssemblySerialization)
{
createFramework();
createTestAssets(true); // Create assets with internal joints
TkFramework* fw = NvBlastTkFrameworkGet();
TestFamilyTracker tracker;
TkGroupDesc gdesc;
gdesc.workerCount = m_taskman->getCpuDispatcher()->getWorkerCount();
TkGroup* group = fw->createGroup(gdesc);
EXPECT_TRUE(group != nullptr);
m_groupTM->setGroup(group);
TkActorDesc adesc(testAssets[0]);
TkActor* actor1 = fw->createActor(adesc);
EXPECT_TRUE(actor1 != nullptr);
tracker.insertActor(actor1);
actor1->getFamily().addListener(tracker);
TkFamily* family = &actor1->getFamily();
group->addActor(*actor1);
CSParams cs2(2, 0.0f);
NvBlastExtProgramParams csParams2 = { &cs2, nullptr };
actor1->damage(getCubeSlicerProgram(), &csParams2);
EXPECT_EQ((size_t)0, tracker.joints.size());
m_groupTM->process();
m_groupTM->wait();
if (testAssemblySerialization)
{
std::vector<TkFamily*> families;
families.push_back(family);
familySerialization(families, tracker);
family = families[0];
std::vector<TkActor*> actors(family->getActorCount());
family->getActors(actors.data(), static_cast<uint32_t>(actors.size()));
for (TkActor* actor : actors)
{
group->addActor(*actor);
}
}
EXPECT_EQ((size_t)2, family->getActorCount());
EXPECT_EQ((size_t)4, tracker.joints.size()); // 2) Create an actor with internal joints. Splitting the actor should cause joint create events to be dispatched
std::vector<TkActor*> actors(family->getActorCount());
family->getActors(actors.data(), static_cast<uint32_t>(actors.size()));
for (TkJoint* joint : tracker.joints)
{
TkJointData jd = joint->getData();
EXPECT_FALSE(actors.end() == std::find(actors.begin(), actors.end(), jd.actors[0]));
EXPECT_FALSE(actors.end() == std::find(actors.begin(), actors.end(), jd.actors[1]));
}
NvBlastExtRadialDamageDesc radialDamage = getRadialDamageDesc(0, 0, 0);
NvBlastExtProgramParams radialParams = { &radialDamage, nullptr };
for (TkActor* actor : actors)
{
actor->damage(getFalloffProgram(), &radialParams);
}
m_groupTM->process();
m_groupTM->wait();
if (testAssemblySerialization)
{
std::vector<TkFamily*> families;
families.push_back(family);
familySerialization(families, tracker);
family = families[0];
}
EXPECT_EQ((size_t)8, family->getActorCount());
EXPECT_EQ((size_t)4, tracker.joints.size());
// 3) Joint update events should be fired when attached actors change
actors.resize(family->getActorCount());
family->getActors(actors.data(), static_cast<uint32_t>(actors.size()));
for (TkJoint* joint : tracker.joints)
{
TkJointData jd = joint->getData();
EXPECT_FALSE(actors.end() == std::find(actors.begin(), actors.end(), jd.actors[0]));
EXPECT_FALSE(actors.end() == std::find(actors.begin(), actors.end(), jd.actors[1]));
}
for (TkActor* actor : actors)
{
actor->release();
}
EXPECT_EQ((size_t)0, tracker.joints.size()); // 4) Joint delete events should be fired when at least one attached actor is deleted
group->release();
releaseTestAssets();
releaseFramework();
}
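/**
Test plan item 5: builds an assembly from assets with internal joints (optionally
adding NRF joints) and verifies the expected behaviors (1-4) as the actors are
sliced, damaged, optionally serialized, and finally released.
*/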
void assemblyCompositeWithInternalJoints(bool createNRFJoints, bool serializationTest)
{
createFramework();
createTestAssets(true); // Create assets with internal joints
TkFramework* fw = NvBlastTkFrameworkGet();
const TkType* familyType = fw->getType(TkTypeIndex::Family);
EXPECT_TRUE(familyType != nullptr);
if (familyType == nullptr)
{
return;
}
TestFamilyTracker tracker;
std::vector<TkFamily*> families;
// Create assembly
std::vector<TkActor*> actors;
std::vector<TkJoint*> joints;
createAssembly(actors, joints, createNRFJoints);
tracker.joints.insert(joints.begin(), joints.end());
TkGroupDesc gdesc;
gdesc.workerCount = m_taskman->getCpuDispatcher()->getWorkerCount();
TkGroup* group = fw->createGroup(gdesc);
EXPECT_TRUE(group != nullptr);
m_groupTM->setGroup(group);
for (size_t i = 0; i < actors.size(); ++i)
{
TkFamily& family = actors[i]->getFamily();
families.push_back(&family);
family.addListener(tracker);
tracker.insertActor(actors[i]);
group->addActor(*actors[i]);
}
if (serializationTest)
{
familySerialization(families, tracker);
recollectActors(families, actors);
for (auto actor : actors)
{
group->addActor(*actor);
}
}
EXPECT_EQ((size_t)4, actors.size());
const size_t compJointCount = createNRFJoints ? (size_t)12 : (size_t)8;
EXPECT_EQ(compJointCount, tracker.joints.size());
CSParams cs2(2, 0.0f);
NvBlastExtProgramParams csParams2 = { &cs2, nullptr };
size_t totalActorCount = 0;
for (uint32_t i = 0; i < 4; ++i)
{
actors[i]->damage(getCubeSlicerProgram(), &csParams2);
m_groupTM->process();
m_groupTM->wait();
if (serializationTest)
{
familySerialization(families, tracker);
for (size_t j = 0; j < families.size(); ++j)
{
TkFamily* family = families[j];
std::vector<TkActor*> a(family->getActorCount());
family->getActors(a.data(), static_cast<uint32_t>(a.size()));
for (auto actor : a)
{
group->addActor(*actor);
}
EXPECT_TRUE(j <= i || a.size() == 1);
if (j > i && a.size() == 1)
{
actors[j] = a[0];
}
}
}
EXPECT_EQ((size_t)2, families[i]->getActorCount());
EXPECT_EQ((size_t)(compJointCount + 4 * (i + 1)), tracker.joints.size()); // Four joints created per actor
totalActorCount += families[i]->getActorCount();
}
actors.resize(totalActorCount);
totalActorCount = 0;
for (int i = 0; i < 4; ++i)
{
families[i]->getActors(actors.data() + totalActorCount, families[i]->getActorCount());
totalActorCount += families[i]->getActorCount();
}
for (TkJoint* joint : tracker.joints)
{
TkJointData jd = joint->getData();
EXPECT_TRUE(jd.actors[0] == nullptr || actors.end() != std::find(actors.begin(), actors.end(), jd.actors[0]));
EXPECT_TRUE(jd.actors[1] == nullptr || actors.end() != std::find(actors.begin(), actors.end(), jd.actors[1]));
}
NvBlastExtRadialDamageDesc radialDamage = getRadialDamageDesc(0, 0, 0);
NvBlastExtProgramParams radialParams = { &radialDamage, nullptr };
for (TkActor* actor : actors)
{
actor->damage(getFalloffProgram(), &radialParams);
}
m_groupTM->process();
m_groupTM->wait();
totalActorCount = 0;
for (int i = 0; i < 4; ++i)
{
totalActorCount += families[i]->getActorCount();
}
if (serializationTest)
{
familySerialization(families, tracker);
}
EXPECT_EQ((size_t)32, totalActorCount);
EXPECT_EQ(compJointCount + (size_t)16, tracker.joints.size());
actors.resize(totalActorCount);
totalActorCount = 0;
for (int i = 0; i < 4; ++i)
{
families[i]->getActors(actors.data() + totalActorCount, families[i]->getActorCount());
totalActorCount += families[i]->getActorCount();
}
// 3) Joint update events should be fired when attached actors change
for (TkActor* actor : actors)
{
actor->release();
}
EXPECT_EQ((size_t)0, tracker.joints.size()); // 4) Joint delete events should be fired when at least one attached actor is deleted
group->release();
releaseTestAssets();
releaseFramework();
}
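/**
Creates two single-bond actors in separate families connected by one external
joint, damages both so each family splits in two, and verifies the joint still
references a live actor in each family. With explicitJointRelease == false the
joint is left for framework teardown to clean up.
*/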
void assemblyExternalJoints_MultiFamilyDamage(bool explicitJointRelease = true)
{
createFramework();
const NvBlastChunkDesc chunkDescs[3] =
{
// centroid volume parent idx flags ID
{ { 0.0f, 0.0f, 0.0f }, 4.0f, UINT32_MAX, NvBlastChunkDesc::NoFlags, 0 },
{ { 0.0f,-1.0f, 0.0f }, 2.0f, 0, NvBlastChunkDesc::SupportFlag, 1 },
{ { 0.0f, 1.0f, 0.0f }, 2.0f, 0, NvBlastChunkDesc::SupportFlag, 2 }
};
const NvBlastBondDesc bondDesc =
// normal area centroid userData chunks
{ { { 0.0f, 1.0f, 0.0f }, 1.0f, { 0.0f, 0.0f, 0.0f }, 0 }, { 1, 2 } };
TkFramework* framework = NvBlastTkFrameworkGet();
TestFamilyTracker tracker;
TkAssetDesc desc;
desc.chunkCount = 3;
desc.chunkDescs = chunkDescs;
desc.bondCount = 1;
desc.bondDescs = &bondDesc;
desc.bondFlags = nullptr;
TkAsset* asset = framework->createAsset(desc);
EXPECT_TRUE(asset != nullptr);
TkGroupDesc gdesc;
gdesc.workerCount = m_taskman->getCpuDispatcher()->getWorkerCount();
TkGroup* group = framework->createGroup(gdesc);
EXPECT_TRUE(group != nullptr);
m_groupTM->setGroup(group);
TkActorDesc adesc(asset);
TkActor* actor1 = framework->createActor(adesc);
EXPECT_TRUE(actor1 != nullptr);
TkActor* actor2 = framework->createActor(adesc);
EXPECT_TRUE(actor2 != nullptr);
group->addActor(*actor1);
group->addActor(*actor2);
TkFamily* family1 = &actor1->getFamily();
TkFamily* family2 = &actor2->getFamily();
family1->addListener(tracker);
family2->addListener(tracker);
tracker.insertActor(actor1);
tracker.insertActor(actor2);
TkJointDesc jdesc;
jdesc.families[0] = family1;
jdesc.families[1] = family2;
jdesc.chunkIndices[0] = 2;
jdesc.chunkIndices[1] = 1;
jdesc.attachPositions[0] = NvVec3(0.0f, 1.0f, 0.0f);
jdesc.attachPositions[1] = NvVec3(0.0f, -1.0f, 0.0f);
TkJoint* joint = framework->createJoint(jdesc);
EXPECT_TRUE(joint != nullptr);
tracker.joints.insert(joint);
NvBlastExtRadialDamageDesc radialDamage1 = getRadialDamageDesc(0, 1, 0, 2, 2);
NvBlastExtProgramParams radialParams1 = { &radialDamage1, nullptr };
actor1->damage(getFalloffProgram(), &radialParams1);
NvBlastExtRadialDamageDesc radialDamage2 = getRadialDamageDesc(0, -1, 0, 2, 2);
NvBlastExtProgramParams radialParams2 = { &radialDamage2, nullptr };
actor2->damage(getFalloffProgram(), &radialParams2);
m_groupTM->process();
m_groupTM->wait();
TkActor* actors1[2];
TkActor* actors2[2];
EXPECT_EQ(2, family1->getActors(actors1, 2));
EXPECT_EQ(2, family2->getActors(actors2, 2));
const TkJointData jdata = joint->getData();
EXPECT_TRUE(jdata.actors[0] != nullptr);
EXPECT_TRUE(jdata.actors[1] != nullptr);
EXPECT_TRUE(&jdata.actors[0]->getFamily() == family1);
EXPECT_TRUE(&jdata.actors[1]->getFamily() == family2);
// Clean up
if (explicitJointRelease)
{
joint->release();
family2->release();
family1->release();
asset->release();
releaseFramework();
}
else
{
EXPECT_EQ(1, tracker.joints.size());
releaseFramework();
// Commenting these out - but shouldn't we be sending delete events when we release the framework?
// EXPECT_EQ(0, tracker.joints.size());
// EXPECT_EQ(0, tracker.actors.size());
}
}
protected:
// http://clang.llvm.org/compatibility.html#dep_lookup_bases
// http://stackoverflow.com/questions/6592512/templates-parent-class-member-variables-not-visible-in-inherited-class
using TkBaseTest<FailLevel, Verbosity>::testAssets;
using TkBaseTest<FailLevel, Verbosity>::m_taskman;
using TkBaseTest<FailLevel, Verbosity>::m_groupTM;
using TkBaseTest<FailLevel, Verbosity>::createFramework;
using TkBaseTest<FailLevel, Verbosity>::releaseFramework;
using TkBaseTest<FailLevel, Verbosity>::createTestAssets;
using TkBaseTest<FailLevel, Verbosity>::releaseTestAssets;
using TkBaseTest<FailLevel, Verbosity>::getCubeSlicerProgram;
using TkBaseTest<FailLevel, Verbosity>::getDefaultMaterial;
using TkBaseTest<FailLevel, Verbosity>::getRadialDamageDesc;
using TkBaseTest<FailLevel, Verbosity>::getFalloffProgram;
};
typedef TkCompositeTest<NvBlastMessage::Error, 1> TkCompositeTestAllowWarnings;
typedef TkCompositeTest<NvBlastMessage::Warning, 1> TkCompositeTestStrict;
/*
1) Create an assembly; actors and joints should be created automatically
*/
TEST_F(TkCompositeTestStrict, AssemblyCreateAndRelease_NoNRFJoints_NoSerialization)
{
assemblyCreateAndRelease(false, false);
}
TEST_F(TkCompositeTestStrict, DISABLED_AssemblyCreateAndRelease_NoNRFJoints_AssemblySerialization)
{
assemblyCreateAndRelease(false, true);
}
TEST_F(TkCompositeTestStrict, AssemblyCreateAndRelease_WithNRFJoints_NoSerialization)
{
assemblyCreateAndRelease(true, false);
}
TEST_F(TkCompositeTestStrict, DISABLED_AssemblyCreateAndRelease_WithNRFJoints_AssemblySerialization)
{
assemblyCreateAndRelease(true, true);
}
/**
2) Create an actor with internal joints. Splitting the actor should cause joint create events to be dispatched
3) Joint update events should be fired when attached actors change
4) Joint delete events should be fired when at least one attached actor is deleted
*/
TEST_F(TkCompositeTestStrict, AssemblyInternalJoints_NoSerialization)
{
assemblyInternalJoints(false);
}
TEST_F(TkCompositeTestStrict, DISABLED_AssemblyInternalJoints_AssemblySerialization)
{
assemblyInternalJoints(true);
}
/**
5) Creating a composite from assets with internal joints should have expected behaviors (1-4) above
*/
TEST_F(TkCompositeTestStrict, AssemblyCompositeWithInternalJoints_NoNRFJoints_NoSerialization)
{
assemblyCompositeWithInternalJoints(false, false);
}
TEST_F(TkCompositeTestStrict, DISABLED_AssemblyCompositeWithInternalJoints_NoNRFJoints_AssemblySerialization)
{
assemblyCompositeWithInternalJoints(false, true);
}
TEST_F(TkCompositeTestStrict, AssemblyCompositeWithInternalJoints_WithNRFJoints_NoSerialization)
{
assemblyCompositeWithInternalJoints(true, false);
}
TEST_F(TkCompositeTestStrict, DISABLED_AssemblyCompositeWithInternalJoints_WithNRFJoints_AssemblySerialization)
{
assemblyCompositeWithInternalJoints(true, true);
}
/*
More tests
*/
TEST_F(TkCompositeTestStrict, AssemblyExternalJoints_MultiFamilyDamage)
{
assemblyExternalJoints_MultiFamilyDamage(true);
}
TEST_F(TkCompositeTestStrict, AssemblyExternalJoints_MultiFamilyDamage_AutoJointRelease)
{
assemblyExternalJoints_MultiFamilyDamage(false);
}
|
NVIDIA-Omniverse/PhysX/blast/source/test/src/unit/FamilyGraphTests.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include "BlastBaseTest.h"
#include "NvBlastSupportGraph.h"
#include "NvBlastFamilyGraph.h"
#include "NvBlastAssert.h"
#include "NvBlastIndexFns.h"
#include <stdlib.h>
#include <ostream>
#include <stdint.h>
#include <map>
#include <algorithm>
// ====================================================================================================================
// HELPERS
// ====================================================================================================================
::testing::AssertionResult VectorMatch(const std::vector<uint32_t>& actual, const uint32_t* expected, uint32_t size)
{
for (size_t i(0); i < size; ++i)
{
if (expected[i] != actual[i])
{
testing::Message msg;
msg << "array[" << i
<< "] (" << actual[i] << ") != expected[" << i
<< "] (" << expected[i] << ")";
return ::testing::AssertionFailure(msg);
}
}
return ::testing::AssertionSuccess();
}
#define VECTOR_MATCH(actual, ...) \
do { \
const uint32_t arr[] = { __VA_ARGS__ }; \
const uint32_t size = (sizeof(arr) / sizeof(arr[0])); \
EXPECT_EQ(size, actual.size()); \
EXPECT_TRUE(VectorMatch(actual, arr, size)); \
} while (0)
// ====================================================================================================================
// TEST CLASS
// ====================================================================================================================
using namespace Nv::Blast;
template<int FailLevel, int Verbosity>
class FamilyGraphTest : public BlastBaseTest < FailLevel, Verbosity >
{
public:
FamilyGraphTest()
{
}
protected:
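/**
Builds a SupportGraph from CSR-style adjacency data (adjacentChunkPartition holds
per-node offsets into adjacentChunkIndices), assigns one bond index per undirected
edge (mirrored for both directions), then placement-constructs a FamilyGraph over
the same node and bond counts. May only be called once per test.
*/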
FamilyGraph* buildFamilyGraph(uint32_t chunkCount, const uint32_t* adjacentChunkPartition, const uint32_t* adjacentChunkIndices)
{
NVBLAST_ASSERT(m_memoryBlock.size() == 0); // can't build twice per test
// Fill SupportGraph with data:
NvBlastCreateOffsetStart(sizeof(SupportGraph));
const size_t NvBlastCreateOffsetAlign16(chunkIndicesOffset, chunkCount*sizeof(uint32_t));
const size_t NvBlastCreateOffsetAlign16(adjacencyPartitionOffset, (chunkCount + 1)*sizeof(uint32_t));
const size_t NvBlastCreateOffsetAlign16(adjacentNodeIndicesOffset, adjacentChunkPartition[chunkCount] * sizeof(uint32_t));
const size_t NvBlastCreateOffsetAlign16(adjacentBondIndicesOffset, adjacentChunkPartition[chunkCount] * sizeof(uint32_t));
const size_t graphDataSize = NvBlastCreateOffsetEndAlign16();
m_graphMemory.resize(graphDataSize);
m_graph = reinterpret_cast<SupportGraph*>(m_graphMemory.data());
m_graph->m_nodeCount = chunkCount;
m_graph->m_chunkIndicesOffset = static_cast<uint32_t>(chunkIndicesOffset);
m_graph->m_adjacencyPartitionOffset = static_cast<uint32_t>(adjacencyPartitionOffset);
m_graph->m_adjacentNodeIndicesOffset = static_cast<uint32_t>(adjacentNodeIndicesOffset);
m_graph->m_adjacentBondIndicesOffset = static_cast<uint32_t>(adjacentBondIndicesOffset);
memcpy(m_graph->getAdjacencyPartition(), adjacentChunkPartition, (chunkCount + 1) * sizeof(uint32_t));
memcpy(m_graph->getAdjacentNodeIndices(), adjacentChunkIndices, adjacentChunkPartition[chunkCount] * sizeof(uint32_t));
// fill bondIndices by incrementing bondIndex and putting same bondIndex in mirror bond index for (n0, n1) == (n1, n0)
memset(m_graph->getAdjacentBondIndices(), 0xFF, adjacentChunkPartition[chunkCount] * sizeof(uint32_t)); // every byte 0xFF, so each uint32_t reads as the invalid index (uint32_t)-1
uint32_t bondIndex = 0;
for (uint32_t chunk0 = 0; chunk0 < m_graph->m_nodeCount; chunk0++)
{
for (uint32_t i = m_graph->getAdjacencyPartition()[chunk0]; i < m_graph->getAdjacencyPartition()[chunk0 + 1]; i++)
{
if (m_graph->getAdjacentBondIndices()[i] == (uint32_t)-1)
{
m_graph->getAdjacentBondIndices()[i] = bondIndex;
uint32_t chunk1 = m_graph->getAdjacentNodeIndices()[i];
for (uint32_t j = m_graph->getAdjacencyPartition()[chunk1]; j < m_graph->getAdjacencyPartition()[chunk1 + 1]; j++)
{
if (m_graph->getAdjacentNodeIndices()[j] == chunk0)
{
m_graph->getAdjacentBondIndices()[j] = bondIndex;
}
}
bondIndex++;
}
}
}
// reserve memory for family graph and asset pointer
uint32_t familyGraphMemorySize = (uint32_t)FamilyGraph::requiredMemorySize(m_graph->m_nodeCount, bondIndex);
m_memoryBlock.resize(familyGraphMemorySize);
// placement new family graph
const uint32_t bondCount = m_graph->getAdjacencyPartition()[m_graph->m_nodeCount] / 2;
FamilyGraph* familyGraph = new(m_memoryBlock.data()) FamilyGraph(m_graph->m_nodeCount, bondCount);
return familyGraph;
}
struct IslandInfo
{
std::vector<NodeIndex> nodes;
};
/**
Gathers island info for tests and debugging.
Returned islands are sorted by node count (ties broken by first node index);
each island's node list is sorted by NodeIndex.
*/
void getIslandsInfo(const FamilyGraph& graph, std::vector<IslandInfo>& info)
{
IslandId* islandIds = graph.getIslandIds();
std::map<IslandId, IslandInfo> islandMap;
for (NodeIndex n = 0; n < m_graph->m_nodeCount; n++)
{
EXPECT_TRUE(islandIds[n] != invalidIndex<uint32_t>());
IslandId islandId = islandIds[n];
if (islandMap.find(islandId) == islandMap.end())
{
IslandInfo islandInfo;
islandInfo.nodes.push_back(n);
islandMap[islandId] = islandInfo;
}
else
{
islandMap[islandId].nodes.push_back(n);
}
}
for (auto it = islandMap.begin(); it != islandMap.end(); ++it)
{
std::sort(it->second.nodes.begin(), it->second.nodes.end());
info.push_back(it->second);
}
// sort islands by size ascending
std::sort(info.begin(), info.end(), [](const IslandInfo& i0, const IslandInfo& i1) -> bool
{
size_t s0 = i0.nodes.size();
size_t s1 = i1.nodes.size();
if (s0 == s1 && s0 > 0)
{
s0 = i0.nodes[0];
s1 = i1.nodes[0];
}
return s0 < s1;
});
}
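// Actor index used by tests that exercise a single actor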
static const uint32_t DEFAULT_ACTOR_INDEX = 0;
SupportGraph* m_graph;
std::vector<char> m_graphMemory;
std::vector<char> m_memoryBlock;
};
typedef FamilyGraphTest<NvBlastMessage::Error, 1> FamilyGraphTestAllowWarnings;
typedef FamilyGraphTest<NvBlastMessage::Warning, 1> FamilyGraphTestStrict;
// ====================================================================================================================
// GRAPH DATA
// ====================================================================================================================
// Graph 0:
//
// 0 -- 1 -- 2 -- 3
// | | | |
// | | | |
// 4 -- 5 6 -- 7
//
const uint32_t chunkCount0 = 8;
const uint32_t adjacentChunkPartition0[] = { 0, 2, 5, 8, 10, 12, 14, 16, 18 };
const uint32_t adjacentChunkIndices0[] = { /*0*/ 1, 4, /*1*/ 0, 2, 5, /*2*/ 1, 3, 6, /*3*/ 2, 7, /*4*/ 0, 5, /*5*/ 1, 4, /*6*/ 2, 7, /*7*/ 3, 6 };
// Graph 1:
//
// 0 -- 1 -- 2 -- 3
// | | | |
// 4 -- 5 -- 6 -- 7
// | | | |
// 8 -- 9 -- 10-- 11
//
const uint32_t chunkCount1 = 12;
const uint32_t adjacentChunkPartition1[] = { 0, 2, 5, 8, 10, 13, 17, 21, 24, 26, 29, 32, 34 };
const uint32_t adjacentChunkIndices1[] = { /*0*/ 1, 4, /*1*/ 0, 2, 5, /*2*/ 1, 3, 6, /*3*/ 2, 7, /*4*/ 0, 5, 8, /*5*/ 1, 4, 6, 9, /*6*/ 2, 5, 7, 10,
/*7*/ 3, 6, 11, /*8*/ 4, 9, /*9*/ 5, 8, 10, /*10*/ 6, 9, 11, /*11*/ 7, 10 };
// ====================================================================================================================
// TESTS
// ====================================================================================================================
TEST_F(FamilyGraphTestStrict, Graph0FindIslands0)
{
FamilyGraph* graph = buildFamilyGraph(chunkCount0, adjacentChunkPartition0, adjacentChunkIndices0);
graph->initialize(DEFAULT_ACTOR_INDEX, m_graph);
std::vector<char> scratch;
scratch.resize((size_t)FamilyGraph::findIslandsRequiredScratch(chunkCount0));
EXPECT_EQ(9, graph->getEdgesCount(m_graph));
graph->notifyEdgeRemoved(DEFAULT_ACTOR_INDEX, 0, 4, m_graph);
EXPECT_EQ(8, graph->getEdgesCount(m_graph));
EXPECT_EQ(1, graph->findIslands(DEFAULT_ACTOR_INDEX, scratch.data(), m_graph));
graph->notifyEdgeRemoved(DEFAULT_ACTOR_INDEX, 1, 2, m_graph);
EXPECT_EQ(1, graph->findIslands(DEFAULT_ACTOR_INDEX, scratch.data(), m_graph));
std::vector<IslandInfo> info;
getIslandsInfo(*graph, info);
EXPECT_EQ(2, info.size());
VECTOR_MATCH(info[0].nodes, 0, 1, 4, 5);
VECTOR_MATCH(info[1].nodes, 2, 3, 6, 7);
}
TEST_F(FamilyGraphTestStrict, Graph0FindIslands1)
{
FamilyGraph* graph = buildFamilyGraph(chunkCount0, adjacentChunkPartition0, adjacentChunkIndices0);
graph->initialize(DEFAULT_ACTOR_INDEX, m_graph);
std::vector<char> scratch;
scratch.resize((size_t)FamilyGraph::findIslandsRequiredScratch(chunkCount0));
graph->notifyEdgeRemoved(DEFAULT_ACTOR_INDEX, 0, 4, m_graph);
graph->notifyEdgeRemoved(DEFAULT_ACTOR_INDEX, 4, 5, m_graph);
graph->notifyEdgeRemoved(DEFAULT_ACTOR_INDEX, 1, 2, m_graph);
EXPECT_EQ(6, graph->getEdgesCount(m_graph));
EXPECT_EQ(3, graph->findIslands(DEFAULT_ACTOR_INDEX, scratch.data(), m_graph));
std::vector<IslandInfo> info;
getIslandsInfo(*graph, info);
EXPECT_EQ(3, info.size());
VECTOR_MATCH(info[0].nodes, 4);
VECTOR_MATCH(info[1].nodes, 0, 1, 5);
VECTOR_MATCH(info[2].nodes, 2, 3, 6, 7);
}
TEST_F(FamilyGraphTestStrict, Graph0FindIslandsDifferentActors)
{
const uint32_t ACTOR_0_INDEX = 5;
const uint32_t ACTOR_1_INDEX = 2;
FamilyGraph* graph = buildFamilyGraph(chunkCount0, adjacentChunkPartition0, adjacentChunkIndices0);
graph->initialize(ACTOR_0_INDEX, m_graph);
std::vector<char> scratch;
scratch.resize((size_t)FamilyGraph::findIslandsRequiredScratch(chunkCount0));
EXPECT_EQ(0, graph->findIslands(ACTOR_1_INDEX, scratch.data(), m_graph));
EXPECT_EQ(1, graph->findIslands(ACTOR_0_INDEX, scratch.data(), m_graph));
graph->notifyEdgeRemoved(ACTOR_0_INDEX, 2, 1, m_graph);
EXPECT_EQ(8, graph->getEdgesCount(m_graph));
EXPECT_EQ(1, graph->findIslands(ACTOR_0_INDEX, scratch.data(), m_graph));
graph->notifyEdgeRemoved(ACTOR_1_INDEX, 2, 6, m_graph);
graph->notifyEdgeRemoved(ACTOR_1_INDEX, 7, 3, m_graph);
EXPECT_EQ(1, graph->findIslands(ACTOR_1_INDEX, scratch.data(), m_graph));
graph->notifyEdgeRemoved(ACTOR_0_INDEX, 0, 1, m_graph);
graph->notifyEdgeRemoved(ACTOR_0_INDEX, 4, 5, m_graph);
EXPECT_EQ(1, graph->findIslands(ACTOR_0_INDEX, scratch.data(), m_graph));
std::vector<IslandInfo> info;
getIslandsInfo(*graph, info);
EXPECT_EQ(4, info.size());
VECTOR_MATCH(info[0].nodes, 0, 4);
VECTOR_MATCH(info[1].nodes, 1, 5);
VECTOR_MATCH(info[2].nodes, 2, 3);
VECTOR_MATCH(info[3].nodes, 6, 7);
}
TEST_F(FamilyGraphTestStrict, Graph1FindIslands0)
{
FamilyGraph* graph = buildFamilyGraph(chunkCount1, adjacentChunkPartition1, adjacentChunkIndices1);
graph->initialize(DEFAULT_ACTOR_INDEX, m_graph);
std::vector<char> scratch;
scratch.resize((size_t)FamilyGraph::findIslandsRequiredScratch(chunkCount1));
graph->notifyEdgeRemoved(DEFAULT_ACTOR_INDEX, 0, 4, m_graph);
graph->notifyEdgeRemoved(DEFAULT_ACTOR_INDEX, 1, 5, m_graph);
graph->notifyEdgeRemoved(DEFAULT_ACTOR_INDEX, 2, 6, m_graph);
graph->notifyEdgeRemoved(DEFAULT_ACTOR_INDEX, 3, 7, m_graph);
graph->notifyEdgeRemoved(DEFAULT_ACTOR_INDEX, 5, 6, m_graph);
graph->notifyEdgeRemoved(DEFAULT_ACTOR_INDEX, 9, 10, m_graph);
EXPECT_EQ(11, graph->getEdgesCount(m_graph));
EXPECT_EQ(3, graph->findIslands(DEFAULT_ACTOR_INDEX, scratch.data(), m_graph));
std::vector<IslandInfo> info;
getIslandsInfo(*graph, info);
EXPECT_EQ(3, info.size());
VECTOR_MATCH(info[0].nodes, 0, 1, 2, 3);
VECTOR_MATCH(info[1].nodes, 4, 5, 8, 9);
VECTOR_MATCH(info[2].nodes, 6, 7, 10, 11);
}
TEST_F(FamilyGraphTestStrict, Graph1FindIslands1)
{
FamilyGraph* graph = buildFamilyGraph(chunkCount1, adjacentChunkPartition1, adjacentChunkIndices1);
graph->initialize(DEFAULT_ACTOR_INDEX, m_graph);
std::vector<char> scratch;
scratch.resize((size_t)FamilyGraph::findIslandsRequiredScratch(chunkCount1));
graph->notifyEdgeRemoved(DEFAULT_ACTOR_INDEX, 0, 4, m_graph);
EXPECT_EQ(1, graph->findIslands(DEFAULT_ACTOR_INDEX, scratch.data(), m_graph));
graph->notifyEdgeRemoved(DEFAULT_ACTOR_INDEX, 1, 5, m_graph);
EXPECT_EQ(0, graph->findIslands(DEFAULT_ACTOR_INDEX, scratch.data(), m_graph));
graph->notifyEdgeRemoved(DEFAULT_ACTOR_INDEX, 2, 6, m_graph);
EXPECT_EQ(0, graph->findIslands(DEFAULT_ACTOR_INDEX, scratch.data(), m_graph));
graph->notifyEdgeRemoved(DEFAULT_ACTOR_INDEX, 3, 7, m_graph);
EXPECT_EQ(1, graph->findIslands(DEFAULT_ACTOR_INDEX, scratch.data(), m_graph));
graph->notifyEdgeRemoved(DEFAULT_ACTOR_INDEX, 5, 6, m_graph);
EXPECT_EQ(0, graph->findIslands(DEFAULT_ACTOR_INDEX, scratch.data(), m_graph));
graph->notifyEdgeRemoved(DEFAULT_ACTOR_INDEX, 9, 10, m_graph);
EXPECT_EQ(1, graph->findIslands(DEFAULT_ACTOR_INDEX, scratch.data(), m_graph));
std::vector<IslandInfo> info;
getIslandsInfo(*graph, info);
EXPECT_EQ(3, info.size());
VECTOR_MATCH(info[0].nodes, 0, 1, 2, 3);
VECTOR_MATCH(info[1].nodes, 4, 5, 8, 9);
VECTOR_MATCH(info[2].nodes, 6, 7, 10, 11);
}
TEST_F(FamilyGraphTestStrict, Graph1FindIslandsRemoveAllEdges)
{
FamilyGraph* graph = buildFamilyGraph(chunkCount1, adjacentChunkPartition1, adjacentChunkIndices1);
graph->initialize(DEFAULT_ACTOR_INDEX, m_graph);
std::vector<char> scratch;
scratch.resize((size_t)FamilyGraph::findIslandsRequiredScratch(chunkCount1));
uint32_t edges = graph->getEdgesCount(m_graph);
for (uint32_t node0 = 0; node0 < chunkCount1; node0++)
{
for (uint32_t i = adjacentChunkPartition1[node0]; i < adjacentChunkPartition1[node0 + 1]; i++)
{
if (graph->notifyEdgeRemoved(DEFAULT_ACTOR_INDEX, node0, adjacentChunkIndices1[i], m_graph))
{
edges--;
EXPECT_EQ(edges, graph->getEdgesCount(m_graph));
}
}
}
EXPECT_EQ(0, graph->getEdgesCount(m_graph));
EXPECT_EQ(12, graph->findIslands(DEFAULT_ACTOR_INDEX, scratch.data(), m_graph));
for (uint32_t node0 = 0; node0 < chunkCount1; node0++)
{
EXPECT_EQ(node0, graph->getIslandIds()[node0]);
}
}
|
NVIDIA-Omniverse/PhysX/blast/source/test/src/utils/TestProfiler.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef TESTPROFILER_H
#define TESTPROFILER_H
#include "NvBlastInternalProfiler.h"
#define TEST_ZONE_BEGIN(name) platformZoneStart(name)
#define TEST_ZONE_END(name) platformZoneEnd()
#endif // TESTPROFILER_H
|
NVIDIA-Omniverse/PhysX/blast/source/test/src/utils/TestAssets.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef TESTASSETS_H
#define TESTASSETS_H
#include "NvBlast.h"
#include "AssetGenerator.h"
struct ExpectedAssetValues
{
uint32_t totalChunkCount;
uint32_t graphNodeCount;
uint32_t leafChunkCount;
uint32_t bondCount;
uint32_t subsupportChunkCount;
};
// Indexable asset descriptors and expected values
extern const NvBlastAssetDesc g_assetDescs[6];
extern const ExpectedAssetValues g_assetExpectedValues[6];
// Indexable asset descriptors for assets missing coverage and expected values
extern const NvBlastAssetDesc g_assetDescsMissingCoverage[6];
extern const ExpectedAssetValues g_assetsFromMissingCoverageExpectedValues[6];
inline uint32_t getAssetDescCount()
{
return sizeof(g_assetDescs) / sizeof(g_assetDescs[0]);
}
inline uint32_t getAssetDescMissingCoverageCount()
{
return sizeof(g_assetDescsMissingCoverage) / sizeof(g_assetDescsMissingCoverage[0]);
}
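// Generates a cube asset sliced width x width x width at each depth below the root,
// up to maxDepth; support chunks sit at supportDepth if positive, otherwise at the leaves.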
void generateCube(GeneratorAsset& cubeAsset, NvBlastAssetDesc& assetDesc, size_t maxDepth, size_t width,
int32_t supportDepth = -1, CubeAssetGenerator::BondFlags bondFlags = CubeAssetGenerator::ALL_INTERNAL_BONDS);
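// Generates a cube asset with randomized per-depth slicing, growing the chunk count
// toward [minChunkCount, maxChunkCount] without exceeding the maximum; leaves are support.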
void generateRandomCube(GeneratorAsset& cubeAsset, NvBlastAssetDesc& assetDesc, uint32_t minChunkCount, uint32_t maxChunkCount);
#endif // #ifndef TESTASSETS_H
|
NVIDIA-Omniverse/PhysX/blast/source/test/src/utils/TestAssets.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include "TestAssets.h"
#include "AssetGenerator.h"
#include <algorithm>
const NvBlastChunkDesc g_cube1ChunkDescs[9] =
{
// centroid volume parent idx flags ID
{ { 0.0f, 0.0f, 0.0f }, 8.0f, UINT32_MAX, NvBlastChunkDesc::NoFlags, 0 },
{ {-0.5f,-0.5f,-0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 1 },
{ { 0.5f,-0.5f,-0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 2 },
{ {-0.5f, 0.5f,-0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 3 },
{ { 0.5f, 0.5f,-0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 4 },
{ {-0.5f,-0.5f, 0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 5 },
{ { 0.5f,-0.5f, 0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 6 },
{ {-0.5f, 0.5f, 0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 7 },
{ { 0.5f, 0.5f, 0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 8 },
};
const NvBlastBondDesc g_cube1BondDescs[12] =
{
// normal area centroid userData chunks
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 0.0f,-0.5f,-0.5f }, 0 }, { 1, 2 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 0.0f, 0.5f,-0.5f }, 0 }, { 3, 4 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 0.0f,-0.5f, 0.5f }, 0 }, { 5, 6 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 0.0f, 0.5f, 0.5f }, 0 }, { 7, 8 } },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f, {-0.5f, 0.0f,-0.5f }, 0 }, { 1, 3 } },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f, { 0.5f, 0.0f,-0.5f }, 0 }, { 2, 4 } },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f, {-0.5f, 0.0f, 0.5f }, 0 }, { 5, 7 } },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f, { 0.5f, 0.0f, 0.5f }, 0 }, { 6, 8 } },
{ { { 0.0f, 0.0f, 1.0f }, 1.0f, {-0.5f,-0.5f, 0.0f }, 0 }, { 1, 5 } },
{ { { 0.0f, 0.0f, 1.0f }, 1.0f, { 0.5f,-0.5f, 0.0f }, 0 }, { 2, 6 } },
{ { { 0.0f, 0.0f, 1.0f }, 1.0f, {-0.5f, 0.5f, 0.0f }, 0 }, { 3, 7 } },
{ { { 0.0f, 0.0f, 1.0f }, 1.0f, { 0.5f, 0.5f, 0.0f }, 0 }, { 4, 8 } },
};
const NvBlastBondDesc g_cube1BondDescs_wb[16] =
{
// normal area centroid userData chunks
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 0.0f,-0.5f,-0.5f }, 0 }, { 1, 2 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 0.0f, 0.5f,-0.5f }, 0 }, { 3, 4 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 0.0f,-0.5f, 0.5f }, 0 }, { 5, 6 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f, { 0.0f, 0.5f, 0.5f }, 0 }, { 7, 8 } },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f, {-0.5f, 0.0f,-0.5f }, 0 }, { 1, 3 } },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f, { 0.5f, 0.0f,-0.5f }, 0 }, { 2, 4 } },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f, {-0.5f, 0.0f, 0.5f }, 0 }, { 5, 7 } },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f, { 0.5f, 0.0f, 0.5f }, 0 }, { 6, 8 } },
{ { { 0.0f, 0.0f, 1.0f }, 1.0f, {-0.5f,-0.5f, 0.0f }, 0 }, { 1, 5 } },
{ { { 0.0f, 0.0f, 1.0f }, 1.0f, { 0.5f,-0.5f, 0.0f }, 0 }, { 2, 6 } },
{ { { 0.0f, 0.0f, 1.0f }, 1.0f, {-0.5f, 0.5f, 0.0f }, 0 }, { 3, 7 } },
{ { { 0.0f, 0.0f, 1.0f }, 1.0f, { 0.5f, 0.5f, 0.0f }, 0 }, { 4, 8 } },
{ { { 0.0f, 0.0f,-1.0f }, 1.0f, {-0.5f,-0.5f,-1.0f }, 0 }, { 1, UINT32_MAX } },
{ { { 0.0f, 0.0f,-1.0f }, 1.0f, { 0.5f,-0.5f,-1.0f }, 0 }, { 2, UINT32_MAX } },
{ { { 0.0f, 0.0f,-1.0f }, 1.0f, {-0.5f, 0.5f,-1.0f }, 0 }, { 3, UINT32_MAX } },
{ { { 0.0f, 0.0f,-1.0f }, 1.0f, { 0.5f, 0.5f,-1.0f }, 0 }, { 4, UINT32_MAX } },
};
const NvBlastChunkDesc g_cube2ChunkDescs[73] =
{
// centroid volume parent idx flags ID
{ { 0.0f, 0.0f, 0.0f }, 8.0f, UINT32_MAX, NvBlastChunkDesc::NoFlags, 0 },
{ {-0.5f, -0.5f, -0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 1 },
{ { 0.5f, -0.5f, -0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 2 },
{ {-0.5f, 0.5f, -0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 3 },
{ { 0.5f, 0.5f, -0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 4 },
{ {-0.5f, -0.5f, 0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 5 },
{ { 0.5f, -0.5f, 0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 6 },
{ {-0.5f, 0.5f, 0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 7 },
{ { 0.5f, 0.5f, 0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 8 },
{ {-0.25f-0.5f,-0.25f-0.5f,-0.25f-0.5f }, 0.125f, 1, NvBlastChunkDesc::NoFlags, 9 },
{ { 0.25f-0.5f,-0.25f-0.5f,-0.25f-0.5f }, 0.125f, 1, NvBlastChunkDesc::NoFlags, 10 },
{ {-0.25f-0.5f, 0.25f-0.5f,-0.25f-0.5f }, 0.125f, 1, NvBlastChunkDesc::NoFlags, 11 },
{ { 0.25f-0.5f, 0.25f-0.5f,-0.25f-0.5f }, 0.125f, 1, NvBlastChunkDesc::NoFlags, 12 },
{ {-0.25f-0.5f,-0.25f-0.5f, 0.25f-0.5f }, 0.125f, 1, NvBlastChunkDesc::NoFlags, 13 },
{ { 0.25f-0.5f,-0.25f-0.5f, 0.25f-0.5f }, 0.125f, 1, NvBlastChunkDesc::NoFlags, 14 },
{ {-0.25f-0.5f, 0.25f-0.5f, 0.25f-0.5f }, 0.125f, 1, NvBlastChunkDesc::NoFlags, 15 },
{ { 0.25f-0.5f, 0.25f-0.5f, 0.25f-0.5f }, 0.125f, 1, NvBlastChunkDesc::NoFlags, 16 },
{ {-0.25f+0.5f,-0.25f-0.5f,-0.25f-0.5f }, 0.125f, 2, NvBlastChunkDesc::NoFlags, 17 },
{ { 0.25f+0.5f,-0.25f-0.5f,-0.25f-0.5f }, 0.125f, 2, NvBlastChunkDesc::NoFlags, 18 },
{ {-0.25f+0.5f, 0.25f-0.5f,-0.25f-0.5f }, 0.125f, 2, NvBlastChunkDesc::NoFlags, 19 },
{ { 0.25f+0.5f, 0.25f-0.5f,-0.25f-0.5f }, 0.125f, 2, NvBlastChunkDesc::NoFlags, 20 },
{ {-0.25f+0.5f,-0.25f-0.5f, 0.25f-0.5f }, 0.125f, 2, NvBlastChunkDesc::NoFlags, 21 },
{ { 0.25f+0.5f,-0.25f-0.5f, 0.25f-0.5f }, 0.125f, 2, NvBlastChunkDesc::NoFlags, 22 },
{ {-0.25f+0.5f, 0.25f-0.5f, 0.25f-0.5f }, 0.125f, 2, NvBlastChunkDesc::NoFlags, 23 },
{ { 0.25f+0.5f, 0.25f-0.5f, 0.25f-0.5f }, 0.125f, 2, NvBlastChunkDesc::NoFlags, 24 },
{ {-0.25f-0.5f,-0.25f+0.5f,-0.25f-0.5f }, 0.125f, 3, NvBlastChunkDesc::NoFlags, 25 },
{ { 0.25f-0.5f,-0.25f+0.5f,-0.25f-0.5f }, 0.125f, 3, NvBlastChunkDesc::NoFlags, 26 },
{ {-0.25f-0.5f, 0.25f+0.5f,-0.25f-0.5f }, 0.125f, 3, NvBlastChunkDesc::NoFlags, 27 },
{ { 0.25f-0.5f, 0.25f+0.5f,-0.25f-0.5f }, 0.125f, 3, NvBlastChunkDesc::NoFlags, 28 },
{ {-0.25f-0.5f,-0.25f+0.5f, 0.25f-0.5f }, 0.125f, 3, NvBlastChunkDesc::NoFlags, 29 },
{ { 0.25f-0.5f,-0.25f+0.5f, 0.25f-0.5f }, 0.125f, 3, NvBlastChunkDesc::NoFlags, 30 },
{ {-0.25f-0.5f, 0.25f+0.5f, 0.25f-0.5f }, 0.125f, 3, NvBlastChunkDesc::NoFlags, 31 },
{ { 0.25f-0.5f, 0.25f+0.5f, 0.25f-0.5f }, 0.125f, 3, NvBlastChunkDesc::NoFlags, 32 },
{ {-0.25f+0.5f,-0.25f+0.5f,-0.25f-0.5f }, 0.125f, 4, NvBlastChunkDesc::NoFlags, 33 },
{ { 0.25f+0.5f,-0.25f+0.5f,-0.25f-0.5f }, 0.125f, 4, NvBlastChunkDesc::NoFlags, 34 },
{ {-0.25f+0.5f, 0.25f+0.5f,-0.25f-0.5f }, 0.125f, 4, NvBlastChunkDesc::NoFlags, 35 },
{ { 0.25f+0.5f, 0.25f+0.5f,-0.25f-0.5f }, 0.125f, 4, NvBlastChunkDesc::NoFlags, 36 },
{ {-0.25f+0.5f,-0.25f+0.5f, 0.25f-0.5f }, 0.125f, 4, NvBlastChunkDesc::NoFlags, 37 },
{ { 0.25f+0.5f,-0.25f+0.5f, 0.25f-0.5f }, 0.125f, 4, NvBlastChunkDesc::NoFlags, 38 },
{ {-0.25f+0.5f, 0.25f+0.5f, 0.25f-0.5f }, 0.125f, 4, NvBlastChunkDesc::NoFlags, 39 },
{ { 0.25f+0.5f, 0.25f+0.5f, 0.25f-0.5f }, 0.125f, 4, NvBlastChunkDesc::NoFlags, 40 },
{ {-0.25f-0.5f,-0.25f-0.5f,-0.25f+0.5f }, 0.125f, 5, NvBlastChunkDesc::NoFlags, 41 },
{ { 0.25f-0.5f,-0.25f-0.5f,-0.25f+0.5f }, 0.125f, 5, NvBlastChunkDesc::NoFlags, 42 },
{ {-0.25f-0.5f, 0.25f-0.5f,-0.25f+0.5f }, 0.125f, 5, NvBlastChunkDesc::NoFlags, 43 },
{ { 0.25f-0.5f, 0.25f-0.5f,-0.25f+0.5f }, 0.125f, 5, NvBlastChunkDesc::NoFlags, 44 },
{ {-0.25f-0.5f,-0.25f-0.5f, 0.25f+0.5f }, 0.125f, 5, NvBlastChunkDesc::NoFlags, 45 },
{ { 0.25f-0.5f,-0.25f-0.5f, 0.25f+0.5f }, 0.125f, 5, NvBlastChunkDesc::NoFlags, 46 },
{ {-0.25f-0.5f, 0.25f-0.5f, 0.25f+0.5f }, 0.125f, 5, NvBlastChunkDesc::NoFlags, 47 },
{ { 0.25f-0.5f, 0.25f-0.5f, 0.25f+0.5f }, 0.125f, 5, NvBlastChunkDesc::NoFlags, 48 },
{ {-0.25f+0.5f,-0.25f-0.5f,-0.25f+0.5f }, 0.125f, 6, NvBlastChunkDesc::NoFlags, 49 },
{ { 0.25f+0.5f,-0.25f-0.5f,-0.25f+0.5f }, 0.125f, 6, NvBlastChunkDesc::NoFlags, 50 },
{ {-0.25f+0.5f, 0.25f-0.5f,-0.25f+0.5f }, 0.125f, 6, NvBlastChunkDesc::NoFlags, 51 },
{ { 0.25f+0.5f, 0.25f-0.5f,-0.25f+0.5f }, 0.125f, 6, NvBlastChunkDesc::NoFlags, 52 },
{ {-0.25f+0.5f,-0.25f-0.5f, 0.25f+0.5f }, 0.125f, 6, NvBlastChunkDesc::NoFlags, 53 },
{ { 0.25f+0.5f,-0.25f-0.5f, 0.25f+0.5f }, 0.125f, 6, NvBlastChunkDesc::NoFlags, 54 },
{ {-0.25f+0.5f, 0.25f-0.5f, 0.25f+0.5f }, 0.125f, 6, NvBlastChunkDesc::NoFlags, 55 },
{ { 0.25f+0.5f, 0.25f-0.5f, 0.25f+0.5f }, 0.125f, 6, NvBlastChunkDesc::NoFlags, 56 },
{ {-0.25f-0.5f,-0.25f+0.5f,-0.25f+0.5f }, 0.125f, 7, NvBlastChunkDesc::NoFlags, 57 },
{ { 0.25f-0.5f,-0.25f+0.5f,-0.25f+0.5f }, 0.125f, 7, NvBlastChunkDesc::NoFlags, 58 },
{ {-0.25f-0.5f, 0.25f+0.5f,-0.25f+0.5f }, 0.125f, 7, NvBlastChunkDesc::NoFlags, 59 },
{ { 0.25f-0.5f, 0.25f+0.5f,-0.25f+0.5f }, 0.125f, 7, NvBlastChunkDesc::NoFlags, 60 },
{ {-0.25f-0.5f,-0.25f+0.5f, 0.25f+0.5f }, 0.125f, 7, NvBlastChunkDesc::NoFlags, 61 },
{ { 0.25f-0.5f,-0.25f+0.5f, 0.25f+0.5f }, 0.125f, 7, NvBlastChunkDesc::NoFlags, 62 },
{ {-0.25f-0.5f, 0.25f+0.5f, 0.25f+0.5f }, 0.125f, 7, NvBlastChunkDesc::NoFlags, 63 },
{ { 0.25f-0.5f, 0.25f+0.5f, 0.25f+0.5f }, 0.125f, 7, NvBlastChunkDesc::NoFlags, 64 },
{ {-0.25f+0.5f,-0.25f+0.5f,-0.25f+0.5f }, 0.125f, 8, NvBlastChunkDesc::NoFlags, 65 },
{ { 0.25f+0.5f,-0.25f+0.5f,-0.25f+0.5f }, 0.125f, 8, NvBlastChunkDesc::NoFlags, 66 },
{ {-0.25f+0.5f, 0.25f+0.5f,-0.25f+0.5f }, 0.125f, 8, NvBlastChunkDesc::NoFlags, 67 },
{ { 0.25f+0.5f, 0.25f+0.5f,-0.25f+0.5f }, 0.125f, 8, NvBlastChunkDesc::NoFlags, 68 },
{ {-0.25f+0.5f,-0.25f+0.5f, 0.25f+0.5f }, 0.125f, 8, NvBlastChunkDesc::NoFlags, 69 },
{ { 0.25f+0.5f,-0.25f+0.5f, 0.25f+0.5f }, 0.125f, 8, NvBlastChunkDesc::NoFlags, 70 },
{ {-0.25f+0.5f, 0.25f+0.5f, 0.25f+0.5f }, 0.125f, 8, NvBlastChunkDesc::NoFlags, 71 },
{ { 0.25f+0.5f, 0.25f+0.5f, 0.25f+0.5f }, 0.125f, 8, NvBlastChunkDesc::NoFlags, 72 },
};
const NvBlastChunkDesc g_cube3ChunkDescs[11] =
{
// centroid volume parent idx flags ID
{ { 0.0f, 0.0f, 0.0f }, 4.0f, UINT32_MAX, NvBlastChunkDesc::NoFlags, 0 },
{ { 0.0f, 0.0f, 0.0f }, 3.0f, UINT32_MAX, NvBlastChunkDesc::NoFlags, 1 },
{ { 0.0f, 0.0f, 0.0f }, 1.0f, UINT32_MAX, NvBlastChunkDesc::NoFlags, 2 },
{ {-0.5f,-0.5f,-0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 3 },
{ { 0.5f,-0.5f,-0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 4 },
{ {-0.5f, 0.5f,-0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 5 },
{ { 0.5f, 0.5f,-0.5f }, 1.0f, 1, NvBlastChunkDesc::SupportFlag, 6 },
{ {-0.5f,-0.5f, 0.5f }, 1.0f, 1, NvBlastChunkDesc::SupportFlag, 7 },
{ { 0.5f,-0.5f, 0.5f }, 1.0f, 1, NvBlastChunkDesc::SupportFlag, 8 },
{ {-0.5f, 0.5f, 0.5f }, 1.0f, 2, NvBlastChunkDesc::SupportFlag, 9 },
{ { 0.5f, 0.5f, 0.5f }, 1.0f, 2, NvBlastChunkDesc::SupportFlag, 10 },
};
const NvBlastBondDesc g_cube3BondDescs[12] =
{
// normal area centroid userData chunks
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 0.0f,-0.5f,-0.5f }, 0 }, { 3, 4 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 0.0f, 0.5f,-0.5f }, 0 }, { 5, 6 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 0.0f,-0.5f, 0.5f }, 0 }, { 7, 8 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 0.0f, 0.5f, 0.5f }, 0 }, { 9, 10} },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f,{-0.5f, 0.0f,-0.5f }, 0 }, { 3, 5 } },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f,{ 0.5f, 0.0f,-0.5f }, 0 }, { 4, 6 } },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f,{-0.5f, 0.0f, 0.5f }, 0 }, { 7, 9 } },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f,{ 0.5f, 0.0f, 0.5f }, 0 }, { 8, 10} },
{ { { 0.0f, 0.0f, 1.0f }, 1.0f,{-0.5f,-0.5f, 0.0f }, 0 }, { 3, 7 } },
{ { { 0.0f, 0.0f, 1.0f }, 1.0f,{ 0.5f,-0.5f, 0.0f }, 0 }, { 4, 8 } },
{ { { 0.0f, 0.0f, 1.0f }, 1.0f,{-0.5f, 0.5f, 0.0f }, 0 }, { 5, 9 } },
{ { { 0.0f, 0.0f, 1.0f }, 1.0f,{ 0.5f, 0.5f, 0.0f }, 0 }, { 6, 10} },
};
const NvBlastBondDesc g_cube3BondDescs_wb[16] =
{
// normal area centroid userData chunks
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 0.0f,-0.5f,-0.5f }, 0 }, { 3, 4 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 0.0f, 0.5f,-0.5f }, 0 }, { 5, 6 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 0.0f,-0.5f, 0.5f }, 0 }, { 7, 8 } },
{ { { 1.0f, 0.0f, 0.0f }, 1.0f,{ 0.0f, 0.5f, 0.5f }, 0 }, { 9, 10} },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f,{-0.5f, 0.0f,-0.5f }, 0 }, { 3, 5 } },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f,{ 0.5f, 0.0f,-0.5f }, 0 }, { 4, 6 } },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f,{-0.5f, 0.0f, 0.5f }, 0 }, { 7, 9 } },
{ { { 0.0f, 1.0f, 0.0f }, 1.0f,{ 0.5f, 0.0f, 0.5f }, 0 }, { 8, 10} },
{ { { 0.0f, 0.0f, 1.0f }, 1.0f,{-0.5f,-0.5f, 0.0f }, 0 }, { 3, 7 } },
{ { { 0.0f, 0.0f, 1.0f }, 1.0f,{ 0.5f,-0.5f, 0.0f }, 0 }, { 4, 8 } },
{ { { 0.0f, 0.0f, 1.0f }, 1.0f,{-0.5f, 0.5f, 0.0f }, 0 }, { 5, 9 } },
{ { { 0.0f, 0.0f, 1.0f }, 1.0f,{ 0.5f, 0.5f, 0.0f }, 0 }, { 6, 10} },
{ { { 0.0f, 0.0f, 1.0f }, 1.0f,{-0.5f,-0.5f,-1.0f }, 0 }, { 3, UINT32_MAX} },
{ { { 0.0f, 0.0f, 1.0f }, 1.0f,{ 0.5f,-0.5f,-1.0f }, 0 }, { 4, UINT32_MAX} },
{ { { 0.0f, 0.0f, 1.0f }, 1.0f,{-0.5f, 0.5f,-1.0f }, 0 }, { 5, UINT32_MAX} },
{ { { 0.0f, 0.0f, 1.0f }, 1.0f,{ 0.5f, 0.5f,-1.0f }, 0 }, { 6, UINT32_MAX} },
};
const NvBlastAssetDesc g_assetDescs[6] =
{
// 2x2x2 axis-aligned cube centered at the origin, split into 8 depth-1 1x1x1 child chunks
{ sizeof(g_cube1ChunkDescs) / sizeof(g_cube1ChunkDescs[0]), g_cube1ChunkDescs, sizeof(g_cube1BondDescs) / sizeof(g_cube1BondDescs[0]), g_cube1BondDescs },
// 2x2x2 axis-aligned cube centered at the origin, split into 8 depth-1 1x1x1 child chunks which are then split into 8 depth-2 (1/2)x(1/2)x(1/2) child chunks each
// Support is at depth-1, so the g_cube1BondDescs are used
{ sizeof(g_cube2ChunkDescs) / sizeof(g_cube2ChunkDescs[0]), g_cube2ChunkDescs, sizeof(g_cube1BondDescs) / sizeof(g_cube1BondDescs[0]), g_cube1BondDescs },
// 2x2x2 axis-aligned cube centered at the origin, split into 8 depth-1 1x1x1 child chunks with multiple roots
{ sizeof(g_cube3ChunkDescs) / sizeof(g_cube3ChunkDescs[0]), g_cube3ChunkDescs, sizeof(g_cube3BondDescs) / sizeof(g_cube3BondDescs[0]), g_cube3BondDescs },
// 2x2x2 axis-aligned cube centered at the origin, split into 8 depth-1 1x1x1 child chunks - contains world-bound chunks
{ sizeof(g_cube1ChunkDescs) / sizeof(g_cube1ChunkDescs[0]), g_cube1ChunkDescs, sizeof(g_cube1BondDescs_wb) / sizeof(g_cube1BondDescs_wb[0]), g_cube1BondDescs_wb },
// 2x2x2 axis-aligned cube centered at the origin, split into 8 depth-1 1x1x1 child chunks which are then split into 8 depth-2 (1/2)x(1/2)x(1/2) child chunks each - contains world-bound chunks
// Support is at depth-1, so the g_cube1BondDescs_wb are used
{ sizeof(g_cube2ChunkDescs) / sizeof(g_cube2ChunkDescs[0]), g_cube2ChunkDescs, sizeof(g_cube1BondDescs_wb) / sizeof(g_cube1BondDescs_wb[0]), g_cube1BondDescs_wb },
// 2x2x2 axis-aligned cube centered at the origin, split into 8 depth-1 1x1x1 child chunks with multiple roots - contains world-bound chunks
{ sizeof(g_cube3ChunkDescs) / sizeof(g_cube3ChunkDescs[0]), g_cube3ChunkDescs, sizeof(g_cube3BondDescs_wb) / sizeof(g_cube3BondDescs_wb[0]), g_cube3BondDescs_wb },
};
struct ExpectedValues
{
uint32_t totalChunkCount;
uint32_t graphNodeCount;
uint32_t leafChunkCount;
uint32_t bondCount;
uint32_t subsupportChunkCount;
};
const ExpectedAssetValues g_assetExpectedValues[6] =
{
// total graph leaves bonds sub
{ 9, 8, 8, 12, 0 },
{ 73, 8, 64, 12, 64 },
{ 11, 8, 8, 12, 0 },
{ 9, 9, 8, 16, 0 },
{ 73, 9, 64, 16, 64 },
{ 11, 9, 8, 16, 0 },
};
///////////// Badly-formed asset descs below //////////////
const NvBlastChunkDesc g_cube1ChunkDescsMissingCoverage[9] =
{
// centroid volume parent idx flags ID
{ { 0.0f, 0.0f, 0.0f }, 8.0f, UINT32_MAX, NvBlastChunkDesc::NoFlags, 0 },
{ {-0.5f,-0.5f,-0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 1 },
{ { 0.5f,-0.5f,-0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 2 },
{ {-0.5f, 0.5f,-0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 3 },
{ { 0.5f, 0.5f,-0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 4 },
{ {-0.5f,-0.5f, 0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 5 },
{ { 0.5f,-0.5f, 0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 6 },
{ {-0.5f, 0.5f, 0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 7 },
{ { 0.5f, 0.5f, 0.5f }, 1.0f, 0, NvBlastChunkDesc::NoFlags, 8 },
};
const NvBlastChunkDesc g_cube2ChunkDescsMissingCoverage1[17] =
{
// centroid volume parent idx flags ID
{ { 0.0f, 0.0f, 0.0f }, 8.0f, UINT32_MAX, NvBlastChunkDesc::NoFlags, 0 },
{ {-0.5f,-0.5f,-0.5f }, 1.0f, 0, NvBlastChunkDesc::NoFlags, 1 },
{ { 0.5f,-0.5f,-0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 2 },
{ {-0.5f, 0.5f,-0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 3 },
{ { 0.5f, 0.5f,-0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 4 },
{ {-0.5f,-0.5f, 0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 5 },
{ { 0.5f,-0.5f, 0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 6 },
{ {-0.5f, 0.5f, 0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 7 },
{ { 0.5f, 0.5f, 0.5f }, 1.0f, 0, NvBlastChunkDesc::NoFlags, 8 },
{ {-0.25f-0.5f,-0.25f-0.5f,-0.25f-0.5f }, 0.125f, 1, NvBlastChunkDesc::NoFlags, 9 },
{ { 0.25f-0.5f,-0.25f-0.5f,-0.25f-0.5f }, 0.125f, 1, NvBlastChunkDesc::NoFlags, 10 },
{ {-0.25f-0.5f, 0.25f-0.5f,-0.25f-0.5f }, 0.125f, 1, NvBlastChunkDesc::NoFlags, 11 },
{ { 0.25f-0.5f, 0.25f-0.5f,-0.25f-0.5f }, 0.125f, 1, NvBlastChunkDesc::NoFlags, 12 },
{ {-0.25f-0.5f,-0.25f-0.5f, 0.25f-0.5f }, 0.125f, 1, NvBlastChunkDesc::NoFlags, 13 },
{ { 0.25f-0.5f,-0.25f-0.5f, 0.25f-0.5f }, 0.125f, 1, NvBlastChunkDesc::NoFlags, 14 },
{ {-0.25f-0.5f, 0.25f-0.5f, 0.25f-0.5f }, 0.125f, 1, NvBlastChunkDesc::NoFlags, 15 },
{ { 0.25f-0.5f, 0.25f-0.5f, 0.25f-0.5f }, 0.125f, 1, NvBlastChunkDesc::NoFlags, 16 },
};
const NvBlastChunkDesc g_cube2ChunkDescsMissingCoverage2[17] =
{
// centroid volume parent idx flags ID
{ { 0.0f, 0.0f, 0.0f }, 8.0f, UINT32_MAX, NvBlastChunkDesc::NoFlags, 0 },
{ {-0.5f,-0.5f,-0.5f }, 1.0f, 0, NvBlastChunkDesc::NoFlags, 1 },
{ { 0.5f,-0.5f,-0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 2 },
{ {-0.5f, 0.5f,-0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 3 },
{ { 0.5f, 0.5f,-0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 4 },
{ {-0.5f,-0.5f, 0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 5 },
{ { 0.5f,-0.5f, 0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 6 },
{ {-0.5f, 0.5f, 0.5f }, 1.0f, 0, NvBlastChunkDesc::SupportFlag, 7 },
{ { 0.5f, 0.5f, 0.5f }, 1.0f, 0, NvBlastChunkDesc::NoFlags, 8 },
{ {-0.25f-0.5f,-0.25f-0.5f,-0.25f-0.5f }, 0.125f, 1, NvBlastChunkDesc::NoFlags, 9 },
{ { 0.25f-0.5f,-0.25f-0.5f,-0.25f-0.5f }, 0.125f, 1, NvBlastChunkDesc::NoFlags, 10 },
{ {-0.25f-0.5f, 0.25f-0.5f,-0.25f-0.5f }, 0.125f, 1, NvBlastChunkDesc::NoFlags, 11 },
{ { 0.25f-0.5f, 0.25f-0.5f,-0.25f-0.5f }, 0.125f, 1, NvBlastChunkDesc::NoFlags, 12 },
{ {-0.25f-0.5f,-0.25f-0.5f, 0.25f-0.5f }, 0.125f, 1, NvBlastChunkDesc::NoFlags, 13 },
{ { 0.25f-0.5f,-0.25f-0.5f, 0.25f-0.5f }, 0.125f, 1, NvBlastChunkDesc::NoFlags, 14 },
{ {-0.25f-0.5f, 0.25f-0.5f, 0.25f-0.5f }, 0.125f, 1, NvBlastChunkDesc::NoFlags, 15 },
{ { 0.25f-0.5f, 0.25f-0.5f, 0.25f-0.5f }, 0.125f, 1, NvBlastChunkDesc::SupportFlag, 16 },
};
const NvBlastAssetDesc g_assetDescsMissingCoverage[6] =
{
{ sizeof(g_cube1ChunkDescsMissingCoverage) / sizeof(g_cube1ChunkDescsMissingCoverage[0]), g_cube1ChunkDescsMissingCoverage, sizeof(g_cube1BondDescs) / sizeof(g_cube1BondDescs[0]), g_cube1BondDescs },
{ sizeof(g_cube2ChunkDescsMissingCoverage1) / sizeof(g_cube2ChunkDescsMissingCoverage1[0]), g_cube2ChunkDescsMissingCoverage1, sizeof(g_cube1BondDescs) / sizeof(g_cube1BondDescs[0]), g_cube1BondDescs },
{ sizeof(g_cube2ChunkDescsMissingCoverage2) / sizeof(g_cube2ChunkDescsMissingCoverage2[0]), g_cube2ChunkDescsMissingCoverage2, sizeof(g_cube1BondDescs) / sizeof(g_cube1BondDescs[0]), g_cube1BondDescs },
{ sizeof(g_cube1ChunkDescsMissingCoverage) / sizeof(g_cube1ChunkDescsMissingCoverage[0]), g_cube1ChunkDescsMissingCoverage, sizeof(g_cube1BondDescs_wb) / sizeof(g_cube1BondDescs_wb[0]), g_cube1BondDescs_wb },
{ sizeof(g_cube2ChunkDescsMissingCoverage1) / sizeof(g_cube2ChunkDescsMissingCoverage1[0]), g_cube2ChunkDescsMissingCoverage1, sizeof(g_cube1BondDescs_wb) / sizeof(g_cube1BondDescs_wb[0]), g_cube1BondDescs_wb },
{ sizeof(g_cube2ChunkDescsMissingCoverage2) / sizeof(g_cube2ChunkDescsMissingCoverage2[0]), g_cube2ChunkDescsMissingCoverage2, sizeof(g_cube1BondDescs_wb) / sizeof(g_cube1BondDescs_wb[0]), g_cube1BondDescs_wb },
};
extern const ExpectedAssetValues g_assetsFromMissingCoverageExpectedValues[6] =
{
// total graph leaves bonds sub
{ 9, 8, 8, 12, 0 },
{ 17, 8, 15, 12, 8 },
{ 17, 15, 15, 9, 0 },
{ 9, 9, 8, 16, 0 },
{ 17, 9, 15, 16, 8 },
{ 17, 16, 15, 12, 0 },
};
void generateCube(GeneratorAsset& cubeAsset, NvBlastAssetDesc& assetDesc, size_t maxDepth, size_t width, int32_t supportDepth, CubeAssetGenerator::BondFlags bondFlags)
{
CubeAssetGenerator::Settings settings;
settings.extents = GeneratorAsset::Vec3(1, 1, 1);
CubeAssetGenerator::DepthInfo depthInfo;
depthInfo.slicesPerAxis = GeneratorAsset::Vec3(1, 1, 1);
depthInfo.flag = NvBlastChunkDesc::Flags::NoFlags;
settings.depths.push_back(depthInfo);
settings.bondFlags = bondFlags;
for (uint32_t depth = 1; depth < maxDepth; ++depth)
{
depthInfo.slicesPerAxis = GeneratorAsset::Vec3((float)width, (float)width, (float)width);
settings.depths.push_back(depthInfo);
}
settings.depths[(supportDepth > 0 ? supportDepth : maxDepth) - 1].flag = NvBlastChunkDesc::SupportFlag; // support at the requested depth, or at the leaves if supportDepth <= 0
CubeAssetGenerator::generate(cubeAsset, settings);
assetDesc.bondCount = (uint32_t)cubeAsset.solverBonds.size();
assetDesc.bondDescs = cubeAsset.solverBonds.data();
assetDesc.chunkCount = (uint32_t)cubeAsset.solverChunks.size(); // size of the array chunkDescs points to
assetDesc.chunkDescs = cubeAsset.solverChunks.data();
}
void generateRandomCube(GeneratorAsset& cubeAsset, NvBlastAssetDesc& desc, uint32_t minChunkCount, uint32_t maxChunkCount)
{
CubeAssetGenerator::Settings settings;
settings.extents = GeneratorAsset::Vec3(1, 1, 1);
CubeAssetGenerator::DepthInfo depthInfo;
depthInfo.slicesPerAxis = GeneratorAsset::Vec3(1, 1, 1);
depthInfo.flag = NvBlastChunkDesc::Flags::NoFlags;
settings.depths.push_back(depthInfo);
uint32_t chunkCount = 1;
while (chunkCount < minChunkCount)
{
uint32_t chunkMul;
do
{
depthInfo.slicesPerAxis = GeneratorAsset::Vec3((float)(1 + rand() % 4), (float)(1 + rand() % 4), (float)(1 + rand() % 4));
chunkMul = (uint32_t)(depthInfo.slicesPerAxis.x * depthInfo.slicesPerAxis.y * depthInfo.slicesPerAxis.z);
} while (chunkMul == 1);
if (chunkCount*chunkMul > maxChunkCount)
{
break;
}
chunkCount *= chunkMul;
settings.depths.push_back(depthInfo);
settings.extents = settings.extents * depthInfo.slicesPerAxis;
}
settings.depths.back().flag = NvBlastChunkDesc::SupportFlag; // Leaves are support
// Make largest direction unit size
settings.extents = settings.extents * (1.0f / std::max(settings.extents.x, std::max(settings.extents.y, settings.extents.z)));
// Create asset
CubeAssetGenerator::generate(cubeAsset, settings);
desc.chunkDescs = cubeAsset.solverChunks.data();
desc.chunkCount = (uint32_t)cubeAsset.solverChunks.size();
desc.bondDescs = cubeAsset.solverBonds.data();
desc.bondCount = (uint32_t)cubeAsset.solverBonds.size();
} |
NVIDIA-Omniverse/PhysX/blast/source/test/src/utils/TaskDispatcher.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#pragma once
#include <thread>
#include <mutex>
#include <queue>
#include <list>
#include <future>
#include <condition_variable>
#include <memory>
#include <atomic>
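/**
Simple thread-pool dispatcher used by tests: queue work with addTask(), then call
process(), which hands tasks to free worker threads and invokes the onTaskFinished
callback (which may enqueue more tasks) until the queue is empty and all workers are idle.
*/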
class TaskDispatcher
{
public:
class Task
{
public:
virtual void process() = 0;
virtual ~Task() {}
};
typedef std::function<void(TaskDispatcher& dispatcher, std::unique_ptr<Task>)> OnTaskFinishedFunction;
TaskDispatcher(uint32_t threadCount, OnTaskFinishedFunction onTaskFinished) :
m_workingThreadsCount(0), m_onTaskFinished(onTaskFinished)
{
m_threads.resize(threadCount);
for (uint32_t i = 0; i < threadCount; i++)
{
m_threads[i] = std::unique_ptr<Thread>(new Thread(i, m_completionSemaphore));
m_threads[i]->start();
m_freeThreads.push(m_threads[i].get());
}
}
void addTask(std::unique_ptr<Task> task)
{
m_tasks.push(std::move(task));
}
void process()
{
// main loop
while (m_tasks.size() > 0 || m_workingThreadsCount > 0)
{
// assign tasks
while (!(m_tasks.empty() || m_freeThreads.empty()))
{
auto task = std::move(m_tasks.front());
m_tasks.pop();
Thread* freeThread = m_freeThreads.front();
m_freeThreads.pop();
freeThread->processTask(std::move(task));
m_workingThreadsCount++;
}
m_completionSemaphore.wait();
// check for completion
for (std::unique_ptr<Thread>& thread : m_threads)
{
if (thread->isTaskFinished())
{
std::unique_ptr<Task> task;
thread->collectTask(task);
m_onTaskFinished(*this, std::move(task));
m_freeThreads.push(thread.get());
m_workingThreadsCount--;
break;
}
}
}
}
private:
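// Counting semaphore built on a mutex and condition variable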
class Semaphore
{
public:
Semaphore(int count_ = 0)
: m_count(count_) {}
inline void notify()
{
std::unique_lock<std::mutex> lock(m_mutex);
m_count++;
m_cv.notify_one();
}
inline void wait()
{
std::unique_lock<std::mutex> lock(m_mutex);
while (m_count == 0){
m_cv.wait(lock);
}
m_count--;
}
private:
std::mutex m_mutex;
std::condition_variable m_cv;
int m_count;
};
class Thread
{
public:
Thread(uint32_t id_, Semaphore& completionSemaphore) : m_id(id_), m_completionSemaphore(completionSemaphore), m_running(false), m_taskFinished(false) {}
virtual ~Thread() { stop(); }
void start()
{
if (!m_running)
{
m_running = true;
m_thread = std::thread(&Thread::body, this);
}
}
void stop()
{
if (m_running)
{
m_running = false;
m_newTaskSemaphore.notify();
m_thread.join();
}
}
void processTask(std::unique_ptr<Task> task)
{
m_task = std::move(task);
m_taskFinished = false;
m_newTaskSemaphore.notify();
}
void collectTask(std::unique_ptr<Task>& task)
{
task = std::move(m_task);
m_task = nullptr;
m_taskFinished = false;
}
bool hasTask() const { return m_task != nullptr; }
bool isTaskFinished() const { return m_taskFinished; }
private:
void body()
{
while (1)
{
m_newTaskSemaphore.wait();
if (!m_running)
return;
m_task->process();
m_taskFinished = true;
m_completionSemaphore.notify();
}
}
uint32_t m_id;
Semaphore& m_completionSemaphore;
std::thread m_thread;
std::atomic<bool> m_running; // written by the owner thread, read by the worker
std::unique_ptr<Task> m_task;
std::atomic<bool> m_taskFinished;
Semaphore m_newTaskSemaphore;
};
private:
uint32_t m_workingThreadsCount;
std::queue<std::unique_ptr<Task>> m_tasks;
OnTaskFinishedFunction m_onTaskFinished;
std::vector<std::unique_ptr<Thread>> m_threads;
std::queue<Thread*> m_freeThreads;
Semaphore m_completionSemaphore;
};
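// Usage sketch (illustrative only; the task type and thread count below are
// hypothetical, not part of this header):
//
//   struct MyTask : TaskDispatcher::Task
//   {
//       void process() override { /* runs on a worker thread */ }
//   };
//
//   TaskDispatcher dispatcher(4, [](TaskDispatcher& d, std::unique_ptr<TaskDispatcher::Task>)
//   {
//       // runs on the caller's thread inside process(); may call d.addTask(...)
//       // to enqueue follow-up work
//   });
//   dispatcher.addTask(std::unique_ptr<TaskDispatcher::Task>(new MyTask()));
//   dispatcher.process();  // blocks until the queue is drained and all workers are idle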
|
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringMeshUtils.cpp | #include "NvBlastExtAuthoringMeshUtils.h"
#include "NvBlastExtAuthoringMeshImpl.h"
#include "NvBlastExtAuthoringPerlinNoise.h"
#include "NvBlastExtAuthoringFractureTool.h"
#include <NvBlastNvSharedHelpers.h>
#include <NvCMath.h>
#include <algorithm>
using namespace nvidia;
#define UV_SCALE 1.f
#define CYLINDER_UV_SCALE (UV_SCALE * 1.732f)
namespace Nv
{
namespace Blast
{
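// Builds an orthonormal tangent basis (t1, t2) for the given normal. The
// branch picks a cross axis that is not nearly parallel to the normal, which
// would otherwise produce a degenerate tangent.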
void getTangents(const NvVec3& normal, NvVec3& t1, NvVec3& t2)
{
if (std::abs(normal.z) < 0.9)
{
t1 = normal.cross(NvVec3(0, 0, 1));
}
else
{
t1 = normal.cross(NvVec3(1, 0, 0));
}
t2 = t1.cross(normal);
}
Mesh* getCuttingBox(const NvVec3& point, const NvVec3& normal, float size, int64_t id, int32_t interiorMaterialId)
{
NvVec3 lNormal = normal.getNormalized();
NvVec3 t1, t2;
getTangents(lNormal, t1, t2);
std::vector<Vertex> positions(8);
toNvShared(positions[0].p) = point + (t1 + t2) * size;
toNvShared(positions[1].p) = point + (t2 - t1) * size;
toNvShared(positions[2].p) = point + (-t1 - t2) * size;
toNvShared(positions[3].p) = point + (t1 - t2) * size;
toNvShared(positions[4].p) = point + (t1 + t2 + lNormal) * size;
toNvShared(positions[5].p) = point + (t2 - t1 + lNormal) * size;
toNvShared(positions[6].p) = point + (-t1 - t2 + lNormal) * size;
toNvShared(positions[7].p) = point + (t1 - t2 + lNormal) * size;
toNvShared(positions[0].n) = -lNormal;
toNvShared(positions[1].n) = -lNormal;
toNvShared(positions[2].n) = -lNormal;
toNvShared(positions[3].n) = -lNormal;
toNvShared(positions[4].n) = -lNormal;
toNvShared(positions[5].n) = -lNormal;
toNvShared(positions[6].n) = -lNormal;
toNvShared(positions[7].n) = -lNormal;
positions[0].uv[0] = { 0, 0 };
positions[1].uv[0] = {UV_SCALE, 0};
positions[2].uv[0] = {UV_SCALE, UV_SCALE};
positions[3].uv[0] = {0, UV_SCALE};
positions[4].uv[0] = {0, 0};
positions[5].uv[0] = {UV_SCALE, 0};
positions[6].uv[0] = {UV_SCALE, UV_SCALE};
positions[7].uv[0] = {0, UV_SCALE};
std::vector<Edge> edges;
std::vector<Facet> facets;
edges.push_back({0, 1});
edges.push_back({1, 2});
edges.push_back({2, 3});
edges.push_back({3, 0});
facets.push_back({0, 4, id, interiorMaterialId, -1});
edges.push_back({0, 3});
edges.push_back({3, 7});
edges.push_back({7, 4});
edges.push_back({4, 0});
facets.push_back({4, 4, id, interiorMaterialId, -1});
edges.push_back({3, 2});
edges.push_back({2, 6});
edges.push_back({6, 7});
edges.push_back({7, 3});
facets.push_back({8, 4, id, interiorMaterialId, -1});
edges.push_back({5, 6});
edges.push_back({6, 2});
edges.push_back({2, 1});
edges.push_back({1, 5});
facets.push_back({12, 4, id, interiorMaterialId, -1});
edges.push_back({4, 5});
edges.push_back({5, 1});
edges.push_back({1, 0});
edges.push_back({0, 4});
facets.push_back({16, 4, id, interiorMaterialId, -1});
edges.push_back({4, 7});
edges.push_back({7, 6});
edges.push_back({6, 5});
edges.push_back({5, 4});
facets.push_back({20, 4, id, interiorMaterialId, -1});
return new MeshImpl(positions.data(), edges.data(), facets.data(), static_cast<uint32_t>(positions.size()),
static_cast<uint32_t>(edges.size()), static_cast<uint32_t>(facets.size()));
}
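// Example (illustrative; the id and material index below are placeholders):
// build a cutting box of half-extent 20 around the plane z = 0,
//
//   Mesh* box = getCuttingBox(NvVec3(0, 0, 0), NvVec3(0, 0, 1), 20.0f, /*id*/ 1, /*interiorMaterialId*/ 0);
//
// The returned mesh is owned by the caller and is typically fed to the
// slicing/fracture code as the cutting tool.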
void inverseNormalAndIndices(Mesh* mesh)
{
for (uint32_t i = 0; i < mesh->getVerticesCount(); ++i)
{
toNvShared(mesh->getVerticesWritable()[i].n) *= -1.0f;
}
for (uint32_t i = 0; i < mesh->getFacetCount(); ++i)
{
mesh->getFacetWritable(i)->userData = -mesh->getFacet(i)->userData;
}
}
void setCuttingBox(const NvVec3& point, const NvVec3& normal, Mesh* mesh, float size, int64_t id)
{
NvVec3 t1, t2;
NvVec3 lNormal = normal.getNormalized();
getTangents(lNormal, t1, t2);
Vertex* positions = mesh->getVerticesWritable();
toNvShared(positions[0].p) = point + (t1 + t2) * size;
toNvShared(positions[1].p) = point + (t2 - t1) * size;
toNvShared(positions[2].p) = point + (-t1 - t2) * size;
toNvShared(positions[3].p) = point + (t1 - t2) * size;
toNvShared(positions[4].p) = point + (t1 + t2 + lNormal) * size;
toNvShared(positions[5].p) = point + (t2 - t1 + lNormal) * size;
toNvShared(positions[6].p) = point + (-t1 - t2 + lNormal) * size;
toNvShared(positions[7].p) = point + (t1 - t2 + lNormal) * size;
toNvShared(positions[0].n) = -lNormal;
toNvShared(positions[1].n) = -lNormal;
toNvShared(positions[2].n) = -lNormal;
toNvShared(positions[3].n) = -lNormal;
toNvShared(positions[4].n) = -lNormal;
toNvShared(positions[5].n) = -lNormal;
toNvShared(positions[6].n) = -lNormal;
toNvShared(positions[7].n) = -lNormal;
for (uint32_t i = 0; i < mesh->getFacetCount(); ++i)
{
mesh->getFacetWritable(i)->userData = id;
}
mesh->recalculateBoundingBox();
}
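// Stepper abstracts walking a 2D grid of vertices over a surface: getStart()
// returns the first corner, getStep1()/getStep2() advance along the two grid
// axes, and getNormal() supplies the direction used for noise displacement.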
struct Stepper
{
virtual nvidia::NvVec3 getStep1(uint32_t w, uint32_t h) const = 0;
virtual nvidia::NvVec3 getStep2(uint32_t w) const = 0;
virtual nvidia::NvVec3 getStart() const = 0;
virtual nvidia::NvVec3 getNormal(uint32_t w, uint32_t h) const = 0;
virtual bool isStep2ClosedLoop() const
{
return false;
}
virtual bool isStep2FreeBoundary() const
{
return false;
}
};
struct PlaneStepper : public Stepper
{
PlaneStepper(const nvidia::NvVec3& normal, const nvidia::NvVec3& point, float sizeX, float sizeY,
uint32_t resolutionX, uint32_t resolutionY, bool swapTangents = false)
{
NvVec3 t1, t2;
lNormal = normal.getNormalized();
getTangents(lNormal, t1, t2);
if (swapTangents)
{
std::swap(t1, t2);
}
t11d = -t1 * 2.0f * sizeX / resolutionX;
t12d = -t2 * 2.0f * sizeY / resolutionY;
t21d = t11d;
t22d = t12d;
cPos = point + (t1 * sizeX + t2 * sizeY);
resY = resolutionY;
}
// Define the face by 4 corner points; the points should lie in a plane
PlaneStepper(const nvidia::NvVec3& p11, const nvidia::NvVec3& p12, const nvidia::NvVec3& p21, const nvidia::NvVec3& p22,
uint32_t resolutionX, uint32_t resolutionY)
{
lNormal = -(p21 - p11).cross(p12 - p11).getNormalized();
if (lNormal.magnitude() < 1e-5)
{
lNormal = (p21 - p22).cross(p12 - p22).getNormalized();
}
t11d = (p11 - p21) / resolutionX;
t12d = (p12 - p11) / resolutionY;
t21d = (p12 - p22) / resolutionX;
t22d = (p22 - p21) / resolutionY;
cPos = p21;
resY = resolutionY;
}
nvidia::NvVec3 getStep1(uint32_t y, uint32_t) const
{
return (t11d * (resY - y) + t21d * y) / resY;
}
nvidia::NvVec3 getStep2(uint32_t) const
{
return t22d;
}
nvidia::NvVec3 getStart() const
{
return cPos;
}
nvidia::NvVec3 getNormal(uint32_t, uint32_t) const
{
return lNormal;
}
NvVec3 t11d, t12d, t21d, t22d, cPos, lNormal;
uint32_t resY;
};
void fillEdgesAndFaces(std::vector<Edge>& edges, std::vector<Facet>& facets, uint32_t h, uint32_t w,
uint32_t firstVertex, uint32_t verticesCount, int64_t id, int32_t interiorMaterialId,
int32_t smoothingGroup = -1, bool reflected = false)
{
for (uint32_t i = 0; i < w; ++i)
{
for (uint32_t j = 0; j < h; ++j)
{
int32_t start = edges.size();
uint32_t idx00 = i * (h + 1) + j + firstVertex;
uint32_t idx01 = idx00 + 1;
uint32_t idx10 = (idx00 + h + 1) % verticesCount;
uint32_t idx11 = (idx01 + h + 1) % verticesCount;
if (reflected)
{
edges.push_back({idx01, idx11});
edges.push_back({idx11, idx10});
edges.push_back({idx10, idx01});
facets.push_back({start, 3, id, interiorMaterialId, smoothingGroup});
start = edges.size();
edges.push_back({idx01, idx10});
edges.push_back({idx10, idx00});
edges.push_back({idx00, idx01});
facets.push_back({start, 3, id, interiorMaterialId, smoothingGroup});
}
else
{
edges.push_back({idx00, idx01});
edges.push_back({idx01, idx11});
edges.push_back({idx11, idx00});
facets.push_back({start, 3, id, interiorMaterialId, smoothingGroup});
start = edges.size();
edges.push_back({idx00, idx11});
edges.push_back({idx11, idx10});
edges.push_back({idx10, idx00});
facets.push_back({start, 3, id, interiorMaterialId, smoothingGroup});
}
}
}
}
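// Builds a (w+1) x (h+1) vertex grid by walking the stepper and displaces the
// interior vertices along the stepper normal using simplex noise; the first
// and last columns are displaced too only when randomizeLast is set (used
// when adjacent faces are later glued together). Cells are triangulated via
// fillEdgesAndFaces.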
void getNoisyFace(std::vector<Vertex>& vertices, std::vector<Edge>& edges, std::vector<Facet>& facets, uint32_t h,
uint32_t w, const nvidia::NvVec2& uvOffset, const nvidia::NvVec2& uvScale, const Stepper& stepper,
SimplexNoise& nEval, int64_t id, int32_t interiorMaterialId, bool randomizeLast = false)
{
uint32_t randIdx = randomizeLast ? 1 : 0;
NvVec3 cPosit = stepper.getStart();
uint32_t firstVertex = vertices.size();
for (uint32_t i = 0; i < w + 1; ++i)
{
NvVec3 lcPosit = cPosit;
for (uint32_t j = 0; j < h + 1; ++j)
{
vertices.push_back(Vertex());
toNvShared(vertices.back().p) = lcPosit;
toNvShared(vertices.back().uv[0]) = uvOffset + uvScale.multiply(nvidia::NvVec2(j, i));
lcPosit += stepper.getStep1(i, j);
}
cPosit += stepper.getStep2(i);
}
for (uint32_t i = 1 - randIdx; i < w + randIdx; ++i)
{
for (uint32_t j = 1; j < h; ++j)
{
// TODO limit max displacement for cylinder
NvVec3& pnt = toNvShared(vertices[i * (h + 1) + j + firstVertex].p);
pnt += stepper.getNormal(i, j) * nEval.sample(pnt);
}
}
fillEdgesAndFaces(edges, facets, h, w, firstVertex, vertices.size(), id, interiorMaterialId);
}
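// Floored ("mathematical") modulo: unlike the C++ % operator the result is
// always in [0, modulus), e.g. unsignedMod(-1, 5) == 4.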
uint32_t unsignedMod(int32_t n, uint32_t modulus)
{
const int32_t d = n / (int32_t)modulus;
const int32_t m = n - d * (int32_t)modulus;
return m >= 0 ? (uint32_t)m : (uint32_t)m + modulus;
}
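// Estimates normals for interior grid vertices by summing cross products of
// the four incident grid-edge pairs and normalizing; border vertices are left
// untouched.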
void calculateNormals(std::vector<Vertex>& vertices, uint32_t h, uint32_t w, bool inverseNormals = false)
{
for (uint32_t i = 1; i < w; ++i)
{
for (uint32_t j = 1; j < h; ++j)
{
int32_t idx = i * (h + 1) + j;
NvVec3 v1 = toNvShared(vertices[idx + h + 1].p - vertices[idx].p);
NvVec3 v2 = toNvShared(vertices[idx + 1].p - vertices[idx].p);
NvVec3 v3 = toNvShared(vertices[idx - (h + 1)].p - vertices[idx].p);
NvVec3 v4 = toNvShared(vertices[idx - 1].p - vertices[idx].p);
NvVec3& n = toNvShared(vertices[idx].n);
n = v1.cross(v2) + v2.cross(v3) + v3.cross(v4) + v4.cross(v1);
if (inverseNormals)
{
n = -n;
}
n.normalize();
}
}
}
Mesh* getNoisyCuttingBoxPair(const nvidia::NvVec3& point, const nvidia::NvVec3& normal, float size, float jaggedPlaneSize,
nvidia::NvVec3 resolution, int64_t id, float amplitude, float frequency, int32_t octaves,
int32_t seed, int32_t interiorMaterialId)
{
NvVec3 t1, t2;
NvVec3 lNormal = normal.getNormalized();
getTangents(lNormal, t1, t2);
float sz = 2.f * jaggedPlaneSize;
uint32_t resolutionX =
std::max(1u, (uint32_t)std::roundf(sz * std::abs(t1.x) * resolution.x + sz * std::abs(t1.y) * resolution.y +
sz * std::abs(t1.z) * resolution.z));
uint32_t resolutionY =
std::max(1u, (uint32_t)std::roundf(sz * std::abs(t2.x) * resolution.x + sz * std::abs(t2.y) * resolution.y +
sz * std::abs(t2.z) * resolution.z));
PlaneStepper stepper(normal, point, jaggedPlaneSize, jaggedPlaneSize, resolutionX, resolutionY);
SimplexNoise nEval(amplitude, frequency, octaves, seed);
std::vector<Vertex> vertices;
vertices.reserve((resolutionX + 1) * (resolutionY + 1) + 12);
std::vector<Edge> edges;
std::vector<Facet> facets;
getNoisyFace(vertices, edges, facets, resolutionX, resolutionY, nvidia::NvVec2(0.f),
nvidia::NvVec2(UV_SCALE / resolutionX, UV_SCALE / resolutionY), stepper, nEval, id, interiorMaterialId);
calculateNormals(vertices, resolutionX, resolutionY);
uint32_t offset = (resolutionX + 1) * (resolutionY + 1);
vertices.resize(offset + 12);
toNvShared(vertices[0 + offset].p) = point + (t1 + t2) * size;
toNvShared(vertices[1 + offset].p) = point + (t2 - t1) * size;
toNvShared(vertices[2 + offset].p) = point + (-t1 - t2) * size;
toNvShared(vertices[3 + offset].p) = point + (t1 - t2) * size;
toNvShared(vertices[8 + offset].p) = point + (t1 + t2) * jaggedPlaneSize;
toNvShared(vertices[9 + offset].p) = point + (t2 - t1) * jaggedPlaneSize;
toNvShared(vertices[10 + offset].p) = point + (-t1 - t2) * jaggedPlaneSize;
toNvShared(vertices[11 + offset].p) = point + (t1 - t2) * jaggedPlaneSize;
toNvShared(vertices[4 + offset].p) = point + (t1 + t2 + lNormal) * size;
toNvShared(vertices[5 + offset].p) = point + (t2 - t1 + lNormal) * size;
toNvShared(vertices[6 + offset].p) = point + (-t1 - t2 + lNormal) * size;
toNvShared(vertices[7 + offset].p) = point + (t1 - t2 + lNormal) * size;
int32_t edgeOffset = edges.size();
edges.push_back({0 + offset, 1 + offset});
edges.push_back({ 1 + offset, 2 + offset });
edges.push_back({ 2 + offset, 3 + offset });
edges.push_back({3 + offset, 0 + offset});
edges.push_back({ 11 + offset, 10 + offset });
edges.push_back({ 10 + offset, 9 + offset });
edges.push_back({ 9 + offset, 8 + offset });
edges.push_back({ 8 + offset, 11 + offset });
facets.push_back({ edgeOffset, 8, id, interiorMaterialId, -1 });
edges.push_back({ 0 + offset, 3 + offset });
edges.push_back({ 3 + offset, 7 + offset });
edges.push_back({ 7 + offset, 4 + offset });
edges.push_back({ 4 + offset, 0 + offset });
facets.push_back({ 8 + edgeOffset, 4, id, interiorMaterialId, -1 });
edges.push_back({ 3 + offset, 2 + offset });
edges.push_back({ 2 + offset, 6 + offset });
edges.push_back({ 6 + offset, 7 + offset });
edges.push_back({ 7 + offset, 3 + offset });
facets.push_back({ 12 + edgeOffset, 4, id, interiorMaterialId, -1 });
edges.push_back({ 5 + offset, 6 + offset });
edges.push_back({ 6 + offset, 2 + offset });
edges.push_back({ 2 + offset, 1 + offset });
edges.push_back({ 1 + offset, 5 + offset });
facets.push_back({ 16 + edgeOffset, 4, id, interiorMaterialId, -1 });
edges.push_back({ 4 + offset, 5 + offset });
edges.push_back({ 5 + offset, 1 + offset });
edges.push_back({ 1 + offset, 0 + offset });
edges.push_back({ 0 + offset, 4 + offset });
facets.push_back({ 20 + edgeOffset, 4, id, interiorMaterialId, -1 });
edges.push_back({ 4 + offset, 7 + offset });
edges.push_back({ 7 + offset, 6 + offset });
edges.push_back({ 6 + offset, 5 + offset });
edges.push_back({ 5 + offset, 4 + offset });
facets.push_back({ 24 + edgeOffset, 4, id, interiorMaterialId, -1 });
//
return new MeshImpl(vertices.data(), edges.data(), facets.data(), vertices.size(), edges.size(), facets.size());
}
Mesh* getBigBox(const NvVec3& point, float size, int32_t interiorMaterialId)
{
NvVec3 normal(0, 0, 1);
normal.normalize();
NvVec3 t1, t2;
getTangents(normal, t1, t2);
std::vector<Vertex> positions(8);
toNvShared(positions[0].p) = point + (t1 + t2 - normal) * size;
toNvShared(positions[1].p) = point + (t2 - t1 - normal) * size;
toNvShared(positions[2].p) = point + (-t1 - t2 - normal) * size;
toNvShared(positions[3].p) = point + (t1 - t2 - normal) * size;
toNvShared(positions[4].p) = point + (t1 + t2 + normal) * size;
toNvShared(positions[5].p) = point + (t2 - t1 + normal) * size;
toNvShared(positions[6].p) = point + (-t1 - t2 + normal) * size;
toNvShared(positions[7].p) = point + (t1 - t2 + normal) * size;
positions[0].uv[0] = {0, 0};
positions[1].uv[0] = {UV_SCALE, 0};
positions[2].uv[0] = {UV_SCALE, UV_SCALE};
positions[3].uv[0] = {0, UV_SCALE};
positions[4].uv[0] = {0, 0};
positions[5].uv[0] = {UV_SCALE, 0};
positions[6].uv[0] = {UV_SCALE, UV_SCALE};
positions[7].uv[0] = {0, UV_SCALE};
std::vector<Edge> edges;
std::vector<Facet> facets;
edges.push_back({0, 1});
edges.push_back({1, 2});
edges.push_back({2, 3});
edges.push_back({3, 0});
facets.push_back({0, 4, 0, interiorMaterialId, -1});
edges.push_back({0, 3});
edges.push_back({3, 7});
edges.push_back({7, 4});
edges.push_back({4, 0});
facets.push_back({4, 4, 0, interiorMaterialId, -1});
edges.push_back({3, 2});
edges.push_back({2, 6});
edges.push_back({6, 7});
edges.push_back({7, 3});
facets.push_back({8, 4, 0, interiorMaterialId, -1});
edges.push_back({5, 6});
edges.push_back({6, 2});
edges.push_back({2, 1});
edges.push_back({1, 5});
facets.push_back({12, 4, 0, interiorMaterialId, -1});
edges.push_back({4, 5});
edges.push_back({5, 1});
edges.push_back({1, 0});
edges.push_back({0, 4});
facets.push_back({16, 4, 0, interiorMaterialId, -1});
edges.push_back({4, 7});
edges.push_back({7, 6});
edges.push_back({6, 5});
edges.push_back({5, 4});
facets.push_back({20, 4, 0, interiorMaterialId, -1});
for (int i = 0; i < 8; ++i)
positions[i].n = {0, 0, 0};
return new MeshImpl(positions.data(), edges.data(), facets.data(), static_cast<uint32_t>(positions.size()),
static_cast<uint32_t>(edges.size()), static_cast<uint32_t>(facets.size()));
}
bool CmpSharedFace::
operator()(const std::pair<nvidia::NvVec3, nvidia::NvVec3>& pv1, const std::pair<nvidia::NvVec3, nvidia::NvVec3>& pv2) const
{
CmpVec vc;
if ((pv1.first - pv2.first).magnitude() < 1e-5)
{
return vc(pv1.second, pv2.second);
}
return vc(pv1.first, pv2.first);
}
#define INDEXER_OFFSET (1ll << 32)
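// Offset applied to noisy interior face ids, keeping them outside the range
// of ordinary 32-bit ids.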
void buildCuttingConeFaces(const CutoutConfiguration& conf, const std::vector<std::vector<nvidia::NvVec3> >& cutoutPoints,
float heightBot, float heightTop, float conicityBot, float conicityTop, int64_t& id,
int32_t seed, int32_t interiorMaterialId, SharedFacesMap& sharedFacesMap)
{
if (conf.noise.amplitude <= FLT_EPSILON)
{
return;
}
std::map<nvidia::NvVec3, std::pair<uint32_t, std::vector<nvidia::NvVec3> >, CmpVec> newCutoutPoints;
uint32_t resH = std::max((uint32_t)std::roundf((heightBot + heightTop) / conf.noise.samplingInterval.z), 1u);
// generate noisy faces
SimplexNoise nEval(conf.noise.amplitude, conf.noise.frequency, conf.noise.octaveNumber, seed);
for (uint32_t i = 0; i < cutoutPoints.size(); i++)
{
auto& points = cutoutPoints[i];
uint32_t pointCount = points.size();
float finalP = 0, currentP = 0;
for (uint32_t j = 0; j < pointCount; j++)
{
finalP += (points[(j + 1) % pointCount] - points[j]).magnitude();
}
for (uint32_t p = 0; p < pointCount; p++)
{
auto p0 = points[p];
auto p1 = points[(p + 1) % pointCount];
auto cp0 = newCutoutPoints.find(p0);
if (cp0 == newCutoutPoints.end())
{
newCutoutPoints[p0] = std::make_pair(0u, std::vector<nvidia::NvVec3>(resH + 1, nvidia::NvVec3(0.f)));
cp0 = newCutoutPoints.find(p0);
}
auto cp1 = newCutoutPoints.find(p1);
if (cp1 == newCutoutPoints.end())
{
newCutoutPoints[p1] = std::make_pair(0u, std::vector<nvidia::NvVec3>(resH + 1, nvidia::NvVec3(0.f)));
cp1 = newCutoutPoints.find(p1);
}
auto vec = p1 - p0;
auto cPos = (p0 + p1) * 0.5f;
uint32_t numPts = (uint32_t)(std::abs(vec.x) / conf.noise.samplingInterval.x +
std::abs(vec.y) / conf.noise.samplingInterval.y) +
1;
auto normal = vec.cross(nvidia::NvVec3(0, 0, 1));
auto p00 = p0 * conicityBot;
p00.z = -heightBot;
auto p01 = p1 * conicityBot;
p01.z = -heightBot;
auto p10 = p0 * conicityTop;
p10.z = heightTop;
auto p11 = p1 * conicityTop;
p11.z = heightTop;
PlaneStepper stepper(p00, p01, p10, p11, resH, numPts);
PlaneStepper stepper1(normal, cPos, heightTop, vec.magnitude() * 0.5f, resH, numPts, true);
NV_UNUSED(stepper1);  // constructed but not otherwise used in this code path
auto t = std::make_pair(p0, p1);
auto sfIt = sharedFacesMap.find(t);
if (sfIt == sharedFacesMap.end() && sharedFacesMap.find(std::make_pair(p1, p0)) == sharedFacesMap.end())
{
sharedFacesMap[t] = SharedFace(numPts, resH, -(id + INDEXER_OFFSET), interiorMaterialId);
sfIt = sharedFacesMap.find(t);
auto& SF = sfIt->second;
getNoisyFace(SF.vertices, SF.edges, SF.facets, resH, numPts,
nvidia::NvVec2(0, CYLINDER_UV_SCALE * currentP / (heightBot + heightTop)),
nvidia::NvVec2(CYLINDER_UV_SCALE / resH,
CYLINDER_UV_SCALE * vec.magnitude() / (heightBot + heightTop) / numPts),
stepper, nEval, id++ + INDEXER_OFFSET, interiorMaterialId, true);
currentP += vec.magnitude();
cp0->second.first++;
cp1->second.first++;
for (uint32_t k = 0; k <= resH; k++)
{
cp0->second.second[k] += toNvShared(SF.vertices[k].p);
cp1->second.second[k] += toNvShared(SF.vertices[SF.vertices.size() - resH - 1 + k].p);
}
}
}
}
// limit face displacement iteratively: at sharp corners (small dot product
// between adjacent segment directions) blend vertices back toward their
// unperturbed positions
for (uint32_t i = 0; i < cutoutPoints.size(); i++)
{
auto& points = cutoutPoints[i];
uint32_t pointCount = points.size();
for (uint32_t p = 0; p < pointCount; p++)
{
auto p0 = points[p];
auto p1 = points[(p + 1) % pointCount];
auto p2 = points[(p + 2) % pointCount];
auto& cp1 = newCutoutPoints.find(p1)->second;
float d = nvidia::NvClamp((p1 - p0).getNormalized().dot((p2 - p1).getNormalized()), 0.f, 1.f);
for (uint32_t h = 0; h <= resH; h++)
{
float z = cp1.second[h].z;
float conicity = (conicityBot * h + conicityTop * (resH - h)) / resH;
cp1.second[h] = cp1.second[h] * d + p1 * cp1.first * conicity * (1.f - d);
cp1.second[h].z = z;
}
}
}
// relax neighboring points where the displacement limiting above was too
// aggressive, smoothing out large differences between adjacent corners
for (uint32_t i = 0; i < cutoutPoints.size(); i++)
{
auto& points = cutoutPoints[i];
uint32_t pointCount = points.size();
for (uint32_t p = 0; p < pointCount; p++)
{
auto p0 = points[p];
auto p1 = points[(p + 1) % pointCount];
auto& cp0 = newCutoutPoints.find(p0)->second;
auto& cp1 = newCutoutPoints.find(p1)->second;
auto SFIt = sharedFacesMap.find(std::make_pair(p0, p1));
uint32_t idx0 = 0, idx1;
if (SFIt == sharedFacesMap.end())
{
SFIt = sharedFacesMap.find(std::make_pair(p1, p0));
idx1 = 0;
idx0 = SFIt->second.w * (SFIt->second.h + 1);
}
else
{
idx1 = SFIt->second.w * (SFIt->second.h + 1);
}
for (uint32_t h = 0; h <= resH; h++)
{
float z = cp1.second[h].z;
float R0 = (cp0.second[h] / cp0.first - toNvShared(SFIt->second.vertices[idx0 + h].p)).magnitude();
float R1 = (cp1.second[h] / cp1.first - toNvShared(SFIt->second.vertices[idx1 + h].p)).magnitude();
float R = R0 - R1;
float r = 0.25f * (cp1.second[h] / cp1.first - cp0.second[h] / cp0.first).magnitude();
float conicity = (conicityBot * h + conicityTop * (resH - h)) / resH;
if (R > r)
{
float w = std::min(1.f, r / R);
cp1.second[h] = cp1.second[h] * w + p1 * cp1.first * conicity * (1.f - w);
cp1.second[h].z = z;
}
}
}
for (int32_t p = pointCount - 1; p >= 0; p--)
{
auto p0 = points[p];
auto p1 = points[unsignedMod(p - 1, pointCount)];
auto& cp0 = newCutoutPoints.find(p0)->second;
auto& cp1 = newCutoutPoints.find(p1)->second;
auto SFIt = sharedFacesMap.find(std::make_pair(p0, p1));
uint32_t idx0 = 0, idx1;
if (SFIt == sharedFacesMap.end())
{
SFIt = sharedFacesMap.find(std::make_pair(p1, p0));
idx1 = 0;
idx0 = SFIt->second.w * (SFIt->second.h + 1);
}
else
{
idx1 = SFIt->second.w * (SFIt->second.h + 1);
}
for (uint32_t h = 0; h <= resH; h++)
{
float z = cp1.second[h].z;
float R0 = (cp0.second[h] / cp0.first - toNvShared(SFIt->second.vertices[idx0 + h].p)).magnitude();
float R1 = (cp1.second[h] / cp1.first - toNvShared(SFIt->second.vertices[idx1 + h].p)).magnitude();
float R = R0 - R1;
float r = 0.25f * (cp1.second[h] / cp1.first - cp0.second[h] / cp0.first).magnitude();
float conicity = (conicityBot * h + conicityTop * (resH - h)) / resH;
if (R > r)
{
float w = std::min(1.f, r / R);
cp1.second[h] = cp1.second[h] * w + p1 * cp1.first * conicity * (1.f - w);
cp1.second[h].z = z;
}
}
}
}
// glue faces
for (auto& SF : sharedFacesMap)
{
auto& cp0 = newCutoutPoints.find(SF.first.first)->second;
auto& cp1 = newCutoutPoints.find(SF.first.second)->second;
auto& v = SF.second.vertices;
float invW = 1.f / SF.second.w;
for (uint32_t w = 0; w <= SF.second.w; w++)
{
for (uint32_t h = 0; h <= SF.second.h; h++)
{
toNvShared(v[w * (SF.second.h + 1) + h].p) +=
((cp0.second[h] / cp0.first - toNvShared(v[h].p)) * (SF.second.w - w) +
(cp1.second[h] / cp1.first - toNvShared(v[SF.second.w * (SF.second.h + 1) + h].p)) * w) *
invW;
}
}
}
}
Mesh* getNoisyCuttingCone(const std::vector<nvidia::NvVec3>& points, const std::set<int32_t>& smoothingGroups,
const nvidia::NvTransform& transform, bool useSmoothing, float heightBot, float heightTop,
float conicityMultiplierBot, float conicityMultiplierTop, nvidia::NvVec3 samplingInterval,
int32_t interiorMaterialId, const SharedFacesMap& sharedFacesMap, bool inverseNormals)
{
NV_UNUSED(conicityMultiplierTop);
NV_UNUSED(conicityMultiplierBot);
uint32_t pointCount = points.size();
uint32_t resP = pointCount;
for (uint32_t i = 0; i < pointCount; i++)
{
auto vec = (points[(i + 1) % pointCount] - points[i]);
resP += (uint32_t)(std::abs(vec.x) / samplingInterval.x + std::abs(vec.y) / samplingInterval.y);
}
uint32_t resH = std::max((uint32_t)std::roundf((heightBot + heightTop) / samplingInterval.z), 1u);
std::vector<Vertex> positions;
positions.reserve((resH + 1) * (resP + 1));
std::vector<Edge> edges;
edges.reserve(resH * resP * 6 + (resP + 1) * 2);
std::vector<Facet> facets;
facets.reserve(resH * resP * 2 + 2);
uint32_t pCount = 0;
int sg = useSmoothing ? 1 : -1;
for (uint32_t p = 0; p < pointCount; p++)
{
if (useSmoothing && smoothingGroups.find(p) != smoothingGroups.end())
{
sg = sg ^ 3;
}
auto p0 = points[p];
auto p1 = points[(p + 1) % pointCount];
uint32_t firstVertexIndex = positions.size();
uint32_t firstEdgeIndex = edges.size();
auto sfIt = sharedFacesMap.find(std::make_pair(p0, p1));
int32_t vBegin = 0, vEnd = -1, vIncr = 1;
if (sfIt == sharedFacesMap.end())
{
sfIt = sharedFacesMap.find(std::make_pair(p1, p0));
vBegin = sfIt->second.w;
vIncr = -1;
}
else
{
vEnd = sfIt->second.w + 1;
}
auto& SF = sfIt->second;
positions.resize(firstVertexIndex + (SF.w + 1) * (SF.h + 1));
if (vBegin < vEnd)
{
for (auto& e : SF.edges)
{
edges.push_back({e.s + firstVertexIndex, e.e + firstVertexIndex});
}
for (auto& f : SF.facets)
{
facets.push_back(f);
facets.back().firstEdgeNumber += firstEdgeIndex;
facets.back().smoothingGroup = sg;
}
}
else
{
fillEdgesAndFaces(edges, facets, SF.h, SF.w, firstVertexIndex, positions.size(), SF.f.userData,
SF.f.materialId, sg, true);
}
for (int32_t v = vBegin; v != vEnd; v += vIncr)
{
std::copy(SF.vertices.begin() + v * (resH + 1), SF.vertices.begin() + (v + 1) * (SF.h + 1),
positions.begin() + firstVertexIndex);
firstVertexIndex += SF.h + 1;
}
pCount += SF.vertices.size() / (resH + 1) - 1;
}
if (inverseNormals)
{
for (uint32_t e = 0; e < edges.size(); e += 3)
{
std::swap(edges[e + 0].s, edges[e + 0].e);
std::swap(edges[e + 1].s, edges[e + 1].e);
std::swap(edges[e + 2].s, edges[e + 2].e);
std::swap(edges[e + 0], edges[e + 2]);
}
}
uint32_t totalCount = pCount + pointCount;
calculateNormals(positions, resH, totalCount - 1, inverseNormals);
for (auto& p : positions)
{
toNvShared(p.p) = transform.transform(toNvShared(p.p));
toNvShared(p.n) = transform.rotate(toNvShared(p.n));
}
totalCount /= 2;
for (uint32_t i = 0; i < totalCount; i++)
{
uint32_t idx = 2 * i * (resH + 1);
edges.push_back({idx, (idx + 2 * (resH + 1)) % (uint32_t)positions.size()});
}
for (int32_t i = totalCount; i > 0; i--)
{
uint32_t idx = (2 * i + 1) * (resH + 1) - 1;
edges.push_back({ idx % (uint32_t)positions.size(), idx - 2 * (resH + 1)});
}
if (smoothingGroups.find(0) != smoothingGroups.end() || smoothingGroups.find(pointCount - 1) != smoothingGroups.end())
{
if (facets[0].smoothingGroup == facets[facets.size() - 1].smoothingGroup)
{
for (uint32_t i = 0; i < resH; i++)
{
facets[i].smoothingGroup = 4;
}
}
}
facets.push_back({ (int32_t)(resH * pCount * 6), totalCount, 0, interiorMaterialId, -1 });
facets.push_back({ (int32_t)(resH * pCount * 6 + totalCount), totalCount, 0, interiorMaterialId, -1 });
return new MeshImpl(positions.data(), edges.data(), facets.data(), static_cast<uint32_t>(positions.size()),
static_cast<uint32_t>(edges.size()), static_cast<uint32_t>(facets.size()));
}
Mesh* getCuttingCone(const CutoutConfiguration& conf, const std::vector<nvidia::NvVec3>& points,
const std::set<int32_t>& smoothingGroups, float heightBot, float heightTop, float conicityBot,
float conicityTop, int64_t& id, int32_t seed, int32_t interiorMaterialId,
const SharedFacesMap& sharedFacesMap, bool inverseNormals)
{
NV_UNUSED(seed);
uint32_t pointCount = points.size();
if (conf.noise.amplitude > FLT_EPSILON)
{
return getNoisyCuttingCone(points, smoothingGroups, toNvShared(conf.transform), conf.useSmoothing, heightBot, heightTop,
conicityBot, conicityTop, toNvShared(conf.noise.samplingInterval), interiorMaterialId,
sharedFacesMap, inverseNormals);
}
float currentP = 0;
std::vector<Vertex> positions((pointCount + 1) * 2);
std::vector<Edge> edges(pointCount * 6 + 2);
std::vector<Facet> facets(pointCount + 2);
int sg = conf.useSmoothing ? 1 : -1;
for (uint32_t i = 0; i < pointCount + 1; i++)
{
if (conf.useSmoothing && smoothingGroups.find(i) != smoothingGroups.end())
{
sg = sg ^ 3;
}
uint32_t i1 = i + pointCount + 1;
uint32_t i3 = i + 1;
uint32_t i2 = i3 + pointCount + 1;
auto& p0 = positions[i];
auto& p1 = positions[i1];
p0.n = p1.n = {0.f, 0.f, 0.f};
toNvShared(p0.p) = points[i % pointCount] * conicityBot;
p0.p.z = -heightBot;
toNvShared(p1.p) = points[i % pointCount] * conicityTop;
p1.p.z = heightTop;
toNvShared(p0.p) = toNvShared(conf.transform).transform(toNvShared(p0.p));
toNvShared(p1.p) = toNvShared(conf.transform).transform(toNvShared(p1.p));
p0.uv[0] = {0.f, CYLINDER_UV_SCALE * currentP / (heightBot + heightTop)};
p1.uv[0] = {CYLINDER_UV_SCALE, CYLINDER_UV_SCALE * currentP / (heightBot + heightTop)};
if (i == pointCount)
{
break;
}
currentP += (points[(i + 1) % pointCount] - points[i]).magnitude();
int32_t edgeIdx = 4 * i;
if (inverseNormals)
{
edges[edgeIdx + 1] = {i1, i2};
edges[edgeIdx + 2] = {i2, i3};
edges[edgeIdx + 3] = {i3, i};
edges[edgeIdx + 0] = {i, i1};
}
else
{
edges[edgeIdx + 0] = {i, i3};
edges[edgeIdx + 1] = {i3, i2};
edges[edgeIdx + 2] = {i2, i1};
edges[edgeIdx + 3] = {i1, i};
}
facets[i] = {edgeIdx, 4, id++, interiorMaterialId, sg};
edges[5 * pointCount + i + 1] = {i1, i2};
edges[5 * pointCount - i - 1] = {i3, i};
}
edges[5 * pointCount] = {0, pointCount};
edges[6 * pointCount + 1] = {2 * pointCount + 1, pointCount + 1};
if (smoothingGroups.find(0) != smoothingGroups.end() || smoothingGroups.find(pointCount - 1) != smoothingGroups.end())
{
if (facets[0].smoothingGroup == facets[pointCount - 1].smoothingGroup)
{
facets[0].smoothingGroup = 4;
}
}
facets[pointCount + 0] = { 4 * (int32_t)pointCount, pointCount + 1, 0, interiorMaterialId, -1 };
facets[pointCount + 1] = { 5 * (int32_t)pointCount + 1, pointCount + 1, 0, interiorMaterialId, -1 };
return new MeshImpl(positions.data(), edges.data(), facets.data(), static_cast<uint32_t>(positions.size()),
static_cast<uint32_t>(edges.size()), static_cast<uint32_t>(facets.size()));
}
} // namespace Blast
} // namespace Nv |
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringMeshNoiser.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved.
// This warning arises when using some stl containers with older versions of VC
// c:\program files (x86)\microsoft visual studio 12.0\vc\include\xtree(1826): warning C4702: unreachable code
#include "NvPreprocessor.h"
#if NV_VC && NV_VC < 14
#pragma warning(disable : 4702)
#endif
#include "NvBlastExtAuthoringMeshNoiser.h"
#include "NvBlastExtAuthoringPerlinNoise.h"
#include <set>
#include <queue>
#include <NvBlastAssert.h>
#include <NvBlastNvSharedHelpers.h>
using namespace Nv::Blast;
using namespace std;
void MeshNoiser::computeFalloffAndNormals()
{
// Map newly created vertices according to positions
computePositionedMapping();
mGeometryGraph.resize(mVertices.size());
for (uint32_t i = 0; i < mEdges.size(); ++i)
{
if (mTrMeshEdToTr[i].c == 0)
{
continue;
}
int32_t v1 = mPositionMappedVrt[mEdges[i].s];
int32_t v2 = mPositionMappedVrt[mEdges[i].e];
if (std::find(mGeometryGraph[v1].begin(), mGeometryGraph[v1].end(), v2) == mGeometryGraph[v1].end())
mGeometryGraph[v1].push_back(v2);
if (std::find(mGeometryGraph[v2].begin(), mGeometryGraph[v2].end(), v1) == mGeometryGraph[v2].end())
mGeometryGraph[v2].push_back(v1);
}
mVerticesDistances.clear();
mVerticesDistances.resize(mVertices.size(), 10000.0f);
std::queue<int32_t> que;
for (uint32_t i = 0; i < mEdges.size(); ++i)
{
if (mTrMeshEdToTr[i].c != 0 && (mEdgeFlag[i] == EXTERNAL_EDGE || mEdgeFlag[i] == EXTERNAL_BORDER_EDGE))
{
int32_t v1 = mPositionMappedVrt[mEdges[i].s];
int32_t v2 = mPositionMappedVrt[mEdges[i].e];
mVerticesDistances[v1] = 0.0f;
mVerticesDistances[v2] = 0.0f;
que.push(v1);
que.push(v2);
}
}
while (!que.empty())
{
int32_t curr = que.front();
que.pop();
for (uint32_t i = 0; i < mGeometryGraph[curr].size(); ++i)
{
int32_t to = mGeometryGraph[curr][i];
float d = mVerticesDistances[curr] + 0.1f; // (mVertices[to].p - mVertices[curr].p).magnitudeSquared();
if (d < mVerticesDistances[to])
{
mVerticesDistances[to] = d;
que.push(to);
}
}
}
for (uint32_t i = 0; i < mVerticesDistances.size(); ++i)
{
int32_t from = mPositionMappedVrt[i];
mVerticesDistances[i] = mVerticesDistances[from];
}
}
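// Conservative overlap test for collinear segments: the three interval checks
// reject pairs whose axis-aligned bounds do not overlap, then both endpoints
// of the second segment must be (nearly) collinear with the first, judged by
// squared cross-product magnitudes against a small epsilon.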
bool edgeOverlapTest(NvcVec3& as, NvcVec3& ae, NvcVec3& bs, NvcVec3& be)
{
if (std::max(std::min(as.x, ae.x), std::min(bs.x, be.x)) > std::min(std::max(as.x, ae.x), std::max(bs.x, be.x)))
return false;
if (std::max(std::min(as.y, ae.y), std::min(bs.y, be.y)) > std::min(std::max(as.y, ae.y), std::max(bs.y, be.y)))
return false;
if (std::max(std::min(as.z, ae.z), std::min(bs.z, be.z)) > std::min(std::max(as.z, ae.z), std::max(bs.z, be.z)))
return false;
return (toNvShared(bs - as).cross(toNvShared(ae - as))).magnitudeSquared() < 1e-6f &&
(toNvShared(be - as).cross(toNvShared(ae - as))).magnitudeSquared() < 1e-6f;
}
void MeshNoiser::computePositionedMapping()
{
std::map<NvcVec3, int32_t, VrtPositionComparator> mPosMap;
mPositionMappedVrt.clear();
mPositionMappedVrt.resize(mVertices.size());
for (uint32_t i = 0; i < mVertices.size(); ++i)
{
auto it = mPosMap.find(mVertices[i].p);
if (it == mPosMap.end())
{
mPosMap[mVertices[i].p] = i;
mPositionMappedVrt[i] = i;
}
else
{
mPositionMappedVrt[i] = it->second;
}
}
}
void MeshNoiser::relax(int32_t iteration, float factor, std::vector<Vertex>& vertices)
{
std::vector<NvVec3> verticesTemp(vertices.size());
std::vector<NvVec3> normalsTemp(vertices.size());
for (int32_t iter = 0; iter < iteration; ++iter)
{
for (uint32_t i = 0; i < vertices.size(); ++i)
{
if (mRestrictionFlag[i])
{
continue;
}
NvVec3 cps = toNvShared(vertices[i].p);
NvVec3 cns = mVerticesNormalsSmoothed[i];
NvVec3 averaged(0, 0, 0);
NvVec3 averagedNormal(0, 0, 0);
for (uint32_t p = 0; p < mGeometryGraph[mPositionMappedVrt[i]].size(); ++p)
{
int32_t to = mGeometryGraph[mPositionMappedVrt[i]][p];
averaged += toNvShared(vertices[to].p);
averagedNormal += mVerticesNormalsSmoothed[to];
}
averaged *= (1.0f / mGeometryGraph[mPositionMappedVrt[i]].size());
averagedNormal *= (1.0f / mGeometryGraph[mPositionMappedVrt[i]].size());
verticesTemp[i] = cps + (averaged - cps) * factor;
normalsTemp[i] = cns * (1.0f - factor) + averagedNormal * factor;
}
for (uint32_t i = 0; i < vertices.size(); ++i)
{
if (mRestrictionFlag[i])
{
continue;
}
vertices[i].p = fromNvShared(verticesTemp[i]);
mVerticesNormalsSmoothed[i] = normalsTemp[i].getNormalized();
}
}
}
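// Classifies an edge from the triangles that reference it: userData == 0
// marks exterior geometry, nonzero marks interior (cut) surfaces. An edge
// shared by exterior and interior triangles, or by interior triangles from
// different cuts (different owners), becomes a border edge; border vertices
// are later pinned so noise and relaxation do not move them.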
NV_FORCE_INLINE void
markEdge(int32_t ui, int32_t ed, std::vector<MeshNoiser::EdgeFlag>& shortMarkup, std::vector<int32_t>& lastOwner)
{
if (shortMarkup[ed] == MeshNoiser::NONE)
{
if (ui == 0)
{
shortMarkup[ed] = MeshNoiser::EXTERNAL_EDGE;
}
else
{
shortMarkup[ed] = MeshNoiser::INTERNAL_EDGE;
}
lastOwner[ed] = ui;
}
else
{
if (ui != 0)
{
if (shortMarkup[ed] == MeshNoiser::EXTERNAL_EDGE)
{
shortMarkup[ed] = MeshNoiser::EXTERNAL_BORDER_EDGE;
}
if ((shortMarkup[ed] == MeshNoiser::INTERNAL_EDGE) && ui != lastOwner[ed])
{
shortMarkup[ed] = MeshNoiser::INTERNAL_BORDER_EDGE;
}
}
else
{
if (shortMarkup[ed] != MeshNoiser::EXTERNAL_EDGE)
{
shortMarkup[ed] = MeshNoiser::EXTERNAL_BORDER_EDGE;
}
}
}
}
void MeshNoiser::prebuildEdgeFlagArray()
{
mRestrictionFlag.clear();
mRestrictionFlag.resize(mVertices.size());
mEdgeFlag.clear();
mEdgeFlag.resize(mEdges.size(), NONE);
std::map<NvcVec3, int32_t, VrtPositionComparator> mPosMap;
mPositionMappedVrt.clear();
mPositionMappedVrt.resize(mVertices.size(), 0);
for (uint32_t i = 0; i < mVertices.size(); ++i)
{
auto it = mPosMap.find(mVertices[i].p);
if (it == mPosMap.end())
{
mPosMap[mVertices[i].p] = i;
mPositionMappedVrt[i] = i;
}
else
{
mPositionMappedVrt[i] = it->second;
}
}
std::map<Edge, int32_t> mPositionEdgeMap;
std::vector<int32_t> mPositionBasedEdges(mEdges.size());
for (uint32_t i = 0; i < mEdges.size(); ++i)
{
Edge tmp = { mPositionMappedVrt[mEdges[i].s], mPositionMappedVrt[mEdges[i].e] };
if (tmp.e < tmp.s)
std::swap(tmp.e, tmp.s);
auto it = mPositionEdgeMap.find(tmp);
if (it == mPositionEdgeMap.end())
{
mPositionEdgeMap[tmp] = i;
mPositionBasedEdges[i] = i;
}
else
{
mPositionBasedEdges[i] = it->second;
}
}
std::vector<EdgeFlag> shortMarkup(mEdges.size(), NONE);
std::vector<int32_t> lastOwner(mEdges.size(), 0);
std::vector<std::vector<int32_t> > edgeOverlap(mEdges.size());
for (auto it1 = mPositionEdgeMap.begin(); it1 != mPositionEdgeMap.end(); ++it1)
{
auto it2 = it1;
it2++;
for (; it2 != mPositionEdgeMap.end(); ++it2)
{
Edge& ed1 = mEdges[it1->second];
Edge& ed2 = mEdges[it2->second];
if (edgeOverlapTest(mVertices[ed1.s].p, mVertices[ed1.e].p, mVertices[ed2.s].p, mVertices[ed2.e].p))
{
edgeOverlap[it1->second].push_back(it2->second);
}
}
}
for (uint32_t i = 0; i < mTriangles.size(); ++i)
{
int32_t ui = mTriangles[i].userData;
int32_t ed = mPositionBasedEdges[findEdge({ mTriangles[i].ea, mTriangles[i].eb })];
markEdge(ui, ed, shortMarkup, lastOwner);
for (uint32_t ov = 0; ov < edgeOverlap[ed].size(); ++ov)
{
markEdge(ui, edgeOverlap[ed][ov], shortMarkup, lastOwner);
}
ed = mPositionBasedEdges[findEdge({ mTriangles[i].ea, mTriangles[i].ec })];
markEdge(ui, ed, shortMarkup, lastOwner);
for (uint32_t ov = 0; ov < edgeOverlap[ed].size(); ++ov)
{
markEdge(ui, edgeOverlap[ed][ov], shortMarkup, lastOwner);
}
ed = mPositionBasedEdges[findEdge({ mTriangles[i].eb, mTriangles[i].ec })];
markEdge(ui, ed, shortMarkup, lastOwner);
for (uint32_t ov = 0; ov < edgeOverlap[ed].size(); ++ov)
{
markEdge(ui, edgeOverlap[ed][ov], shortMarkup, lastOwner);
}
}
for (uint32_t i = 0; i < mEdges.size(); ++i)
{
mEdgeFlag[i] = shortMarkup[mPositionBasedEdges[i]];
}
for (uint32_t i = 0; i < mTriangles.size(); ++i)
{
if (mTriangles[i].userData != 0)
continue;
int32_t ed = findEdge({ mTriangles[i].ea, mTriangles[i].eb });
mEdgeFlag[ed] = EXTERNAL_EDGE;
ed = findEdge({ mTriangles[i].ec, mTriangles[i].eb });
mEdgeFlag[ed] = EXTERNAL_EDGE;
ed = findEdge({ mTriangles[i].ea, mTriangles[i].ec });
mEdgeFlag[ed] = EXTERNAL_EDGE;
}
}
NV_FORCE_INLINE int32_t MeshNoiser::addVerticeIfNotExist(const Vertex& p)
{
auto it = mVertMap.find(p);
if (it == mVertMap.end())
{
mVertMap[p] = static_cast<int32_t>(mVertices.size());
mVertices.push_back(p);
return static_cast<int32_t>(mVertices.size()) - 1;
}
else
{
return it->second;
}
}
NV_FORCE_INLINE int32_t MeshNoiser::addEdge(const Edge& e)
{
Edge ed = e;
if (ed.e < ed.s)
std::swap(ed.s, ed.e);
auto it = mEdgeMap.find(ed);
if (it == mEdgeMap.end())
{
mTrMeshEdToTr.push_back(EdgeToTriangles());
const int32_t index = (int32_t)mEdges.size();
mEdgeMap[ed] = index;  // compute the index first: before C++17, operator[] could insert before size() is read
mEdges.push_back(ed);
mEdgeFlag.push_back(INTERNAL_EDGE);
return index;
}
else
{
return it->second;
}
}
NV_FORCE_INLINE int32_t MeshNoiser::findEdge(const Edge& e)
{
Edge ed = e;
if (ed.e < ed.s)
std::swap(ed.s, ed.e);
auto it = mEdgeMap.find(ed);
if (it == mEdgeMap.end())
{
return -1;
}
else
{
return it->second;
}
}
/**
Weld input vertices and build the edge and triangle buffers. Vertices are
recentered and uniformly rescaled to fit a unit-sized box; this transform is
undone later in prebuildTesselatedTriangles().
*/
void MeshNoiser::setMesh(const vector<Triangle>& mesh)
{
uint32_t a, b, c;
nvidia::NvBounds3 box;
box.setEmpty();
for (uint32_t i = 0; i < mesh.size(); ++i)
{
const Triangle& tr = mesh[i];
a = addVerticeIfNotExist(tr.a);
b = addVerticeIfNotExist(tr.b);
c = addVerticeIfNotExist(tr.c);
box.include(toNvShared(tr.a.p));
box.include(toNvShared(tr.b.p));
box.include(toNvShared(tr.c.p));
addEdge({ a, b });
addEdge({ b, c });
addEdge({ a, c });
mTriangles.push_back({a, b, c});
mTriangles.back().userData = tr.userData;
mTriangles.back().materialId = tr.materialId;
mTriangles.back().smoothingGroup = tr.smoothingGroup;
}
mOffset = box.getCenter();
mScale = max(box.getExtents(0), max(box.getExtents(1), box.getExtents(2)));
float invScale = 1.0f / mScale;
for (uint32_t i = 0; i < mVertices.size(); ++i)
{
mVertices[i].p = mVertices[i].p - fromNvShared(box.getCenter());
mVertices[i].p = mVertices[i].p * invScale;
}
}
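// Tessellates interior (cut) surfaces toward a roughly uniform edge length:
// up to 15 passes alternately collapse edges shorter than maxLen / 2 and
// split edges longer than maxLen, leaving exterior edges untouched.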
void MeshNoiser::tesselateInternalSurface(float maxLenIn)
{
if (mTriangles.empty())
{
return;
}
updateEdgeTriangleInfo();
prebuildEdgeFlagArray();
mRestrictionFlag.resize(mVertices.size(), 0);
for (uint32_t i = 0; i < mEdges.size(); ++i)
{
if (mEdgeFlag[i] == EXTERNAL_EDGE || mEdgeFlag[i] == EXTERNAL_BORDER_EDGE || mEdgeFlag[i] == INTERNAL_BORDER_EDGE)
{
mRestrictionFlag[mEdges[i].s] = 1;
mRestrictionFlag[mEdges[i].e] = 1;
}
}
float maxLen = maxLenIn;
float mlSq = maxLen * maxLen;
float minD = maxLen * 0.5f;
minD = minD * minD;
for (int32_t iter = 0; iter < 15; ++iter)
{
updateVertEdgeInfo();
uint32_t oldSize = (uint32_t)mEdges.size();
for (uint32_t i = 0; i < oldSize; ++i)
{
if (mEdgeFlag[i] == EXTERNAL_EDGE || mEdgeFlag[i] == INTERNAL_BORDER_EDGE)
{
continue;
}
if (toNvShared(mVertices[mEdges[i].s].p - mVertices[mEdges[i].e].p).magnitudeSquared() < minD)
{
collapseEdge(i);
}
}
oldSize = (uint32_t)mEdges.size();
updateEdgeTriangleInfo();
for (uint32_t i = 0; i < oldSize; ++i)
{
if (mEdgeFlag[i] == EXTERNAL_EDGE)
{
continue;
}
if (toNvShared(mVertices[mEdges[i].s].p - mVertices[mEdges[i].e].p).magnitudeSquared() > mlSq)
{
divideEdge(i);
}
}
}
computeFalloffAndNormals();
prebuildTesselatedTriangles();
isTesselated = true;
}
void MeshNoiser::updateEdgeTriangleInfo()
{
mTrMeshEdToTr.clear();
mTrMeshEdToTr.resize(mEdges.size());
for (uint32_t i = 0; i < mTriangles.size(); ++i)
{
TriangleIndexed& tr = mTriangles[i];
if (tr.ea == kNotValidVertexIndex)
continue;
int32_t ed = addEdge({ tr.ea, tr.eb });
mTrMeshEdToTr[ed].add(i);
ed = addEdge({ tr.ea, tr.ec });
mTrMeshEdToTr[ed].add(i);
ed = addEdge({ tr.ec, tr.eb });
mTrMeshEdToTr[ed].add(i);
}
}
void MeshNoiser::updateVertEdgeInfo()
{
mVertexToTriangleMap.clear();
mVertexToTriangleMap.resize(mVertices.size());
for (uint32_t i = 0; i < mTriangles.size(); ++i)
{
TriangleIndexed& tr = mTriangles[i];
if (tr.ea == kNotValidVertexIndex)
continue;
mVertexToTriangleMap[tr.ea].push_back(i);
mVertexToTriangleMap[tr.eb].push_back(i);
mVertexToTriangleMap[tr.ec].push_back(i);
}
mVertexValence.clear();
mVertexValence.resize(mVertices.size(), 0);
for (uint32_t i = 0; i < mEdges.size(); ++i)
{
if (mTrMeshEdToTr[i].c != 0)
{
mVertexValence[mEdges[i].s]++;
mVertexValence[mEdges[i].e]++;
}
}
}
inline bool isContainEdge(const TriangleIndexed& t, uint32_t a, uint32_t b)
{
return (a == t.ea || a == t.eb || a == t.ec) && (b == t.ea || b == t.eb || b == t.ec);
}
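// Collapses the edge by merging vertex 'from' into 'to'. The collapse is
// skipped when both endpoints are restricted (border/exterior), when the edge
// is shared by more than two triangles, or when any surviving triangle would
// become degenerate or flip its normal.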
void MeshNoiser::collapseEdge(int32_t id)
{
Edge cEdge = mEdges[id];
uint32_t from = cEdge.s;
uint32_t to = cEdge.e;
if (mRestrictionFlag[from] && mRestrictionFlag[to])
{
return;
}
if (mVertexValence[from] > mVertexValence[to])
{
std::swap(from, to);
}
if (mRestrictionFlag[from])
{
std::swap(from, to);
}
std::set<int32_t> connectedToBegin;
std::set<int32_t> connectedToEnd;
std::set<int32_t> neighborTriangles;
int32_t trWithEdge[2] = { -1, -1 };
int32_t cntr = 0;
for (uint32_t i = 0; i < mVertexToTriangleMap[from].size(); ++i)
{
if (mTriangles[mVertexToTriangleMap[from][i]].ea == kNotValidVertexIndex)
continue;
if (neighborTriangles.insert(mVertexToTriangleMap[from][i]).second &&
isContainEdge(mTriangles[mVertexToTriangleMap[from][i]] , from, to))
{
trWithEdge[cntr] = mVertexToTriangleMap[from][i];
cntr++;
}
}
for (uint32_t i = 0; i < mVertexToTriangleMap[to].size(); ++i)
{
if (mTriangles[mVertexToTriangleMap[to][i]].ea == kNotValidVertexIndex)
continue;
if (neighborTriangles.insert(mVertexToTriangleMap[to][i]).second &&
isContainEdge(mTriangles[mVertexToTriangleMap[to][i]], from, to))
{
trWithEdge[cntr] = mVertexToTriangleMap[to][i];
cntr++;
}
}
if (cntr == 0)
{
return;
}
if (cntr > 2)
{
return;
}
for (uint32_t i : neighborTriangles)
{
if (mTriangles[i].ea == from || mTriangles[i].eb == from || mTriangles[i].ec == from)
{
if (mTriangles[i].ea != to && mTriangles[i].ea != from)
connectedToBegin.insert(mTriangles[i].ea);
if (mTriangles[i].eb != to && mTriangles[i].eb != from)
connectedToBegin.insert(mTriangles[i].eb);
if (mTriangles[i].ec != to && mTriangles[i].ec != from)
connectedToBegin.insert(mTriangles[i].ec);
}
if (mTriangles[i].ea == to || mTriangles[i].eb == to || mTriangles[i].ec == to)
{
if (mTriangles[i].ea != to && mTriangles[i].ea != from)
connectedToEnd.insert(mTriangles[i].ea);
if (mTriangles[i].eb != to && mTriangles[i].eb != from)
connectedToEnd.insert(mTriangles[i].eb);
if (mTriangles[i].ec != to && mTriangles[i].ec != from)
connectedToEnd.insert(mTriangles[i].ec);
}
}
bool canBeCollapsed = true;
for (auto it = connectedToBegin.begin(); it != connectedToBegin.end(); ++it)
{
uint32_t currV = *it;
if (connectedToEnd.find(currV) == connectedToEnd.end())
continue;
bool found = false;
for (int32_t tr : neighborTriangles)
{
if ((mTriangles[tr].ea == from || mTriangles[tr].eb == from || mTriangles[tr].ec == from) &&
(mTriangles[tr].ea == to || mTriangles[tr].eb == to || mTriangles[tr].ec == to) &&
(mTriangles[tr].ea == currV || mTriangles[tr].eb == currV || mTriangles[tr].ec == currV))
{
found = true;
break;
}
}
if (!found)
{
canBeCollapsed = false;
break;
}
}
if (canBeCollapsed)
{
for (int32_t i : neighborTriangles)
{
if (trWithEdge[0] == i)
continue;
if (cntr == 2 && trWithEdge[1] == i)
continue;
TriangleIndexed tr = mTriangles[i];
NvVec3 oldNormal =
toNvShared(mVertices[tr.eb].p - mVertices[tr.ea].p).cross(toNvShared(mVertices[tr.ec].p - mVertices[tr.ea].p));
if (tr.ea == from)
{
tr.ea = to;
}
else if (tr.eb == from)
{
tr.eb = to;
}
else if (tr.ec == from)
{
tr.ec = to;
}
NvVec3 newNormal =
toNvShared(mVertices[tr.eb].p - mVertices[tr.ea].p).cross(toNvShared(mVertices[tr.ec].p - mVertices[tr.ea].p));
if (newNormal.magnitude() < 1e-8f)
{
canBeCollapsed = false;
break;
}
if (oldNormal.dot(newNormal) < 0)
{
canBeCollapsed = false;
break;
}
}
if (!canBeCollapsed)
{
return;  // the collapse would create a degenerate or flipped triangle
}
mTriangles[trWithEdge[0]].ea = kNotValidVertexIndex;
if (cntr == 2)
mTriangles[trWithEdge[1]].ea = kNotValidVertexIndex;
for (int32_t i : neighborTriangles)
{
if (mTriangles[i].ea == kNotValidVertexIndex)
continue;
if (mTriangles[i].ea == from)
{
mTriangles[i].ea = to;
mVertexToTriangleMap[from].clear();
mVertexToTriangleMap[to].push_back(i);
}
else if (mTriangles[i].eb == from)
{
mTriangles[i].eb = to;
mVertexToTriangleMap[from].clear();
mVertexToTriangleMap[to].push_back(i);
}
else if (mTriangles[i].ec == from)
{
mTriangles[i].ec = to;
mVertexToTriangleMap[from].clear();
mVertexToTriangleMap[to].push_back(i);
}
}
}
}
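// Splits the edge at its midpoint: each incident triangle is replaced by two,
// and the interpolated midpoint vertex inherits the edge's border restriction
// so border geometry stays pinned during later noise application.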
void MeshNoiser::divideEdge(int32_t id)
{
if (mTrMeshEdToTr[id].c == 0)
{
return;
}
Edge cEdge = mEdges[id];
EdgeFlag snapRestriction = mEdgeFlag[id];
Vertex middle;
uint32_t nv = kNotValidVertexIndex;
for (int32_t t = 0; t < mTrMeshEdToTr[id].c; ++t)
{
int32_t oldTriangleIndex = mTrMeshEdToTr[id].tr[t];
TriangleIndexed tr = mTriangles[mTrMeshEdToTr[id].tr[t]];
if (tr.ea == kNotValidVertexIndex)
{
continue;
}
uint32_t pbf[3];
pbf[0] = tr.ea;
pbf[1] = tr.eb;
pbf[2] = tr.ec;
for (int32_t p = 0; p < 3; ++p)
{
int32_t pnx = (p + 1) % 3;
int32_t opp = (p + 2) % 3;
if ((pbf[p] == cEdge.s && pbf[pnx] == cEdge.e) || (pbf[p] == cEdge.e && pbf[pnx] == cEdge.s))
{
if (nv == kNotValidVertexIndex)
{
middle.p = (mVertices[pbf[p]].p + mVertices[pbf[pnx]].p) * 0.5f;
middle.n = (mVertices[pbf[p]].n + mVertices[pbf[pnx]].n) * 0.5f;
middle.uv[0] = (mVertices[pbf[p]].uv[0] + mVertices[pbf[pnx]].uv[0]) * 0.5f;
nv = (uint32_t)mVertices.size();
mVertices.push_back(middle);
}
if (nv < mRestrictionFlag.size())
{
mRestrictionFlag[nv] =
((snapRestriction == EXTERNAL_BORDER_EDGE) || (snapRestriction == INTERNAL_BORDER_EDGE));
}
else
{
mRestrictionFlag.push_back((snapRestriction == EXTERNAL_BORDER_EDGE) ||
(snapRestriction == INTERNAL_BORDER_EDGE));
}
uint32_t ind1 = addEdge({ pbf[p], nv });
uint32_t ind2 = addEdge({ nv, pbf[pnx] });
uint32_t ind3 = addEdge({ nv, pbf[opp] });
mEdgeFlag[ind1] = snapRestriction;
mEdgeFlag[ind2] = snapRestriction;
mEdgeFlag[ind3] = INTERNAL_EDGE;
mTrMeshEdToTr[ind1].add(mTrMeshEdToTr[id].tr[t]);
int32_t userInfo = mTriangles[mTrMeshEdToTr[id].tr[t]].userData;
int32_t matId = mTriangles[mTrMeshEdToTr[id].tr[t]].materialId;
int32_t smId = mTriangles[mTrMeshEdToTr[id].tr[t]].smoothingGroup;
mTriangles[mTrMeshEdToTr[id].tr[t]] = {pbf[p], nv, pbf[opp]};
mTriangles[mTrMeshEdToTr[id].tr[t]].userData = userInfo;
mTriangles[mTrMeshEdToTr[id].tr[t]].materialId = matId;
mTriangles[mTrMeshEdToTr[id].tr[t]].smoothingGroup = smId;
mTrMeshEdToTr[ind2].add((int32_t)mTriangles.size());
mTrMeshEdToTr[ind3].add((int32_t)mTrMeshEdToTr[id].tr[t]);
mTrMeshEdToTr[ind3].add((int32_t)mTriangles.size());
mTriangles.push_back({nv, pbf[pnx], pbf[opp]});
mTriangles.back().userData = userInfo;
mTriangles.back().materialId = matId;
mTriangles.back().smoothingGroup = smId;
int32_t ed1 = findEdge({ pbf[pnx], pbf[opp] });
mTrMeshEdToTr[ed1].replace(oldTriangleIndex, (int32_t)mTriangles.size() - 1);
break;
}
}
}
}
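// Quadratic falloff ramping from 0 at x == 0 to 1 at x >= mx (the epsilon
// guards against division by zero); used so noise fades in with distance from
// external edges.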
float falloffFunction(float x, float mx)
{
float t = (x) / (mx + 1e-6f);
t = std::min(1.0f, t);
return t * t;
}
void MeshNoiser::recalcNoiseDirs()
{
/**
Compute the normal directions along which noise is applied
*/
mVerticesNormalsSmoothed.resize(mVertices.size(), NvVec3(0, 0, 0));
for (uint32_t i = 0; i < mTriangles.size(); ++i)
{
if (mTriangles[i].ea == kNotValidVertexIndex)
{
continue;
}
TriangleIndexed& tr = mTriangles[i];
if (tr.userData == 0)
continue;
const float sign = (tr.userData < 0) ? 1.0f : -1.0f;
mVerticesNormalsSmoothed[mPositionMappedVrt[tr.ea]] += toNvShared(mVertices[tr.ea].n).getNormalized() * sign;
mVerticesNormalsSmoothed[mPositionMappedVrt[tr.eb]] += toNvShared(mVertices[tr.eb].n).getNormalized() * sign;
mVerticesNormalsSmoothed[mPositionMappedVrt[tr.ec]] += toNvShared(mVertices[tr.ec].n).getNormalized() * sign;
}
for (uint32_t i = 0; i < mVerticesNormalsSmoothed.size(); ++i)
{
mVerticesNormalsSmoothed[i] = mVerticesNormalsSmoothed[mPositionMappedVrt[i]];
mVerticesNormalsSmoothed[i].normalize();
}
}
void MeshNoiser::applyNoise(SimplexNoise& noise, float falloff, int32_t /*relaxIterations*/, float /*relaxFactor*/)
{
NVBLAST_ASSERT(isTesselated);
if (isTesselated == false)
{
return;
}
mRestrictionFlag.clear();
mRestrictionFlag.resize(mVertices.size(), false);
for (uint32_t i = 0; i < mEdges.size(); ++i)
{
if (mTrMeshEdToTr[i].c != 0)
{
if (mEdgeFlag[i] == EXTERNAL_EDGE || mEdgeFlag[i] == EXTERNAL_BORDER_EDGE)
{
mRestrictionFlag[mEdges[i].e] = true;
mRestrictionFlag[mEdges[i].s] = true;
}
}
}
std::vector<Vertex> localVertices = mVertices;
recalcNoiseDirs();
// relax(relaxIterations, relaxFactor, localVertices);
/**
Apply noise
*/
for (uint32_t i = 0; i < localVertices.size(); ++i)
{
if (!mRestrictionFlag[i])
{
float d = noise.sample(toNvShared(localVertices[i].p));
toNvShared(localVertices[i].p) +=
(falloffFunction(mVerticesDistances[i], falloff)) * mVerticesNormalsSmoothed[i] * d;
}
}
/* Recalculate smoothed normals*/
mVerticesNormalsSmoothed.assign(mVerticesNormalsSmoothed.size(), NvVec3(0, 0, 0));
for (uint32_t i = 0; i < mTriangles.size(); ++i)
{
if (mTriangles[i].ea == kNotValidVertexIndex)
{
continue;
}
TriangleIndexed& tr = mTriangles[i];
if (tr.userData == 0)
continue;
Triangle pTr(localVertices[tr.ea], localVertices[tr.eb], localVertices[tr.ec]);
NvVec3 nrm = toNvShared(pTr.b.p - pTr.a.p).cross(toNvShared(pTr.c.p - pTr.a.p)).getNormalized();
mVerticesNormalsSmoothed[mPositionMappedVrt[tr.ea]] += nrm;
mVerticesNormalsSmoothed[mPositionMappedVrt[tr.eb]] += nrm;
mVerticesNormalsSmoothed[mPositionMappedVrt[tr.ec]] += nrm;
}
for (uint32_t i = 0; i < mVerticesNormalsSmoothed.size(); ++i)
{
mVerticesNormalsSmoothed[i] = mVerticesNormalsSmoothed[mPositionMappedVrt[i]];
mVerticesNormalsSmoothed[i].normalize();
}
for (uint32_t i = 0; i < mTriangles.size(); ++i)
{
if (mTriangles[i].ea == kNotValidVertexIndex)
{
continue;
}
TriangleIndexed& tr = mTriangles[i];
if (tr.userData == 0)
continue;
localVertices[tr.ea].n = fromNvShared(mVerticesNormalsSmoothed[mPositionMappedVrt[tr.ea]]);
localVertices[tr.eb].n = fromNvShared(mVerticesNormalsSmoothed[mPositionMappedVrt[tr.eb]]);
localVertices[tr.ec].n = fromNvShared(mVerticesNormalsSmoothed[mPositionMappedVrt[tr.ec]]);
}
mResultTriangles.clear();
for (uint32_t i = 0; i < mTriangles.size(); ++i)
{
if (mTriangles[i].ea == kNotValidVertexIndex)
{
continue;
}
mResultTriangles.push_back({ localVertices[mTriangles[i].ea], localVertices[mTriangles[i].eb],
localVertices[mTriangles[i].ec], mTriangles[i].userData, mTriangles[i].materialId,
mTriangles[i].smoothingGroup });
}
}
void MeshNoiser::prebuildTesselatedTriangles()
{
mResultTriangles.clear();
for (uint32_t i = 0; i < mVertices.size(); ++i)
{
mVertices[i].p = mVertices[i].p * mScale + fromNvShared(mOffset);
}
for (uint32_t i = 0; i < mTriangles.size(); ++i)
{
if (mTriangles[i].ea == kNotValidVertexIndex)
{
continue;
}
mResultTriangles.push_back({ mVertices[mTriangles[i].ea], mVertices[mTriangles[i].eb], mVertices[mTriangles[i].ec],
mTriangles[i].userData, mTriangles[i].materialId, mTriangles[i].smoothingGroup });
}
}
std::vector<Triangle> MeshNoiser::getMesh()
{
return mResultTriangles;
}
void MeshNoiser::reset()
{
mVertices.clear();
mTriangles.clear();
mEdges.clear();
mVertMap.clear();
mEdgeMap.clear();
mResultTriangles.clear();
mRestrictionFlag.clear();
mEdgeFlag.clear();
mTrMeshEdToTr.clear();
mVertexValence.clear();
mVertexToTriangleMap.clear();
mVerticesDistances.clear();
mVerticesNormalsSmoothed.clear();
mPositionMappedVrt.clear();
mGeometryGraph.clear();
isTesselated = false;
mOffset = NvVec3(0, 0, 0);
mScale = 1.0f;
} |
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringBondGeneratorImpl.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2022-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTEXTAUTHORINGBONDGENERATORIMPL_H
#define NVBLASTEXTAUTHORINGBONDGENERATORIMPL_H
#include "NvBlastExtAuthoringBondGenerator.h"
#include "NvBlastExtAuthoringFractureTool.h"
#include "NvPlane.h"
#include <NvBlastExtAuthoringConvexMeshBuilder.h>
#include <vector>
#include <set>
namespace Nv
{
namespace Blast
{
/**
Tool for gathering bond information from provided mesh geometry
*/
class BlastBondGeneratorImpl : public BlastBondGenerator
{
public:
BlastBondGeneratorImpl(ConvexMeshBuilder* builder)
: mConvexMeshBuilder(builder) {}
virtual void release() override;
virtual int32_t buildDescFromInternalFracture(FractureTool* tool, const bool* chunkIsSupport,
NvBlastBondDesc*& resultBondDescs, NvBlastChunkDesc*& resultChunkDescriptors) override;
virtual int32_t createBondBetweenMeshes(uint32_t meshACount, const Triangle* meshA, uint32_t meshBCount, const Triangle* meshB,
NvBlastBond& resultBond, BondGenerationConfig conf) override;
virtual int32_t createBondBetweenMeshes(uint32_t meshCount, const uint32_t* geometryOffset, const Triangle* geometry,
uint32_t overlapsCount, const uint32_t* overlapsA, const uint32_t* overlapsB,
NvBlastBondDesc*& resultBond, BondGenerationConfig cfg) override;
virtual int32_t bondsFromPrefractured(uint32_t meshCount, const uint32_t* geometryOffset, const Triangle* geometry,
const bool* chunkIsSupport, NvBlastBondDesc*& resultBondDescs,
BondGenerationConfig conf) override;
virtual int32_t bondsFromPrefractured(uint32_t meshCount, const uint32_t* convexHullOffset, const CollisionHull** chunkHulls,
const bool* chunkIsSupport, const uint32_t* meshGroups, NvBlastBondDesc*& resultBondDescs, float maxSeparation) override;
private:
float processWithMidplanes(TriangleProcessor* trProcessor, const Triangle* mA, uint32_t mavc, const Triangle* mB, uint32_t mbvc, const CollisionHull* hull1, const CollisionHull* hull2,
const std::vector<nvidia::NvVec3>& hull1p, const std::vector<nvidia::NvVec3>& hull2p,
nvidia::NvVec3& normal, nvidia::NvVec3& centroid, float maxRelSeparation);
int32_t createFullBondListAveraged( uint32_t meshCount, const uint32_t* geometryOffset, const Triangle* geometry, const CollisionHull** chunkHulls,
const bool* supportFlags, const uint32_t* meshGroups, NvBlastBondDesc*& resultBondDescs, BondGenerationConfig conf, std::set<std::pair<uint32_t, uint32_t> >* pairNotToTest = nullptr);
int32_t createFullBondListExact( uint32_t meshCount, const uint32_t* geometryOffset, const Triangle* geometry,
const bool* supportFlags, NvBlastBondDesc*& resultBondDescs, BondGenerationConfig conf);
int32_t createFullBondListExactInternal(uint32_t meshCount, const uint32_t* geometryOffset, const Triangle* geometry,
std::vector<PlaneChunkIndexer>& planeTriangleMapping , NvBlastBondDesc*& resultBondDescs);
int32_t createBondForcedInternal( const std::vector<nvidia::NvVec3>& hull0, const std::vector<nvidia::NvVec3>& hull1,const CollisionHull& cHull0,
const CollisionHull& cHull1, nvidia::NvBounds3 bound0, nvidia::NvBounds3 bound1, NvBlastBond& resultBond, float overlapping);
void buildGeometryCache(uint32_t meshCount, const uint32_t* geometryOffset, const Triangle* geometry);
void resetGeometryCache();
ConvexMeshBuilder* mConvexMeshBuilder;
std::vector<std::vector<Triangle> > mGeometryCache;
std::vector<PlaneChunkIndexer> mPlaneCache;
std::vector<CollisionHull*> mCHullCache;
std::vector<std::vector<nvidia::NvVec3> > mHullsPointsCache;
std::vector<nvidia::NvBounds3 > mBoundsCache;
};
} // namespace Blast
} // namespace Nv
#endif // NVBLASTEXTAUTHORINGBONDGENERATORIMPL_H |
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtApexSharedParts.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTEXTAPEXSHAREDPARTS_H
#define NVBLASTEXTAPEXSHAREDPARTS_H
#include "NvBlast.h"
#include "NvPlane.h"
namespace nvidia
{
class NvVec3;
class NvTransform;
class NvBounds3;
}
namespace Nv
{
namespace Blast
{
struct Separation
{
nvidia::NvPlane plane;
float min0, max0, min1, max1;
float getDistance()
{
return nvidia::NvMax(min0 - max1, min1 - max0);
}
};
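// Worked example: if hull0 projects onto the plane normal as [min0, max0] = [0, 1]
// and hull1 as [min1, max1] = [2, 3], getDistance() = max(0 - 3, 2 - 1) = 1, i.e.
// the hulls are separated by one unit along the normal; a negative value means
// the projection intervals overlap.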
/**
Function to compute the midplane between two convex hulls. Copied from APEX.
*/
bool importerHullsInProximityApexFree( uint32_t hull0Count, const nvidia::NvVec3* hull0, nvidia::NvBounds3& hull0Bounds, const nvidia::NvTransform& localToWorldRT0In, const nvidia::NvVec3& scale0In,
uint32_t hull1Count, const nvidia::NvVec3* hull1, nvidia::NvBounds3& hull1Bounds, const nvidia::NvTransform& localToWorldRT1In, const nvidia::NvVec3& scale1In,
float maxDistance, Separation* separation);
} // namespace Blast
} // namespace Nv
#endif // NVBLASTEXTAPEXSHAREDPARTS_H
|
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoring.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include "NvBlastExtAuthoring.h"
#include "NvBlastTypes.h"
#include "NvBlastIndexFns.h"
#include "NvBlast.h"
#include "NvBlastAssert.h"
#include "NvBlastGlobals.h"
#include "NvBlastExtAssetUtils.h"
#include "NvBlastExtAuthoringPatternGeneratorImpl.h"
#include "NvBlastExtAuthoringBooleanToolImpl.h"
#include "NvBlastExtAuthoringAcceleratorImpl.h"
#include "NvBlastExtAuthoringMeshImpl.h"
#include "NvBlastExtAuthoringMeshCleanerImpl.h"
#include "NvBlastExtAuthoringFractureToolImpl.h"
#include "NvBlastExtAuthoringBondGeneratorImpl.h"
#include "NvBlastExtAuthoringCollisionBuilderImpl.h"
#include "NvBlastExtAuthoringCutoutImpl.h"
#include "NvBlastExtAuthoringInternalCommon.h"
#include "NvBlastNvSharedHelpers.h"
#include <algorithm>
#include <memory>
using namespace Nv::Blast;
using namespace nvidia;
#define SAFE_ARRAY_NEW(T, x) (((x) > 0) ? reinterpret_cast<T*>(NVBLAST_ALLOC(sizeof(T) * (x))) : nullptr)
#define SAFE_ARRAY_DELETE(x) if (x != nullptr) {NVBLAST_FREE(x); x = nullptr;}
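// Usage sketch (illustrative): these macros pair NVBLAST_ALLOC/NVBLAST_FREE for
// raw arrays, e.g.
//   uint32_t* offsets = SAFE_ARRAY_NEW(uint32_t, chunkCount + 1);
//   ...
//   SAFE_ARRAY_DELETE(offsets);
// They do not run constructors or destructors, so they are only suitable for
// trivially constructible/destructible types.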
Mesh* NvBlastExtAuthoringCreateMesh(const NvcVec3* position, const NvcVec3* normals, const NvcVec2* uv, uint32_t verticesCount, const uint32_t* indices, uint32_t indicesCount)
{
return new MeshImpl(position, normals, uv, verticesCount, indices, indicesCount);
}
Mesh* NvBlastExtAuthoringCreateMeshOnlyTriangles(const void* Vertices, uint32_t vcount, uint32_t* indices, uint32_t indexCount, void* materials, uint32_t materialStride)
{
return new MeshImpl((Vertex*)Vertices, vcount, indices, indexCount, materials, materialStride);
}
Mesh* NvBlastExtAuthoringCreateMeshFromFacets(const void* vertices, const void* edges, const void* facets, uint32_t verticesCount, uint32_t edgesCount, uint32_t facetsCount)
{
return new MeshImpl((Vertex*)vertices, (Edge*)edges, (Facet*)facets, verticesCount, edgesCount, facetsCount);
}
MeshCleaner* NvBlastExtAuthoringCreateMeshCleaner()
{
return new MeshCleanerImpl;
}
VoronoiSitesGenerator* NvBlastExtAuthoringCreateVoronoiSitesGenerator(Mesh* mesh, RandomGeneratorBase* rng)
{
return new VoronoiSitesGeneratorImpl(mesh, rng);
}
CutoutSet* NvBlastExtAuthoringCreateCutoutSet()
{
return new CutoutSetImpl();
}
void NvBlastExtAuthoringBuildCutoutSet(CutoutSet& cutoutSet, const uint8_t* pixelBuffer, uint32_t bufferWidth, uint32_t bufferHeight,
float segmentationErrorThreshold, float snapThreshold, bool periodic, bool expandGaps)
{
::createCutoutSet(*(CutoutSetImpl*)&cutoutSet, pixelBuffer, bufferWidth, bufferHeight, segmentationErrorThreshold, snapThreshold, periodic, expandGaps);
}
FractureTool* NvBlastExtAuthoringCreateFractureTool()
{
return new FractureToolImpl;
}
BlastBondGenerator* NvBlastExtAuthoringCreateBondGenerator(Nv::Blast::ConvexMeshBuilder* builder)
{
return new BlastBondGeneratorImpl(builder);
}
int32_t NvBlastExtAuthoringBuildMeshConvexDecomposition(ConvexMeshBuilder* cmb, const Nv::Blast::Triangle* mesh,
uint32_t triangleCount,
const ConvexDecompositionParams& params,
CollisionHull**& convexes)
{
NVBLAST_ASSERT(cmb != nullptr);
return buildMeshConvexDecomposition(*cmb, mesh, triangleCount, params, convexes);
}
void NvBlastExtAuthoringTrimCollisionGeometry(ConvexMeshBuilder* cmb, uint32_t chunksCount,
Nv::Blast::CollisionHull** in, const uint32_t* chunkDepth)
{
return trimCollisionGeometry(*cmb, chunksCount, in, chunkDepth);
}
void NvBlastExtAuthoringTransformCollisionHullInPlace(CollisionHull* hull, const NvcVec3* scaling, const NvcQuat* rotation, const NvcVec3* translation)
{
// Local copies of scaling (S), rotation (R), and translation (T)
nvidia::NvVec3 S = { 1, 1, 1 };
nvidia::NvQuat R = { 0, 0, 0, 1 };
nvidia::NvVec3 T = { 0, 0, 0 };
nvidia::NvVec3 cofS = { 1, 1, 1 };
float sgnDetS = 1;
{
if (rotation)
{
R = *toNvShared(rotation);
}
if (scaling)
{
S = *toNvShared(scaling);
cofS.x = S.y * S.z;
cofS.y = S.z * S.x;
cofS.z = S.x * S.y;
sgnDetS = (S.x * S.y * S.z < 0) ? -1 : 1;
}
if (translation)
{
T = *toNvShared(translation);
}
}
const uint32_t pointCount = hull->pointsCount;
for (uint32_t pi = 0; pi < pointCount; pi++)
{
nvidia::NvVec3& p = toNvShared(hull->points[pi]);
p = (R.rotate(p.multiply(S)) + T);
}
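// Plane normals transform with the inverse-transpose of the linear part of the
// point transform x' = R * S * x + T. For a diagonal scale S that amounts to R
// applied to the componentwise cofactor scale cofS = (Sy*Sz, Sz*Sx, Sx*Sy),
// renormalized, with sgnDetS flipping the normal under a mirroring
// (negative-determinant) scale. Example: S = (2, 1, 1) keeps the normal
// (1, 0, 0) pointing along +x after renormalization, so it stays perpendicular
// to the scaled geometry.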
const uint32_t planeCount = hull->polygonDataCount;
for (uint32_t pi = 0; pi < planeCount; pi++)
{
float* plane = hull->polygonData[pi].plane;
nvidia::NvPlane nvPlane(plane[0], plane[1], plane[2], plane[3]);
NvVec3 transformedNormal = sgnDetS*R.rotate(nvPlane.n.multiply(cofS)).getNormalized();
NvVec3 transformedPt = R.rotate(nvPlane.pointInPlane().multiply(S)) + T;
nvidia::NvPlane transformedPlane(transformedPt, transformedNormal);
plane[0] = transformedPlane.n[0];
plane[1] = transformedPlane.n[1];
plane[2] = transformedPlane.n[2];
plane[3] = transformedPlane.d;
}
}
CollisionHull* NvBlastExtAuthoringTransformCollisionHull(const CollisionHull* hull, const NvcVec3* scaling, const NvcQuat* rotation, const NvcVec3* translation)
{
CollisionHull* ret = new CollisionHull(*hull);
ret->points = SAFE_ARRAY_NEW(NvcVec3, ret->pointsCount);
ret->indices = SAFE_ARRAY_NEW(uint32_t, ret->indicesCount);
ret->polygonData = SAFE_ARRAY_NEW(HullPolygon, ret->polygonDataCount);
memcpy(ret->points, hull->points, sizeof(ret->points[0]) * ret->pointsCount);
memcpy(ret->indices, hull->indices, sizeof(ret->indices[0]) * ret->indicesCount);
memcpy(ret->polygonData, hull->polygonData, sizeof(ret->polygonData[0]) * ret->polygonDataCount);
NvBlastExtAuthoringTransformCollisionHullInPlace(ret, scaling, rotation, translation);
return ret;
}
void buildPhysicsChunks(ConvexMeshBuilder& collisionBuilder, AuthoringResult& result, const ConvexDecompositionParams& params, uint32_t chunksToProcessCount = 0, uint32_t* chunksToProcess = nullptr)
{
uint32_t chunkCount = (uint32_t)result.chunkCount;
if (params.maximumNumberOfHulls == 1)
{
result.collisionHullOffset = SAFE_ARRAY_NEW(uint32_t, chunkCount + 1);
result.collisionHullOffset[0] = 0;
result.collisionHull = SAFE_ARRAY_NEW(CollisionHull*, chunkCount);
for (uint32_t i = 0; i < chunkCount; ++i)
{
std::vector<NvcVec3> vertices;
for (uint32_t p = result.geometryOffset[i]; p < result.geometryOffset[i + 1]; ++p)
{
Nv::Blast::Triangle& tri = result.geometry[p];
vertices.push_back(tri.a.p);
vertices.push_back(tri.b.p);
vertices.push_back(tri.c.p);
}
result.collisionHullOffset[i + 1] = result.collisionHullOffset[i] + 1;
result.collisionHull[i] = collisionBuilder.buildCollisionGeometry((uint32_t)vertices.size(), vertices.data());
}
}
else
{
std::set<int32_t> chunkSet;
for (uint32_t c = 0; c < chunksToProcessCount; c++)
{
chunkSet.insert(chunksToProcess[c]);
}
std::vector<std::vector<CollisionHull*> > hulls(chunkCount);
int32_t totalHulls = 0;
for (uint32_t i = 0; i < chunkCount; ++i)
{
if (chunkSet.size() > 0 && chunkSet.find(i) == chunkSet.end())
{
int32_t newHulls = result.collisionHullOffset[i + 1] - result.collisionHullOffset[i];
int32_t off = result.collisionHullOffset[i];
for (int32_t subhull = 0; subhull < newHulls; ++subhull)
{
hulls[i].push_back(result.collisionHull[off + subhull]);
}
totalHulls += newHulls;
continue;
}
CollisionHull** tempHull;
int32_t newHulls =
buildMeshConvexDecomposition(collisionBuilder, result.geometry + result.geometryOffset[i],
result.geometryOffset[i + 1] - result.geometryOffset[i], params, tempHull);
totalHulls += newHulls;
for (int32_t h = 0; h < newHulls; ++h)
{
hulls[i].push_back(tempHull[h]);
}
SAFE_ARRAY_DELETE(tempHull);
}
result.collisionHullOffset = SAFE_ARRAY_NEW(uint32_t, chunkCount + 1);
result.collisionHullOffset[0] = 0;
result.collisionHull = SAFE_ARRAY_NEW(CollisionHull*, totalHulls);
for (uint32_t i = 0; i < chunkCount; ++i)
{
result.collisionHullOffset[i + 1] = result.collisionHullOffset[i] + hulls[i].size();
int32_t off = result.collisionHullOffset[i];
for (uint32_t subhull = 0; subhull < hulls[i].size(); ++subhull)
{
result.collisionHull[off + subhull] = hulls[i][subhull];
}
}
}
}
void NvBlastExtAuthoringReleaseAuthoringResultCollision(Nv::Blast::ConvexMeshBuilder& collisionBuilder, Nv::Blast::AuthoringResult* ar)
{
if (ar->collisionHull != nullptr)
{
for (uint32_t ch = 0; ch < ar->collisionHullOffset[ar->chunkCount]; ch++)
{
collisionBuilder.releaseCollisionHull(ar->collisionHull[ch]);
}
SAFE_ARRAY_DELETE(ar->collisionHullOffset);
SAFE_ARRAY_DELETE(ar->collisionHull);
}
}
void NvBlastExtAuthoringReleaseAuthoringResult(Nv::Blast::ConvexMeshBuilder& collisionBuilder, Nv::Blast::AuthoringResult* ar)
{
NvBlastExtAuthoringReleaseAuthoringResultCollision(collisionBuilder, ar);
if (ar->asset)
{
NVBLAST_FREE(ar->asset);
ar->asset = nullptr;
}
SAFE_ARRAY_DELETE(ar->assetToFractureChunkIdMap);
SAFE_ARRAY_DELETE(ar->geometryOffset);
SAFE_ARRAY_DELETE(ar->geometry);
SAFE_ARRAY_DELETE(ar->chunkDescs);
SAFE_ARRAY_DELETE(ar->bondDescs);
delete ar;
}
static float getGeometryVolumeAndCentroid(NvcVec3& centroid, const Nv::Blast::Triangle* tris, size_t triCount)
{
class GeometryQuery
{
public:
GeometryQuery(const Nv::Blast::Triangle* tris, size_t triCount) : m_tris(tris), m_triCount(triCount) {}
size_t faceCount() const { return m_triCount; }
size_t vertexCount(size_t faceIndex) const { NV_UNUSED(faceIndex); return 3; }
NvcVec3 vertex(size_t faceIndex, size_t vertexIndex) const
{
const Nv::Blast::Triangle& tri = m_tris[faceIndex];
switch (vertexIndex)
{
case 0: return tri.a.p;
case 1: return tri.b.p;
case 2: return tri.c.p;
}
return NvcVec3({0.0f, 0.0f, 0.0f});
}
const Nv::Blast::Triangle* m_tris;
size_t m_triCount;
};
return calculateMeshVolumeAndCentroid<GeometryQuery>(centroid, {tris, triCount});
}
AuthoringResult* NvBlastExtAuthoringProcessFracture(FractureTool& fTool, BlastBondGenerator& bondGenerator, ConvexMeshBuilder& collisionBuilder, const ConvexDecompositionParams& collisionParam, int32_t defaultSupportDepth)
{
fTool.finalizeFracturing();
const uint32_t chunkCount = fTool.getChunkCount();
if (chunkCount == 0)
{
return nullptr;
}
AuthoringResult* ret = new AuthoringResult;
if (ret == nullptr)
{
return nullptr;
}
AuthoringResult& aResult = *ret;
aResult.chunkCount = chunkCount;
std::shared_ptr<bool> isSupport(new bool[chunkCount], [](bool* b) {delete[] b; });
memset(isSupport.get(), 0, sizeof(bool) * chunkCount);
for (uint32_t i = 0; i < fTool.getChunkCount(); ++i)
{
if (defaultSupportDepth < 0 || fTool.getChunkDepth(fTool.getChunkId(i)) < defaultSupportDepth)
{
isSupport.get()[i] = fTool.getChunkInfo(i).isLeaf;
}
else if (fTool.getChunkDepth(fTool.getChunkId(i)) == defaultSupportDepth)
{
isSupport.get()[i] = true;
}
}
const uint32_t bondCount = bondGenerator.buildDescFromInternalFracture(&fTool, isSupport.get(), aResult.bondDescs, aResult.chunkDescs);
aResult.bondCount = bondCount;
if (bondCount == 0)
{
aResult.bondDescs = nullptr;
}
// order chunks, build map
std::vector<uint32_t> chunkReorderInvMap;
{
std::vector<uint32_t> chunkReorderMap(chunkCount);
std::vector<char> scratch(chunkCount * sizeof(NvBlastChunkDesc));
NvBlastEnsureAssetExactSupportCoverage(aResult.chunkDescs, chunkCount, scratch.data(), logLL);
NvBlastBuildAssetDescChunkReorderMap(chunkReorderMap.data(), aResult.chunkDescs, chunkCount, scratch.data(), logLL);
NvBlastApplyAssetDescChunkReorderMapInPlace(aResult.chunkDescs, chunkCount, aResult.bondDescs, bondCount, chunkReorderMap.data(), true, scratch.data(), logLL);
chunkReorderInvMap.resize(chunkReorderMap.size());
Nv::Blast::invertMap(chunkReorderInvMap.data(), chunkReorderMap.data(), static_cast<unsigned int>(chunkReorderMap.size()));
}
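// chunkReorderMap takes an original chunk index to its index in the reordered
// (asset) array; the inverted map lets the loop below walk asset chunks in order
// and fetch each one's fracture-tool data. For example, chunkReorderMap = {2, 0, 1}
// inverts to chunkReorderInvMap = {1, 2, 0}.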
// get result geometry
aResult.geometryOffset = SAFE_ARRAY_NEW(uint32_t, chunkCount + 1);
aResult.assetToFractureChunkIdMap = SAFE_ARRAY_NEW(uint32_t, chunkCount + 1);
aResult.geometryOffset[0] = 0;
std::vector<Nv::Blast::Triangle*> chunkGeometry(chunkCount);
for (uint32_t i = 0; i < chunkCount; ++i)
{
uint32_t chunkInfoIndex = chunkReorderInvMap[i];
aResult.geometryOffset[i+1] = aResult.geometryOffset[i] + fTool.getBaseMesh(chunkInfoIndex, chunkGeometry[i]);
aResult.assetToFractureChunkIdMap[i] = fTool.getChunkId(chunkInfoIndex);
}
aResult.geometry = SAFE_ARRAY_NEW(Triangle, aResult.geometryOffset[chunkCount]);
for (uint32_t i = 0; i < chunkCount; ++i)
{
uint32_t trianglesCount = aResult.geometryOffset[i + 1] - aResult.geometryOffset[i];
memcpy(aResult.geometry + aResult.geometryOffset[i], chunkGeometry[i], trianglesCount * sizeof(Nv::Blast::Triangle));
delete chunkGeometry[i];
chunkGeometry[i] = nullptr;
}
float maxX = -FLT_MAX;
float maxY = -FLT_MAX;
float maxZ = -FLT_MAX;
float minX = FLT_MAX;
float minY = FLT_MAX;
float minZ = FLT_MAX;
for (uint32_t i = 0; i < bondCount; i++)
{
NvBlastBondDesc& bondDesc = aResult.bondDescs[i];
minX = std::min(minX, bondDesc.bond.centroid[0]);
maxX = std::max(maxX, bondDesc.bond.centroid[0]);
minY = std::min(minY, bondDesc.bond.centroid[1]);
maxY = std::max(maxY, bondDesc.bond.centroid[1]);
minZ = std::min(minZ, bondDesc.bond.centroid[2]);
maxZ = std::max(maxZ, bondDesc.bond.centroid[2]);
}
// prepare physics data (convexes)
buildPhysicsChunks(collisionBuilder, aResult, collisionParam);
// set NvBlastChunk volume and centroid from CollisionHull
for (uint32_t i = 0; i < chunkCount; i++)
{
float totalVolume = 0.f;
NvcVec3 totalCentroid = {0.0f, 0.0f, 0.0f};
for (uint32_t k = aResult.collisionHullOffset[i]; k < aResult.collisionHullOffset[i+1]; k++)
{
const CollisionHull* hull = aResult.collisionHull[k];
if (hull)
{
NvcVec3 centroid;
const float volume = calculateCollisionHullVolumeAndCentroid(centroid, *hull);
totalVolume += volume;
totalCentroid = totalCentroid + volume*centroid;
}
else
{
totalVolume = 0.0f; // Found a null hull, signal this with zero volume
break;
}
}
if (totalVolume > 0.0f)
{
totalCentroid = totalCentroid / totalVolume;
aResult.chunkDescs[i].volume = totalVolume;
aResult.chunkDescs[i].centroid[0] = totalCentroid.x;
aResult.chunkDescs[i].centroid[1] = totalCentroid.y;
aResult.chunkDescs[i].centroid[2] = totalCentroid.z;
}
else
{
// Fallback to using mesh
size_t triCount = aResult.geometryOffset[i+1] - aResult.geometryOffset[i];
const Nv::Blast::Triangle* tris = aResult.geometry + aResult.geometryOffset[i];
NvcVec3 centroid;
aResult.chunkDescs[i].volume = getGeometryVolumeAndCentroid(centroid, tris, triCount);
aResult.chunkDescs[i].centroid[0] = centroid.x;
aResult.chunkDescs[i].centroid[1] = centroid.y;
aResult.chunkDescs[i].centroid[2] = centroid.z;
}
}
// build and serialize ExtPhysicsAsset
NvBlastAssetDesc descriptor;
descriptor.bondCount = bondCount;
descriptor.bondDescs = aResult.bondDescs;
descriptor.chunkCount = chunkCount;
descriptor.chunkDescs = aResult.chunkDescs;
std::vector<uint8_t> scratch(static_cast<unsigned int>(NvBlastGetRequiredScratchForCreateAsset(&descriptor, logLL)));
void* mem = NVBLAST_ALLOC(NvBlastGetAssetMemorySize(&descriptor, logLL));
aResult.asset = NvBlastCreateAsset(mem, &descriptor, scratch.data(), logLL);
//aResult.asset = std::shared_ptr<NvBlastAsset>(asset, [=](NvBlastAsset* asset)
//{
// NVBLAST_FREE(asset);
//});
//std::cout << "Done" << std::endl;
ret->materialCount = 0;
ret->materialNames = nullptr;
return ret;
}
uint32_t NvBlastExtAuthoringFindAssetConnectingBonds
(
const NvBlastAsset** components,
const NvcVec3* scales,
const NvcQuat* rotations,
const NvcVec3* translations,
const uint32_t** convexHullOffsets,
const CollisionHull*** chunkHulls,
uint32_t componentCount,
NvBlastExtAssetUtilsBondDesc*& newBondDescs,
float maxSeparation
)
{
//We don't need to use any of the cooking related parts of this
BlastBondGeneratorImpl bondGenerator(nullptr);
std::vector<uint32_t> componentChunkOffsets;
componentChunkOffsets.reserve(componentCount + 1);
componentChunkOffsets.push_back(0);
std::vector<uint32_t> combinedConvexHullOffsets;
std::vector<const CollisionHull*> combinedConvexHulls;
std::vector<CollisionHull*> hullsToRelease;
combinedConvexHullOffsets.push_back(0);
std::vector<uint32_t> originalComponentIndex;
const nvidia::NvVec3 identityScale(1);
//Combine our hull lists into a single combined list for bondsFromPrefractured
for (uint32_t c = 0; c < componentCount; c++)
{
const uint32_t chunkCount = NvBlastAssetGetChunkCount(components[c], &logLL);
const NvcVec3* scale = scales ? scales + c : nullptr;
const NvcQuat* rotation = rotations ? rotations + c : nullptr;
const NvcVec3* translation = translations ? translations + c : nullptr;
componentChunkOffsets.push_back(chunkCount + componentChunkOffsets.back());
for (uint32_t chunk = 0; chunk < chunkCount; chunk++)
{
const uint32_t hullsStart = convexHullOffsets[c][chunk];
const uint32_t hullsEnd = convexHullOffsets[c][chunk + 1];
for (uint32_t hull = hullsStart; hull < hullsEnd; hull++)
{
if ((scale != nullptr && *toNvShared(scale) != identityScale) ||
(rotation != nullptr && !toNvShared(rotation)->isIdentity()) ||
(translation != nullptr && !toNvShared(translation)->isZero()))
{
hullsToRelease.emplace_back(NvBlastExtAuthoringTransformCollisionHull(chunkHulls[c][hull], scale, rotation, translation));
combinedConvexHulls.emplace_back(hullsToRelease.back());
}
else
{
//No need to transform
combinedConvexHulls.emplace_back(chunkHulls[c][hull]);
}
}
combinedConvexHullOffsets.push_back((hullsEnd - hullsStart) + combinedConvexHullOffsets.back());
originalComponentIndex.push_back(c);
}
}
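// combinedConvexHullOffsets is now a prefix sum over every chunk of every
// component: chunk k's hulls occupy combinedConvexHulls[offsets[k], offsets[k+1]).
// E.g. two chunks with 2 hulls and 1 hull respectively yield offsets {0, 2, 3}.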
const uint32_t totalChunkCount = componentChunkOffsets.back();
//Can't use std::vector<bool> since we need a bool* later
std::unique_ptr<bool[]> isSupportChunk(new bool[totalChunkCount]);
for (uint32_t c = 0; c < componentCount; c++)
{
const uint32_t chunkCount = componentChunkOffsets[c + 1] - componentChunkOffsets[c];
NvBlastSupportGraph supportGraph = NvBlastAssetGetSupportGraph(components[c], &logLL);
for (uint32_t chunk = 0; chunk < chunkCount; chunk++)
{
auto chunkIndicesEnd = supportGraph.chunkIndices + supportGraph.nodeCount;
isSupportChunk[chunk + componentChunkOffsets[c]] = (std::find(supportGraph.chunkIndices, chunkIndicesEnd, chunk) != chunkIndicesEnd);
}
}
//Find the bonds
NvBlastBondDesc* newBonds = nullptr;
const int32_t newBondCount = bondGenerator.bondsFromPrefractured(totalChunkCount, combinedConvexHullOffsets.data(), combinedConvexHulls.data(), isSupportChunk.get(), originalComponentIndex.data(), newBonds, maxSeparation);
//Convert the bonds back to per-component chunks
newBondDescs = SAFE_ARRAY_NEW(NvBlastExtAssetUtilsBondDesc, newBondCount);
for (int32_t nb = 0; nb < newBondCount; ++nb)
{
newBondDescs[nb].bond = newBonds[nb].bond;
for (uint32_t ci = 0; ci < 2; ++ci)
{
uint32_t absChunkIdx = newBonds[nb].chunkIndices[ci];
uint32_t componentIdx = originalComponentIndex[absChunkIdx];
newBondDescs[nb].componentIndices[ci] = componentIdx;
newBondDescs[nb].chunkIndices[ci] = absChunkIdx - componentChunkOffsets[componentIdx];
}
}
//Don't need this anymore
NVBLAST_FREE(newBonds);
// These hulls were generated by NvBlastExtAuthoringTransformCollisionHull, which uses SAFE_ARRAY_NEW
// to allocate the arrays referenced in each hull. Be sure to delete the array pointers here before
// deleting the CollisionHull structs.
for (CollisionHull* hull : hullsToRelease)
{
SAFE_ARRAY_DELETE(hull->indices);
SAFE_ARRAY_DELETE(hull->points);
SAFE_ARRAY_DELETE(hull->polygonData);
delete hull;
}
return newBondCount;
}
void NvBlastExtAuthoringUpdateGraphicsMesh(Nv::Blast::FractureTool& fTool, Nv::Blast::AuthoringResult& aResult)
{
uint32_t chunkCount = fTool.getChunkCount();
for (uint32_t i = 0; i < chunkCount; ++i)
{
fTool.updateBaseMesh(fTool.getChunkInfoIndex(aResult.assetToFractureChunkIdMap[i]), aResult.geometry + aResult.geometryOffset[i]);
}
}
void NvBlastExtAuthoringBuildCollisionMeshes(Nv::Blast::AuthoringResult& ares, Nv::Blast::ConvexMeshBuilder& collisionBuilder,
const Nv::Blast::ConvexDecompositionParams& collisionParam, uint32_t chunksToProcessCount, uint32_t* chunksToProcess)
{
buildPhysicsChunks(collisionBuilder, ares, collisionParam, chunksToProcessCount, chunksToProcess);
}
PatternGenerator* NvBlastExtAuthoringCreatePatternGenerator()
{
return NVBLAST_NEW(PatternGeneratorImpl);
}
SpatialGrid* NvBlastExtAuthoringCreateSpatialGrid(uint32_t resolution, const Mesh* m)
{
Grid* g = NVBLAST_NEW(Grid)(resolution);
g->setMesh(m);
return g;
}
SpatialAccelerator* NvBlastExtAuthoringCreateGridAccelerator(SpatialGrid* parentGrid)
{
return NVBLAST_NEW(GridAccelerator)((Grid*)parentGrid);
}
SpatialAccelerator* NvBlastExtAuthoringCreateSweepingAccelerator(const Mesh* m)
{
return NVBLAST_NEW(SweepingAccelerator)(m);
}
SpatialAccelerator* NvBlastExtAuthoringCreateBBoxBasedAccelerator(uint32_t resolution, const Mesh* m)
{
return NVBLAST_NEW(BBoxBasedAccelerator)(m, resolution);
}
BooleanTool* NvBlastExtAuthoringCreateBooleanTool()
{
return new BooleanToolImpl;
}
|
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtTriangleProcessor.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include "NvBlastExtTriangleProcessor.h"
#include "NvBlastExtAuthoringInternalCommon.h"
#define COLLIN_EPS 1e-4f
#define V_COMP_EPS 1e-5f
using namespace nvidia;
namespace Nv
{
namespace Blast
{
/**
Segment bounding-box intersection test
*/
bool boundingRectangleIntersection(const NvVec2& s1, const NvVec2& e1, const NvVec2& s2, const NvVec2& e2)
{
// sl1/sl2 is always the left/bottom end of the rectangle
// el1/el2 is always the right/top end of the rectangle
float sl1, sl2, el1, el2;
if (s1.x < e1.x)
{
sl1 = s1.x;
el1 = e1.x;
}
else
{
el1 = s1.x;
sl1 = e1.x;
}
if (s2.x < e2.x)
{
sl2 = s2.x;
el2 = e2.x;
}
else
{
el2 = s2.x;
sl2 = e2.x;
}
if (NvMax(sl1, sl2) > NvMin(el1, el2))
return false;
if (s1.y < e1.y)
{
sl1 = s1.y;
el1 = e1.y;
}
else
{
el1 = s1.y;
sl1 = e1.y;
}
if (s2.y < e2.y)
{
sl2 = s2.y;
el2 = e2.y;
}
else
{
el2 = s2.y;
sl2 = e2.y;
}
if (NvMax(sl1, sl2) > NvMin(el1, el2))
return false;
return true;
}
inline float getRotation(NvVec2 a, NvVec2 b)
{
return a.x * b.y - a.y * b.x;
}
inline float getParameter(const NvVec2& a, const NvVec2& b, const NvVec2& point)
{
return (point - a).magnitude() / (b - a).magnitude();
}
inline NvVec3 lerp3D(const NvVec3& a, const NvVec3& b, const float t)
{
return (b - a) * t + a;
}
struct Line2D
{
NvVec2 normal;
float c;
Line2D(NvVec2 vec, NvVec2 point)
{
normal.x = vec.y;
normal.y = -vec.x;
c = -normal.dot(point);
}
};
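// Worked example of the normal form above: for the segment (0, 0) -> (2, 0),
// vec = (2, 0), so normal = (0, -2) and c = 0, and every point (x, y) on the line
// satisfies normal.dot(point) + c = -2*y = 0. getSegmentIntersection() below
// intersects two such lines with Cramer's rule, where det1 is the cross product
// of the two direction vectors (near zero when the segments are collinear or
// parallel).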
uint32_t TriangleProcessor::getSegmentIntersection(const NvVec2& s1, const NvVec2& e1, const NvVec2& s2, const NvVec2& e2, float& t1)
{
if (!boundingRectangleIntersection(s1, e1, s2, e2))
return 0;
NvVec2 vec1 = e1 - s1;
NvVec2 vec2 = e2 - s2;
float det1 = getRotation(vec1, vec2);
if (NvAbs(det1) < COLLIN_EPS)
{
return 0;
}
Line2D lineA(vec1, s1);
Line2D lineB(vec2, s2);
NvVec2 fInt;
float detX = lineA.normal.y * lineB.c - lineA.c * lineB.normal.y;
float detY = lineA.c * lineB.normal.x - lineB.c * lineA.normal.x;
float x = detX / det1;
float y = detY / det1;
if (x + V_COMP_EPS >= NvMax(NvMin(s1.x, e1.x), NvMin(s2.x, e2.x)) &&
x - V_COMP_EPS <= NvMin(NvMax(s1.x, e1.x), NvMax(s2.x, e2.x)) &&
y + V_COMP_EPS >= NvMax(NvMin(s1.y, e1.y), NvMin(s2.y, e2.y)) &&
y - V_COMP_EPS <= NvMin(NvMax(s1.y, e1.y), NvMax(s2.y, e2.y)))
{
fInt.x = x;
fInt.y = y;
t1 = getParameter(s1, e1, fInt);
return 1;
}
return 0;
}
struct cwComparer
{
NvVec3 basePoint;
NvVec3 normal;
cwComparer(NvVec3 basePointIn, NvVec3 norm)
{
basePoint = basePointIn;
normal = norm;
};
bool operator()(const NvVec3& a, const NvVec3& b)
{
NvVec3 norm = (a - basePoint).cross(b - basePoint);
return normal.dot(norm) > 0;
}
};
bool vec3Comparer(const NvVec3& a, const NvVec3& b)
{
if (a.x + V_COMP_EPS < b.x) return true;
if (a.x - V_COMP_EPS > b.x) return false;
if (a.y + V_COMP_EPS < b.y) return true;
if (a.y - V_COMP_EPS > b.y) return false;
if (a.z + V_COMP_EPS < b.z) return true;
return false;
}
void TriangleProcessor::sortToCCW(std::vector<NvVec3>& points, NvVec3& normal)
{
std::sort(points.begin(), points.end(), vec3Comparer);
int lastUnique = 0;
for (uint32_t i = 1; i < points.size(); ++i)
{
NvVec3 df = (points[i] - points[lastUnique]).abs();
if (df.x > V_COMP_EPS || df.y > V_COMP_EPS || df.z > V_COMP_EPS)
{
points[++lastUnique] = points[i];
}
}
points.resize(lastUnique + 1);
if (points.size() > 2)
{
cwComparer compr(points[0], normal);
std::sort(points.begin() + 1, points.end(), compr);
}
}
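// Planar convex hull construction: points are deduplicated, sorted angularly
// around points[0] with cwComparer, and a single Graham-scan-style pass keeps
// only vertices that turn consistently; getRotation() provides the signed turn
// and 'normal' fixes the winding of the resulting hull.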
void TriangleProcessor::buildConvexHull(std::vector<NvVec3>& points, std::vector<NvVec3>& convexHull,const NvVec3& normal)
{
std::sort(points.begin(), points.end(), vec3Comparer);
int lastUnique = 0;
for (uint32_t i = 1; i < points.size(); ++i)
{
NvVec3 df = (points[i] - points[lastUnique]).abs();
if (df.x > V_COMP_EPS || df.y > V_COMP_EPS || df.z > V_COMP_EPS)
{
points[++lastUnique] = points[i];
}
}
points.resize(lastUnique + 1);
if (points.size() > 2)
{
cwComparer compr(points[0], normal);
std::sort(points.begin() + 1, points.end(), compr);
}
if (points.size() < 3)
return;
convexHull.push_back(points[0]);
convexHull.push_back(points[1]);
ProjectionDirections projectionDirection = getProjectionDirection(normal);
for (uint32_t i = 2; i < points.size(); ++i)
{
NvVec2 pnt = getProjectedPointWithWinding(points[i], projectionDirection);
NvVec2 vec = pnt - getProjectedPointWithWinding(convexHull.back(), projectionDirection);
if (NvAbs(vec.x) < V_COMP_EPS && NvAbs(vec.y) < V_COMP_EPS)
{
continue;
}
if (getRotation(vec, getProjectedPointWithWinding(convexHull.back(), projectionDirection) - getProjectedPointWithWinding(convexHull[convexHull.size() - 2], projectionDirection)) < 0)
{
convexHull.push_back(points[i]);
}
else
{
while (convexHull.size() > 1 && getRotation(vec, getProjectedPointWithWinding(convexHull.back(), projectionDirection) - getProjectedPointWithWinding(convexHull[convexHull.size() - 2], projectionDirection)) > 0)
{
convexHull.pop_back();
vec = pnt - getProjectedPointWithWinding(convexHull.back(), projectionDirection);
}
convexHull.push_back(points[i]);
}
}
}
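// Triangle-triangle intersection: the (convex) overlap polygon is the union of
// B's vertices inside A, A's vertices inside B, and all edge-edge intersection
// points, gathered in the projection plane chosen from 'normal' and then ordered
// with buildConvexHull(). Points are temporarily shifted by -centroid for
// precision and shifted back at the end.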
uint32_t TriangleProcessor::getTriangleIntersection(TrPrcTriangle& a, TrPrcTriangle2d& aProjected, TrPrcTriangle &b, NvVec3& centroid, std::vector<NvVec3>& intersectionBuffer, NvVec3 normal)
{
b.points[0] -= centroid;
b.points[1] -= centroid;
b.points[2] -= centroid;
ProjectionDirections prjDir = getProjectionDirection(normal);
TrPrcTriangle2d bProjected;
bProjected.points[0] = getProjectedPointWithWinding(b.points[0], prjDir);
bProjected.points[1] = getProjectedPointWithWinding(b.points[1], prjDir);
bProjected.points[2] = getProjectedPointWithWinding(b.points[2], prjDir);
if (!triangleBoundingBoxIntersection(aProjected, bProjected)) return 0;
//* Check triangle A against points of B *//
for (int i = 0; i < 3; ++i)
{
if (isPointInside(bProjected.points[i], aProjected))
{
intersectionBuffer.push_back(b.points[i]);
}
}
//* Check triangle B against points of A *//
for (int i = 0; i < 3; ++i)
{
if (isPointInside(aProjected.points[i], bProjected))
{
intersectionBuffer.push_back(a.points[i]);
}
}
//* Check edges intersection *//
float param = 0;
for (int i = 0; i < 3; ++i)
{
for (int j = 0; j < 3; ++j)
{
if (getSegmentIntersection(aProjected.points[i], aProjected.points[(i + 1) % 3], bProjected.points[j], bProjected.points[(j + 1) % 3], param))
{
intersectionBuffer.push_back(lerp3D(a.points[i], a.points[(i + 1) % 3], param));
}
}
}
if (intersectionBuffer.size() == 0)
return 0;
// The intersection of two triangles is convex, but the points must be reordered to construct the polygon correctly //
std::vector<NvVec3> intrs;
buildConvexHull(intersectionBuffer, intrs, normal);
intersectionBuffer = intrs;
// Translate all points back from the origin //
for (uint32_t i = 0; i < intersectionBuffer.size(); ++i)
{
intersectionBuffer[i] += centroid;
}
return 1;
}
bool TriangleProcessor::triangleBoundingBoxIntersection(TrPrcTriangle2d& a, TrPrcTriangle2d& b)
{
float fb = std::min(a.points[0].x, std::min(a.points[1].x, a.points[2].x));
float fe = std::max(a.points[0].x, std::max(a.points[1].x, a.points[2].x));
float sb = std::min(b.points[0].x, std::min(b.points[1].x, b.points[2].x));
float se = std::max(b.points[0].x, std::max(b.points[1].x, b.points[2].x));
if (std::min(fe, se) + V_COMP_EPS < std::max(fb, sb)) return false;
fb = std::min(a.points[0].y, std::min(a.points[1].y, a.points[2].y));
fe = std::max(a.points[0].y, std::max(a.points[1].y, a.points[2].y));
sb = std::min(b.points[0].y, std::min(b.points[1].y, b.points[2].y));
se = std::max(b.points[0].y, std::max(b.points[1].y, b.points[2].y));
if (std::min(fe, se) + V_COMP_EPS < std::max(fb, sb)) return false;
return true;
}
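// Point-in-triangle classification via signed edge cross products: av, bv, cv are
// the 2D cross products of (point - vertex) with each edge. If all three share a
// sign the point is strictly inside (returns 1); if one of them is zero (within
// COLLIN_EPS) the point lies on that edge (returns 2); mixed signs mean outside
// (returns 0).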
uint32_t TriangleProcessor::isPointInside(const NvVec2& point, const TrPrcTriangle2d& triangle)
{
float av = getRotation(point - triangle.points[0], triangle.points[1] - triangle.points[0]);
float bv = getRotation(point - triangle.points[1], triangle.points[2] - triangle.points[1]);
float cv = getRotation(point - triangle.points[2], triangle.points[0] - triangle.points[2]);
if (NvAbs(av) < COLLIN_EPS) av = 0;
if (NvAbs(bv) < COLLIN_EPS) bv = 0;
if (NvAbs(cv) < COLLIN_EPS) cv = 0;
if (av >= 0 && bv >= 0 && cv >= 0)
{
if (av == 0 || bv == 0 || cv == 0)
return 2;
return 1;
}
if (av <= 0 && bv <= 0 && cv <= 0)
{
if (av == 0 || bv == 0 || cv == 0)
return 2;
return 1;
}
return 0;
}
} // namespace Blast
} // namespace Nv
|
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringPatternGeneratorImpl.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#define _CRT_SECURE_NO_WARNINGS
#include "NvBlastGlobals.h"
#include "NvBlastAssert.h"
#include "NvBlastExtAuthoringTypes.h"
#include "NvBlastExtAuthoringPatternGeneratorImpl.h"
#include "NvBlastExtAuthoringMeshUtils.h"
#include "NvBlastExtAuthoringMeshImpl.h"
#include "NvBlastExtAuthoringFractureToolImpl.h"
#include "NvBlastExtAuthoringBooleanToolImpl.h"
#include "NvBlastExtAuthoringTriangulator.h"
#include "NvBlastExtAuthoringPerlinNoise.h"
#include <NvBlastNvSharedHelpers.h>
#include <vector>
using namespace Nv::Blast;
using namespace nvidia;
struct DamagePatternImpl : public DamagePattern
{
virtual void release() override;
};
DamagePattern* PatternGeneratorImpl::generateUniformPattern(const UniformPatternDesc* desc)
{
std::vector<NvcVec3> points;
float radiusDelta = desc->radiusMax - desc->radiusMin;
for (uint32_t i = 0; i < desc->cellsCount; ++i)
{
float rd = desc->RNG() * radiusDelta + desc->radiusMin;
if (desc->radiusDistr != 1.0f)
{
rd = std::pow(rd / desc->radiusMax, desc->radiusDistr) * desc->radiusMax;
}
float phi = desc->RNG() * 6.28f;
float theta = (desc->RNG()) * 6.28f;
float x = rd * cos(phi) * sin(theta);
float y = rd * sin(phi) * sin(theta);
float z = rd * cos(theta);
points.push_back({x, y, z});
}
auto pattern = generateVoronoiPattern((uint32_t)points.size(), points.data(), desc->interiorMaterialId);
pattern->activationRadius = desc->radiusMax * desc->debrisRadiusMult;
return pattern;
}
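// Illustrative use of generateUniformPattern() above (a sketch; the field values
// are arbitrary and desc.RNG must be set to a [0, 1) random-number callback):
//   PatternGenerator* gen = NvBlastExtAuthoringCreatePatternGenerator();
//   UniformPatternDesc desc;
//   desc.cellsCount = 64;
//   desc.radiusMin = 0.0f; desc.radiusMax = 1.0f;
//   desc.radiusDistr = 1.0f; desc.debrisRadiusMult = 1.0f;
//   DamagePattern* pattern = gen->generateUniformPattern(&desc);
// Note that 6.28f above approximates 2*pi; sampling theta uniformly in [0, 2*pi)
// is not a uniform spherical distribution (it biases sites toward the poles).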
DamagePattern* PatternGeneratorImpl::generateVoronoiPattern(uint32_t cellCount, const NvcVec3* inPoints, int32_t interiorMaterialId)
{
return generateVoronoiPatternInternal(cellCount, inPoints, interiorMaterialId);
}
DamagePattern* PatternGeneratorImpl::generateVoronoiPatternInternal(uint32_t cellCount, const NvcVec3* inPoints, int32_t interiorMaterialId, float angle)
{
DamagePatternImpl* pattern = NVBLAST_NEW(DamagePatternImpl);
std::vector<NvcVec3> points(cellCount);
NvcVec3 orig = {0, 0, 0};
for (uint32_t i = 0; i < cellCount; ++i)
{
points[i] = inPoints[i];
orig = orig + points[i];
}
orig = orig / cellCount;
std::vector<std::vector<std::pair<int32_t, int32_t>>> neighbors;
findCellBasePlanes(points, neighbors);
Mesh** patterns = (Mesh**)NVBLAST_ALLOC(sizeof(Mesh*) * cellCount);
//PreparedMesh** prepMeshes = (PreparedMesh**)NVBLAST_ALLOC(sizeof(PreparedMesh*) * cellCount);
BooleanEvaluator evl;
for (uint32_t i = 0; i < cellCount; ++i)
{
patterns[i] = getCellMesh(evl, 0, i, points, neighbors, interiorMaterialId, orig);
if (patterns[i] == nullptr)
{
continue;
}
if (angle != 0)
{
auto* vr = patterns[i]->getVerticesWritable();
for (uint32_t j = 0; j < patterns[i]->getVerticesCount(); ++j)
{
float& z = vr[j].p.z;
z -= 3.8f;
if (z < -2) // we presume that this vertex has infinite -z position (everything scaled to unit cube).
{
if (angle > 0)
{
float d = sqrt(vr[j].p.x * vr[j].p.x + vr[j].p.y * vr[j].p.y);
vr[j].p.x *= (d + 4 * tan(angle * nvidia::NvPi / 180.f)) / d;
vr[j].p.y *= (d + 4 * tan(angle * nvidia::NvPi / 180.f)) / d;
}
}
}
patterns[i]->recalculateBoundingBox();
}
}
for (int32_t i = cellCount - 1; i >= 0; i--)
{
if (patterns[i] == nullptr)
{
cellCount--;
std::swap(patterns[i], patterns[cellCount]);
//std::swap(prepMeshes[i], prepMeshes[cellCount]);
}
}
pattern->cellsCount = cellCount;
pattern->cellsMeshes = patterns;
//pattern->preparedMeshes = prepMeshes;
#ifdef USE_MERGED_MESH
pattern->outputEdges = (BooleanResultEdge*)NVBLAST_ALLOC(sizeof(BooleanResultEdge) * (cellCount * BLASTRT_MAX_EDGES_PER_CHUNK));
pattern->outputEdgesCount = (uint32_t*)NVBLAST_ALLOC(sizeof(uint32_t) * cellCount);
#endif
return pattern;
}
DamagePattern* PatternGeneratorImpl::generateBeamPattern(const BeamPatternDesc* desc)
{
std::vector<NvcVec3> points;
float radiusDelta = desc->radiusMax - desc->radiusMin;
for (uint32_t i = 0; i < desc->cellsCount; ++i)
{
float rd = desc->RNG() * radiusDelta + desc->radiusMin;
float phi = desc->RNG() * 6.28f;
float x = rd * cos(phi);
float y = rd * sin(phi);
float z = desc->RNG() - 1;
points.push_back({x, y, z});
}
auto pattern = generateVoronoiPattern((uint32_t)points.size(), points.data(), desc->interiorMaterialId);
pattern->activationType = DamagePattern::Line;
return pattern;
}
DamagePattern* PatternGeneratorImpl::generateRegularRadialPattern(const RegularRadialPatternDesc* desc)
{
SimplexNoise noise(desc->radialNoiseAmplitude, desc->radialNoiseFrequency, 3, desc->RNG() * 999999);
std::vector<NvVec3> points;
float radialDelta = (desc->radiusMax - desc->radiusMin) / desc->radialSteps;
float angularDelta = 2 * acos(-1.0f) / desc->angularSteps;
for (uint32_t i = 0; i < desc->radialSteps; ++i)
{
for (uint32_t j = 0; j < desc->angularSteps; ++j)
{
float angle = j * angularDelta + desc->RNG() * desc->angularNoiseAmplitude;
float rd = ((i + noise.sample(NvVec3(angle, 0, 0))) * radialDelta + desc->radiusMin);
float x = rd * cos(angle);
float y = rd * sin(angle);
float z = 0;
points.push_back(NvVec3(x, y, z));
}
}
float mrd = 0.0;
for (uint32_t i = 0; i < points.size(); ++i)
{
mrd = std::max(mrd, points[i].magnitude());
}
for (uint32_t i = 0; i < points.size(); ++i)
{
points[i] *= desc->radiusMax / mrd;
}
float ap = std::max(0.0f, desc->aperture);
auto pattern = generateVoronoiPatternInternal((uint32_t)points.size(), fromNvShared(points.data()), desc->interiorMaterialId, ap);
pattern->activationRadius = desc->radiusMax * desc->debrisRadiusMult;
pattern->activationType = (ap == 0) ? DamagePattern::Line : DamagePattern::Cone;
pattern->angle = ap;
return pattern;
}
void PatternGeneratorImpl::release()
{
NVBLAST_DELETE(this, PatternGeneratorImpl);
}
void DamagePatternImpl::release()
{
if (cellsMeshes)
{
for (uint32_t i = 0; i < cellsCount; i++)
{
cellsMeshes[i]->release();
}
NVBLAST_FREE(cellsMeshes);
}
#ifdef USE_MERGED_MESH
if (outputEdges)
{
NVBLAST_FREE(outputEdges);
}
if (outputEdgesCount)
{
NVBLAST_FREE(outputEdgesCount);
}
if (mergedMesh)
{
mergedMesh->release();
}
if (preparedMergedMesh)
{
preparedMergedMesh->release();
}
if (validFacetsForChunk)
{
for (uint32_t i = 0; i < cellsCount; i++)
{
if (validFacetsForChunk[i])
{
NVBLAST_FREE(validFacetsForChunk[i]);
}
}
NVBLAST_FREE(validFacetsForChunk);
}
#endif
NVBLAST_DELETE(this, DamagePatternImpl);
}
namespace Nv
{
namespace Blast
{
void savePatternToObj(DamagePattern* pattern)
{
FILE* fl = fopen("Pattern.obj", "w");
if (fl == nullptr)
{
return;
}
std::vector<uint32_t> trc;
for (uint32_t mesh = 0; mesh < pattern->cellsCount; ++mesh)
{
Mesh* m = pattern->cellsMeshes[mesh];
Triangulator trgl;
trgl.triangulate(m);
auto& t = trgl.getBaseMesh();
for (uint32_t v = 0; v < t.size(); ++v)
{
fprintf(fl, "v %f %f %f\n", t[v].a.p.x, t[v].a.p.y, t[v].a.p.z);
fprintf(fl, "v %f %f %f\n", t[v].b.p.x, t[v].b.p.y, t[v].b.p.z);
fprintf(fl, "v %f %f %f\n", t[v].c.p.x, t[v].c.p.y, t[v].c.p.z);
}
trc.push_back(static_cast<uint32_t>(t.size()));
}
uint32_t cv = 1;
for (uint32_t m = 0; m < trc.size(); ++m)
{
fprintf(fl, "g %d\n", m);
for (uint32_t k = 0; k < trc[m]; ++k)
{
fprintf(fl, "f %d %d %d \n", cv, cv + 1, cv + 2);
cv += 3;
}
}
fclose(fl);
}
}
} |
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringBooleanToolImpl.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTEXTAUTHORINGBOOLEANTOOLIMPL_H
#define NVBLASTEXTAUTHORINGBOOLEANTOOLIMPL_H
#include "NvBlastExtAuthoringTypes.h"
#include "NvBlastExtAuthoringInternalCommon.h"
#include "NvBlastExtAuthoringBooleanTool.h"
#include <vector>
#include "NvBlastTypes.h"
namespace Nv
{
namespace Blast
{
class Mesh;
/**
Boolean tool config, used to perform different operations: UNION, INTERSECTION, DIFFERENCE
*/
struct BooleanConf
{
int32_t ca, cb, ci;
BooleanConf(int32_t a, int32_t b, int32_t c) : ca(a), cb(b), ci(c)
{
}
};
namespace BooleanConfigurations
{
/**
Creates boolean tool configuration to perform intersection of meshes A and B.
*/
inline BooleanConf BOOLEAN_INTERSECTION()
{
return BooleanConf(0, 0, 1);
}
/**
Creates boolean tool configuration to perform union of meshes A and B.
*/
inline BooleanConf BOOLEAN_UNION()
{
return BooleanConf(1, 1, -1);
}
/**
Creates boolean tool configuration to perform difference of meshes(A - B).
*/
inline BooleanConf BOOLEAN_DIFFERENCE()
{
return BooleanConf(1, 0, -1);
}
}
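// Usage sketch (illustrative): a typical difference cut with BooleanEvaluator is
//   BooleanEvaluator evl;
//   evl.performBoolean(meshA, meshB, BooleanConfigurations::BOOLEAN_DIFFERENCE());
//   Mesh* result = evl.createNewMesh(); // nullptr if there is no result geometry
// The (ca, cb, ci) coefficients weight how facets of A, facets of B, and the
// inclusion classification contribute when deciding which facets are retained.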
/**
Structure which holds information about the intersection of a facet with an edge.
*/
struct EdgeFacetIntersectionData
{
int32_t edId;
int32_t intersectionType;
Vertex intersectionPoint;
EdgeFacetIntersectionData(int32_t edId, int32_t intersType, Vertex& inters) : edId(edId), intersectionType(intersType), intersectionPoint(inters)
{ }
EdgeFacetIntersectionData(int32_t edId) : edId(edId)
{ }
bool operator<(const EdgeFacetIntersectionData& b) const
{
return edId < b.edId;
}
};
class SpatialAccelerator;
/**
Tool for performing boolean operations on polygonal meshes.
The tool supports only closed meshes. Performing a boolean operation on meshes with holes can lead to unexpected behavior, e.g. holes in the resulting geometry.
*/
class BooleanEvaluator
{
public:
BooleanEvaluator();
~BooleanEvaluator();
/**
Perform boolean operation on two polygonal meshes (A and B).
\param[in] meshA Mesh A
\param[in] meshB Mesh B
\param[in] spAccelA Acceleration structure for mesh A
\param[in] spAccelB Acceleration structure for mesh B
\param[in] mode Boolean operation type
*/
void performBoolean(const Mesh* meshA, const Mesh* meshB, SpatialAccelerator* spAccelA, SpatialAccelerator* spAccelB, const BooleanConf& mode);
/**
Perform boolean operation on two polygonal meshes (A and B).
\param[in] meshA Mesh A
\param[in] meshB Mesh B
\param[in] mode Boolean operation type
*/
void performBoolean(const Mesh* meshA, const Mesh* meshB, const BooleanConf& mode);
/**
Perform cutting of a mesh with a large box that represents the cutting plane. This method skips part of the intersection computations, so it
should be used ONLY with a cutting box received from the getBigBox(...) method in NvBlastExtAuthoringMesh.h. For cutting, use only BOOLEAN_INTERSECTION or BOOLEAN_DIFFERENCE mode.
\param[in] meshA Mesh A
\param[in] meshB Cutting box
\param[in] spAccelA Acceleration structure for mesh A
\param[in] spAccelB Acceleration structure for cutting box
\param[in] mode Boolean operation type
*/
void performFastCutting(const Mesh* meshA, const Mesh* meshB, SpatialAccelerator* spAccelA, SpatialAccelerator* spAccelB, const BooleanConf& mode);
/**
Perform cutting of a mesh with a large box that represents the cutting plane. This method skips part of the intersection computations, so it
should be used ONLY with a cutting box received from the getBigBox(...) method in NvBlastExtAuthoringMesh.h. For cutting, use only BOOLEAN_INTERSECTION or BOOLEAN_DIFFERENCE mode.
\param[in] meshA Mesh A
\param[in] meshB Cutting box
\param[in] mode Boolean operation type
*/
void performFastCutting(const Mesh* meshA, const Mesh* meshB, const BooleanConf& mode);
/**
Test whether a point is contained in the mesh.
\param[in] mesh Mesh geometry
\param[in] point Point which should be tested
\return non-zero if the point is inside the mesh
*/
int32_t isPointContainedInMesh(const Mesh* mesh, const NvcVec3& point);
/**
Test whether a point is contained in the mesh.
\param[in] mesh Mesh geometry
\param[in] spAccel Acceleration structure for mesh
\param[in] point Point which should be tested
\return non-zero if the point is inside the mesh
*/
int32_t isPointContainedInMesh(const Mesh* mesh, SpatialAccelerator* spAccel, const NvcVec3& point);
/**
Generates the resulting polygonal mesh after performing a boolean operation.
\return The resulting mesh geometry, or nullptr if there is none.
*/
Mesh* createNewMesh();
/**
Reset tool state.
*/
void reset();
private:
void buildFaceFaceIntersections(const BooleanConf& mode);
void buildFastFaceFaceIntersection(const BooleanConf& mode);
void collectRetainedPartsFromA(const BooleanConf& mode);
void collectRetainedPartsFromB(const BooleanConf& mode);
int32_t addIfNotExist(const Vertex& p);
void addEdgeIfValid(const EdgeWithParent& ed);
private:
int32_t vertexMeshStatus03(const NvcVec3& p, const Mesh* mesh);
int32_t vertexMeshStatus30(const NvcVec3& p, const Mesh* mesh);
const Mesh* mMeshA;
const Mesh* mMeshB;
SpatialAccelerator* mAcceleratorA;
SpatialAccelerator* mAcceleratorB;
std::vector<EdgeWithParent> mEdgeAggregate;
std::vector<Vertex> mVerticesAggregate;
std::vector<std::vector<EdgeFacetIntersectionData> > mEdgeFacetIntersectionData12;
std::vector<std::vector<EdgeFacetIntersectionData> > mEdgeFacetIntersectionData21;
};
/// BooleanTool
class BooleanToolImpl : public BooleanTool
{
public:
/**
* Release BooleanTool memory
*/
virtual void release() override;
virtual Mesh* performBoolean(const Mesh* meshA, SpatialAccelerator* accelA, const Mesh* meshB, SpatialAccelerator* accelB, BooleanTool::Op op) override;
virtual bool pointInMesh(const Mesh* mesh, SpatialAccelerator* accel, const NvcVec3& point) override;
private:
BooleanEvaluator m_evaluator;
};
} // namespace Blast
} // namespace Nv
#endif // ifndef NVBLASTEXTAUTHORINGBOOLEANTOOLIMPL_H
|
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringMeshCleanerImpl.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#include "NvVec3.h"
#include "NvVec2.h"
#include "NvBounds3.h"
#include <vector>
#include <queue>
#include <map>
#include <NvBlastExtAuthoringMeshCleanerImpl.h>
#include <NvBlastExtAuthoringMeshImpl.h>
#include <NvBlastExtAuthoringInternalCommon.h>
#include <NvBlastNvSharedHelpers.h>
#include <boost/multiprecision/cpp_int.hpp>
using namespace nvidia;
using namespace Nv::Blast;
using namespace boost::multiprecision;
/**
Exact rational vector types.
*/
struct RVec3
{
cpp_rational x, y, z;
RVec3() {}
bool isZero()
{
return x.is_zero() && y.is_zero() && z.is_zero();
}
RVec3(cpp_rational _x, cpp_rational _y, cpp_rational _z)
{
x = _x;
y = _y;
z = _z;
}
RVec3(const NvcVec3& p)
{
x = cpp_rational(p.x);
y = cpp_rational(p.y);
z = cpp_rational(p.z);
}
NvVec3 toVec3()
{
return { x.convert_to<float>(), y.convert_to<float>(), z.convert_to<float>() };
}
RVec3 operator-(const RVec3& b) const
{
return RVec3(x - b.x, y - b.y, z - b.z);
}
RVec3 operator+(const RVec3& b) const
{
return RVec3(x + b.x, y + b.y, z + b.z);
}
RVec3 cross(const RVec3& in) const
{
return RVec3(y * in.z - in.y * z, in.x * z - x * in.z, x * in.y - in.x * y);
}
cpp_rational dot(const RVec3& in) const
{
return x * in.x + y * in.y + z * in.z;
}
RVec3 operator*(const cpp_rational& in) const
{
return RVec3(x * in, y * in, z * in);
}
};
struct RVec2
{
cpp_rational x, y;
RVec2() {}
RVec2(cpp_rational _x, cpp_rational _y)
{
x = _x;
y = _y;
}
RVec2(const NvcVec2& p)
{
x = cpp_rational(p.x);
y = cpp_rational(p.y);
}
NvVec2 toVec2()
{
return { x.convert_to<float>(), y.convert_to<float>() };
}
RVec2 operator-(const RVec2& b) const
{
return RVec2(x - b.x, y - b.y);
}
RVec2 operator+(const RVec2& b) const
{
return RVec2(x + b.x, y + b.y);
}
cpp_rational cross(const RVec2& in) const
{
return x * in.y - y * in.x;
}
cpp_rational dot(const RVec2& in) const
{
return x * in.x + y * in.y;
}
RVec2 operator*(const cpp_rational& in) const
{
return RVec2(x * in, y * in);
}
};
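// Illustrative aside: cpp_rational makes the geometric predicates in this file
// exact and deterministic. A minimal sketch of an exact 2D orientation test
// (orient2dExact is a hypothetical helper, not used elsewhere in this file);
// float arithmetic can return an inconsistent sign for nearly collinear points,
// while the rational version cannot:
static inline int32_t orient2dExact(const RVec2& a, const RVec2& b, const RVec2& c)
{
// Sign of (b - a) x (c - a): +1 = left turn, -1 = right turn, 0 = exactly collinear.
return (b - a).cross(c - a).sign();
}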
struct RatPlane
{
RVec3 n;
cpp_rational d;
RatPlane(const RVec3& a, const RVec3& b, const RVec3& c)
{
n = (b - a).cross(c - a);
d = -n.dot(a);
};
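// Note: distance() below is signed but unnormalized (n is not unit length), so
// only its sign and zero test are meaningful.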
cpp_rational distance(RVec3& in)
{
return n.dot(in) + d;
}
};
bool isSame(const RatPlane& a, const RatPlane& b)
{
if (a.d != b.d)
return false;
if (a.n.x != b.n.x || a.n.y != b.n.y || a.n.z != b.n.z)
return false;
return true;
}
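// Intersection of segment (a, b) with plane pl, solved parametrically:
// t = -(n.a + d) / (n.(b - a)). Callers must ensure the segment actually crosses
// the plane (n.(b - a) != 0), otherwise this divides by zero.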
RVec3 planeSegmInters(RVec3& a, RVec3& b, RatPlane& pl)
{
cpp_rational t = -(a.dot(pl.n) + pl.d) / pl.n.dot(b - a);
RVec3 on = a + (b - a) * t;
return on;
}
enum POINT_CLASS
{
ON_AB = 0,
ON_BC = 1,
ON_AC = 2,
INSIDE_TR,
OUTSIDE_TR,
ON_VERTEX
};
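// Classify point p against triangle (a, b, c) using the signs of three exact 2D
// cross products. Returns OUTSIDE_TR, INSIDE_TR, or the edge (ON_AB/ON_BC/ON_AC)
// the point lies on; a point landing on two edges at once (i.e. on a vertex) is
// reported as OUTSIDE_TR by this implementation.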
int32_t isPointInside(const RVec2& a, const RVec2& b, const RVec2& c, const RVec2& p)
{
cpp_rational v1 = (b - a).cross(p - a);
cpp_rational v2 = (c - b).cross(p - b);
cpp_rational v3 = (a - c).cross(p - c);
int32_t v1s = v1.sign();
int32_t v2s = v2.sign();
int32_t v3s = v3.sign();
if (v1s * v2s < 0 || v1s * v3s < 0 || v2s * v3s < 0)
return OUTSIDE_TR;
if (v1s == 0 && v2s == 0)
return OUTSIDE_TR;
if (v1s == 0 && v3s == 0)
return OUTSIDE_TR;
if (v2s == 0 && v3s == 0)
return OUTSIDE_TR;
if (v1s == 0)
return ON_AB;
if (v2s == 0)
return ON_BC;
if (v3s == 0)
return ON_AC;
return INSIDE_TR;
}
RVec2 getProjectedPointWithWinding(const RVec3& point, ProjectionDirections dir)
{
if (dir & YZ_PLANE)
{
if (dir & OPPOSITE_WINDING)
{
return RVec2(point.z, point.y);
}
else
return RVec2(point.y, point.z);
}
if (dir & ZX_PLANE)
{
if (dir & OPPOSITE_WINDING)
{
return RVec2(point.z, point.x);
}
return RVec2(point.x, point.z);
}
if (dir & OPPOSITE_WINDING)
{
return RVec2(point.y, point.x);
}
return RVec2(point.x, point.y);
}
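// Triangle record for the Delaunay triangulation. p[] holds vertex indices
// (p[0] == -1 marks the triangle as deleted), n[] the neighbor across edge k,
// where edge k runs from p[k] to p[(k + 1) % 3], and parentTriangle the index of
// the originating facet. Later, during exterior filtering, n[] is reused to
// store edge-map indices.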
struct DelTriangle
{
int32_t p[3];
int32_t n[3];
int32_t parentTriangle;
int32_t getEdWP(int32_t vrt)
{
if (p[0] == vrt)
return 1;
if (p[1] == vrt)
return 2;
if (p[2] == vrt)
return 0;
return -1;
}
int32_t getEdId(int32_t v1, int32_t v2)
{
if (p[0] == v1 && p[1] == v2)
return 0;
if (p[1] == v1 && p[2] == v2)
return 1;
if (p[2] == v1 && p[0] == v2)
return 2;
return -1;
}
int32_t getOppP(int32_t v1, int32_t v2)
{
if (p[0] == v1 && p[1] == v2)
return 2;
if (p[1] == v1 && p[2] == v2)
return 0;
if (p[2] == v1 && p[0] == v2)
return 1;
return -1;
}
int32_t getOppPoint(int32_t v1, int32_t v2)
{
if (p[0] != v1 && p[0] != v2)
return p[0];
if (p[1] != v1 && p[1] != v2)
return p[1];
if (p[2] != v1 && p[2] != v2)
return p[2];
return -1;
}
bool compare(const DelTriangle& t) const
{
if (p[0] == t.p[0] && p[1] == t.p[1] && p[2] == t.p[2])
return true;
if (p[1] == t.p[0] && p[2] == t.p[1] && p[0] == t.p[2])
return true;
if (p[2] == t.p[0] && p[0] == t.p[1] && p[1] == t.p[2])
return true;
return false;
}
};
struct DelEdge
{
int32_t s, e;
int32_t nr, nl;
};
bool isIntersectsTriangle(RVec2& a, RVec2& b, RVec2& c, RVec2& s, RVec2& e)
{
// Segment (s, e) properly intersects a triangle edge iff the edge's endpoints
// straddle the segment's supporting line and, symmetrically, s and e straddle
// the edge's supporting line.
RVec2 vec = e - s;
if ((a - s).cross(vec) * (b - s).cross(vec) < 0)
{
RVec2 vec2 = b - a;
if ((s - a).cross(vec2) * (e - a).cross(vec2) < 0)
return true;
}
if ((b - s).cross(vec) * (c - s).cross(vec) < 0)
{
RVec2 vec2 = c - b;
if ((s - b).cross(vec2) * (e - b).cross(vec2) < 0)
return true;
}
if ((a - s).cross(vec) * (c - s).cross(vec) < 0)
{
RVec2 vec2 = a - c;
if ((s - c).cross(vec2) * (e - c).cross(vec2) < 0)
return true;
}
return false;
}
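// Exact in-circumcircle predicate: sign of the 3x3 determinant with rows
// (ax-px, ay-py, |a-p|^2), (bx-px, by-py, |b-p|^2), (cx-px, cy-py, |c-p|^2).
// Returns 1 if p lies strictly inside the circumcircle of CCW triangle (a, b, c),
// -1 if outside, 0 if cocircular.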
inline int32_t inCircumcircle(RVec2& a, RVec2& b, RVec2& c, RVec2& p)
{
RVec2 ta = a - p;
RVec2 tb = b - p;
RVec2 tc = c - p;
cpp_rational ad = ta.dot(ta);
cpp_rational bd = tb.dot(tb);
cpp_rational cd = tc.dot(tc);
cpp_rational pred =
ta.x * (tb.y * cd - tc.y * bd) - ta.y * (tb.x * cd - tc.x * bd) + ad * (tb.x * tc.y - tc.x * tb.y);
if (pred > 0)
return 1;
if (pred < 0)
return -1;
return 0;
}
int32_t getEdge(std::vector<DelEdge>& edges, int32_t s, int32_t e)
{
for (uint32_t i = 0; i < edges.size(); ++i)
{
if (edges[i].s == s && edges[i].e == e)
return i;
}
edges.push_back(DelEdge());
edges.back().s = s;
edges.back().e = e;
return edges.size() - 1;
}
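// Brute-force O(n^2) rebuild of the neighbor links n[] by matching opposite
// half-edges between all pairs of live triangles (deleted ones, p[0] == -1,
// are skipped).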
void reubildAdjacency(std::vector<DelTriangle>& state)
{
for (uint32_t i = 0; i < state.size(); ++i)
{
state[i].n[0] = state[i].n[1] = state[i].n[2] = -1;
}
for (uint32_t i = 0; i < state.size(); ++i)
{
if (state[i].p[0] == -1)
continue;
for (uint32_t j = i + 1; j < state.size(); ++j)
{
if (state[j].p[0] == -1)
continue;
for (uint32_t k = 0; k < 3; ++k)
{
for (uint32_t c = 0; c < 3; ++c)
{
if (state[i].p[k] == state[j].p[(c + 1) % 3] && state[i].p[(k + 1) % 3] == state[j].p[c])
{
state[i].n[k] = j;
state[j].n[c] = i;
}
}
}
}
}
}
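// Incremental point insertion with Lawson edge flips. The triangle containing p
// is split 1-into-3 (p strictly inside) or, if p lands on an edge, the two
// triangles sharing that edge are split 2-into-4. Edges opposite to p on the new
// triangles are then flipped while the neighbor's circumcircle contains p,
// except for constraint edges listed in 'edges', which are never flipped.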
void insertPoint(std::vector<RVec2>& vertices, std::vector<DelTriangle>& state, int32_t p, const std::vector<Edge>& edges)
{
std::queue<int32_t> triangleToCheck;
for (uint32_t i = 0; i < state.size(); ++i)
{
if (state[i].p[0] == -1)
continue;
DelTriangle ctr = state[i];
int32_t cv = isPointInside(vertices[ctr.p[0]], vertices[ctr.p[1]], vertices[ctr.p[2]], vertices[p]);
if (cv == OUTSIDE_TR)
continue;
if (cv == INSIDE_TR)
{
uint32_t taInd = state.size();
uint32_t tbInd = state.size() + 1;
uint32_t tcInd = state.size() + 2;
state.resize(state.size() + 3);
state[taInd].p[0] = ctr.p[2];
state[taInd].p[1] = ctr.p[0];
state[taInd].p[2] = p;
state[taInd].n[0] = ctr.n[2];
state[taInd].n[1] = tbInd;
state[taInd].n[2] = tcInd;
state[tbInd].p[0] = ctr.p[0];
state[tbInd].p[1] = ctr.p[1];
state[tbInd].p[2] = p;
state[tbInd].n[0] = ctr.n[0];
state[tbInd].n[1] = tcInd;
state[tbInd].n[2] = taInd;
state[tcInd].p[0] = ctr.p[1];
state[tcInd].p[1] = ctr.p[2];
state[tcInd].p[2] = p;
state[tcInd].n[0] = ctr.n[1];
state[tcInd].n[1] = taInd;
state[tcInd].n[2] = tbInd;
triangleToCheck.push(taInd);
triangleToCheck.push(tbInd);
triangleToCheck.push(tcInd);
/**
Update the neighbor links of the triangles adjacent to the split triangle.
*/
int32_t nb = state[i].n[0];
if (nb != -1)
state[nb].n[state[nb].getEdId(state[i].p[1], state[i].p[0])] = tbInd;
nb = state[i].n[1];
if (nb != -1)
state[nb].n[state[nb].getEdId(state[i].p[2], state[i].p[1])] = tcInd;
nb = state[i].n[2];
if (nb != -1)
state[nb].n[state[nb].getEdId(state[i].p[0], state[i].p[2])] = taInd;
state[i].p[0] = -1;
}
else
{
uint32_t taInd = state.size();
uint32_t tbInd = state.size() + 1;
state.resize(state.size() + 2);
int32_t bPoint = state[i].p[(cv + 2) % 3];
state[taInd].p[0] = bPoint;
state[taInd].p[1] = state[i].p[cv];
state[taInd].p[2] = p;
state[tbInd].p[0] = bPoint;
state[tbInd].p[1] = p;
state[tbInd].p[2] = state[i].p[(cv + 1) % 3];
state[taInd].n[0] = state[i].n[(cv + 2) % 3];
state[taInd].n[1] = -1;
state[taInd].n[2] = tbInd;
state[tbInd].n[0] = taInd;
state[tbInd].n[1] = -1;
state[tbInd].n[2] = state[i].n[(cv + 1) % 3];
if (state[i].n[(cv + 1) % 3] != -1)
for (int32_t k = 0; k < 3; ++k)
if (state[state[i].n[(cv + 1) % 3]].n[k] == (int32_t)i)
{
state[state[i].n[(cv + 1) % 3]].n[k] = tbInd;
break;
}
if (state[i].n[(cv + 2) % 3] != -1)
for (int32_t k = 0; k < 3; ++k)
if (state[state[i].n[(cv + 2) % 3]].n[k] == (int32_t)i)
{
state[state[i].n[(cv + 2) % 3]].n[k] = taInd;
break;
}
triangleToCheck.push(taInd);
triangleToCheck.push(tbInd);
int32_t total = 2;
int32_t oppositeTr = 0;
if (state[i].n[cv] != -1)
{
oppositeTr = state[i].n[cv];
total += 2;
uint32_t tcInd = state.size();
uint32_t tdInd = state.size() + 1;
state.resize(state.size() + 2);
int32_t oped = state[oppositeTr].getEdId(state[i].p[(cv + 1) % 3], state[i].p[cv]);
state[tcInd].n[0] = state[oppositeTr].n[(oped + 2) % 3];
state[tcInd].n[1] = tbInd;
state[tbInd].n[1] = tcInd;
state[tcInd].n[2] = tdInd;
state[tdInd].n[0] = tcInd;
state[tdInd].n[1] = taInd;
state[taInd].n[1] = tdInd;
state[tdInd].n[2] = state[oppositeTr].n[(oped + 1) % 3];
if (state[oppositeTr].n[(oped + 2) % 3] != -1)
for (int32_t k = 0; k < 3; ++k)
if (state[state[oppositeTr].n[(oped + 2) % 3]].n[k] == oppositeTr)
{
state[state[oppositeTr].n[(oped + 2) % 3]].n[k] = tcInd;
break;
}
if (state[oppositeTr].n[(oped + 1) % 3] != -1)
for (int32_t k = 0; k < 3; ++k)
if (state[state[oppositeTr].n[(oped + 1) % 3]].n[k] == oppositeTr)
{
state[state[oppositeTr].n[(oped + 1) % 3]].n[k] = tdInd;
break;
}
int32_t pop = state[oppositeTr].p[(oped + 2) % 3];
state[tcInd].p[0] = pop;
state[tcInd].p[1] = state[i].p[(cv + 1) % 3];
state[tcInd].p[2] = p;
state[tdInd].p[0] = pop;
state[tdInd].p[1] = p;
state[tdInd].p[2] = state[i].p[cv];
state[oppositeTr].p[0] = -1;
triangleToCheck.push(tcInd);
triangleToCheck.push(tdInd);
}
state[i].p[0] = -1;
}
break;
}
while (!triangleToCheck.empty())
{
int32_t ctrid = triangleToCheck.front();
triangleToCheck.pop();
DelTriangle& ctr = state[ctrid];
int32_t oppTr = -5;
int32_t ced = 0;
for (uint32_t i = 0; i < 3; ++i)
{
if (ctr.p[i] != p && ctr.p[(i + 1) % 3] != p)
{
ced = i;
oppTr = ctr.n[i];
break;
}
}
if (oppTr == -1)
continue;
bool toCont = false;
for (size_t i = 0; i < edges.size(); ++i)
{
if ((int32_t)edges[i].s == ctr.p[ced] && ctr.p[(ced + 1) % 3] == (int32_t)edges[i].e)
{
toCont = true;
break;
}
if ((int32_t)edges[i].e == ctr.p[ced] && ctr.p[(ced + 1) % 3] == (int32_t)edges[i].s)
{
toCont = true;
break;
}
}
if (toCont)
continue;
DelTriangle& otr = state[oppTr];
if (inCircumcircle(vertices[state[oppTr].p[0]], vertices[state[oppTr].p[1]], vertices[state[oppTr].p[2]],
vertices[p]) > 0)
{
int32_t notPIndx = 0;
for (; notPIndx < 3; ++notPIndx)
{
if (otr.p[notPIndx] != ctr.p[0] && otr.p[notPIndx] != ctr.p[1] && otr.p[notPIndx] != ctr.p[2])
break;
}
int32_t oppCed = state[oppTr].getEdId(ctr.p[(ced + 1) % 3], ctr.p[ced]);
int32_t ntr1 = ctrid, ntr2 = oppTr;
DelTriangle nt1, nt2;
nt1.p[0] = state[oppTr].p[notPIndx];
nt1.p[1] = p;
nt1.n[0] = ntr2;
nt1.p[2] = ctr.p[ced];
nt1.n[1] = ctr.n[(ced + 2) % 3];
nt1.n[2] = otr.n[(oppCed + 1) % 3];
if (nt1.n[2] != -1)
for (uint32_t k = 0; k < 3; ++k)
if (state[nt1.n[2]].n[k] == oppTr)
state[nt1.n[2]].n[k] = ntr1;
nt2.p[0] = p;
nt2.p[1] = state[oppTr].p[notPIndx];
nt2.n[0] = ntr1;
nt2.p[2] = ctr.p[(ced + 1) % 3];
nt2.n[1] = otr.n[(oppCed + 2) % 3];
nt2.n[2] = ctr.n[(ced + 1) % 3];
if (nt2.n[2] != -1)
for (uint32_t k = 0; k < 3; ++k)
if (state[nt2.n[2]].n[k] == ctrid)
state[nt2.n[2]].n[k] = ntr2;
state[ntr1] = nt1;
state[ntr2] = nt2;
triangleToCheck.push(ntr1);
triangleToCheck.push(ntr2);
}
}
}
bool edgeIsIntersected(const RVec2& a, const RVec2& b, const RVec2& es, const RVec2& ee)
{
RVec2 t = b - a;
cpp_rational temp = (es - a).cross(t) * (ee - a).cross(t);
if (temp < 0)
{
t = es - ee;
if ((a - ee).cross(t) * (b - ee).cross(t) <= 0)
return true;
}
return false;
}
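// Retriangulate the pseudo-polygon left open after a constraint edge (ba, bb) is
// inserted: pick the vertex c of the chain such that no other chain vertex lies
// inside the circumcircle of (ba, bb, c), emit that triangle, and recurse on the
// two sub-chains to its left and right.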
void triangulatePseudoPolygon(std::vector<RVec2>& vertices, int32_t ba, int32_t bb, std::vector<int32_t>& pseudo,
std::vector<DelTriangle>& output)
{
if (pseudo.empty())
return;
int32_t c = 0;
if (pseudo.size() > 1)
{
for (uint32_t i = 1; i < pseudo.size(); ++i)
{
if (inCircumcircle(vertices[ba], vertices[bb], vertices[pseudo[c]], vertices[pseudo[i]]) > 0)
{
c = i;
}
}
std::vector<int32_t> toLeft;
std::vector<int32_t> toRight;
for (int32_t t = 0; t < c; ++t)
{
toLeft.push_back(pseudo[t]);
}
for (size_t t = c + 1; t < pseudo.size(); ++t)
{
toRight.push_back(pseudo[t]);
}
if (toLeft.size() > 0)
triangulatePseudoPolygon(vertices, ba, pseudo[c], toLeft, output);
if (toRight.size() > 0)
triangulatePseudoPolygon(vertices, pseudo[c], bb, toRight, output);
}
output.push_back(DelTriangle());
output.back().p[0] = ba;
output.back().p[1] = bb;
output.back().p[2] = pseudo[c];
}
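// Force the constraint edge (edBeg, edEnd) into the triangulation. If the edge is
// not already present, walk and delete the triangles crossed by the segment,
// collecting the vertices encountered above and below it, then retriangulate
// both pseudo-polygons against the new edge and rebuild adjacency.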
void insertEdge(std::vector<RVec2>& vertices, std::vector<DelTriangle>& output, int32_t edBeg, int32_t edEnd)
{
bool hasEdge = false;
for (auto& it : output)
{
for (uint32_t i = 0; i < 3; ++i)
if ((it.p[i] == edBeg || it.p[i] == edEnd) && (it.p[(i + 1) % 3] == edBeg || it.p[(i + 1) % 3] == edEnd))
{
hasEdge = true;
}
}
if (hasEdge)
return;
int32_t startTriangle = -1;
int32_t edg = -1;
for (uint32_t i = 0; i < output.size(); ++i)
{
if (output[i].p[0] == -1)
continue;
if (output[i].p[0] == edBeg || output[i].p[1] == edBeg || output[i].p[2] == edBeg)
{
edg = output[i].getEdWP(edBeg);
if (edgeIsIntersected(vertices[edBeg], vertices[edEnd], vertices[output[i].p[edg]],
vertices[output[i].p[(edg + 1) % 3]]))
{
startTriangle = i;
break;
}
}
}
if (startTriangle == -1)
{
return;
}
int32_t cvertex = edBeg;
std::vector<int32_t> pointsAboveEdge;
std::vector<int32_t> pointsBelowEdge;
RVec2 vec = vertices[edEnd] - vertices[edBeg];
if (vec.cross(vertices[output[startTriangle].p[edg]] - vertices[edBeg]) > 0)
{
pointsAboveEdge.push_back(output[startTriangle].p[edg]);
pointsBelowEdge.push_back(output[startTriangle].p[(edg + 1) % 3]);
}
else
{
pointsBelowEdge.push_back(output[startTriangle].p[edg]);
pointsAboveEdge.push_back(output[startTriangle].p[(edg + 1) % 3]);
}
while (1)
{
DelTriangle& ctr = output[startTriangle];
int32_t oed = ctr.getEdWP(cvertex);
int32_t nextTriangle = ctr.n[oed];
if (output[nextTriangle].p[0] == edEnd || output[nextTriangle].p[1] == edEnd || output[nextTriangle].p[2] == edEnd)
{
ctr.p[0] = -1;
output[nextTriangle].p[0] = -1;
break;
}
DelTriangle& otr = output[nextTriangle];
int32_t opp = otr.p[otr.getOppP(ctr.p[(oed + 1) % 3], ctr.p[oed % 3])];
int32_t nextPoint = 0;
if (vec.cross((vertices[opp] - vertices[edBeg])) > 0)
{
pointsAboveEdge.push_back(opp);
if (vec.cross(vertices[ctr.p[(oed + 1) % 3]] - vertices[edBeg]) > 0)
{
nextPoint = ctr.p[(oed + 1) % 3];
}
else
{
nextPoint = ctr.p[oed];
}
}
else
{
pointsBelowEdge.push_back(opp);
if (vec.cross(vertices[ctr.p[(oed + 1) % 3]] - vertices[edBeg]) < 0)
{
nextPoint = ctr.p[(oed + 1) % 3];
}
else
{
nextPoint = ctr.p[oed];
}
}
startTriangle = nextTriangle;
cvertex = nextPoint;
ctr.p[0] = -1;
}
triangulatePseudoPolygon(vertices, edBeg, edEnd, pointsAboveEdge, output);
std::reverse(pointsBelowEdge.begin(), pointsBelowEdge.end());
triangulatePseudoPolygon(vertices, edEnd, edBeg, pointsBelowEdge, output);
reubildAdjacency(output);
}
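// Build a constrained Delaunay triangulation of one facet. The first three edges
// are assumed to form the facet's original triangle, which seeds the
// triangulation; remaining edge endpoints are inserted as points, then every
// edge is inserted as a constraint. Vertices are projected to 2D along 'dr'.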
void buildCDT(std::vector<RVec3>& vertices, std::vector<Edge>& edges, std::vector<DelTriangle>& output,
ProjectionDirections dr)
{
std::vector<DelTriangle> state;
DelTriangle crt;
std::vector<bool> added(vertices.size(), false);
for (uint32_t i = 0; i < 3; ++i)
{
crt.p[i] = edges[i].s;
added[edges[i].s] = true;
crt.n[i] = -1; // dont have neighbors;
}
state.push_back(crt);
std::vector<RVec2> p2d(vertices.size());
for (uint32_t i = 0; i < vertices.size(); ++i)
{
p2d[i] = getProjectedPointWithWinding(vertices[i], dr);
}
for (size_t i = 0; i < edges.size(); ++i)
{
if (!added[edges[i].s])
{
insertPoint(p2d, state, edges[i].s, edges);
added[edges[i].s] = true;
}
if (!added[edges[i].e])
{
insertPoint(p2d, state, edges[i].e, edges);
added[edges[i].e] = true;
}
if (edges[i].s != edges[i].e)
{
insertEdge(p2d, state, edges[i].s, edges[i].e);
}
}
for (uint32_t t = 0; t < state.size(); ++t)
{
if (state[t].p[0] != -1)
{
output.push_back(state[t]);
}
}
}
int32_t intersectSegments(RVec3& s1, RVec3& e1, RVec3& s2, RVec3& e2, ProjectionDirections dir,
std::vector<cpp_rational>& t1v, std::vector<cpp_rational>& t2v);
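// Coplanar triangle-triangle case. Each stencil entry stores the facet's three
// edges as point pairs, so the triangle corners sit at indices 0, 1 and 3.
// Mutual 2D segment intersections plus point-in-triangle containment decide
// which clipped pieces of each triangle's edges are appended to the other
// triangle's stencil.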
void getTriangleIntersectionCoplanar(uint32_t tr1, uint32_t tr2, std::vector<std::vector<RVec3> >& stencil,
ProjectionDirections dr)
{
std::vector<cpp_rational> intr1[3];
std::vector<cpp_rational> intr2[3];
RVec3 p1[3];
p1[0] = stencil[tr1][0];
p1[1] = stencil[tr1][1];
p1[2] = stencil[tr1][3];
RVec3 p2[3];
p2[0] = stencil[tr2][0];
p2[1] = stencil[tr2][1];
p2[2] = stencil[tr2][3];
for (uint32_t i = 0; i < 3; ++i)
{
for (uint32_t j = 0; j < 3; ++j)
{
intersectSegments(p1[i], p1[(i + 1) % 3], p2[j], p2[(j + 1) % 3], dr, intr1[i], intr2[j]);
}
}
int32_t inRel1[3];
for (uint32_t i = 0; i < 3; ++i)
{
inRel1[i] = isPointInside(getProjectedPointWithWinding(p2[0], dr), getProjectedPointWithWinding(p2[1], dr),
getProjectedPointWithWinding(p2[2], dr), getProjectedPointWithWinding(p1[i], dr));
}
int32_t inRel2[3];
for (uint32_t i = 0; i < 3; ++i)
{
inRel2[i] = isPointInside(getProjectedPointWithWinding(p1[0], dr), getProjectedPointWithWinding(p1[1], dr),
getProjectedPointWithWinding(p1[2], dr), getProjectedPointWithWinding(p2[i], dr));
}
for (uint32_t i = 0; i < 3; ++i)
{
if (inRel1[i] == INSIDE_TR && inRel1[(i + 1) % 3] == INSIDE_TR)
{
stencil[tr2].push_back(p1[i]);
stencil[tr2].push_back(p1[(i + 1) % 3]);
}
else
{
if (inRel1[i] == INSIDE_TR && intr1[i].size() == 1)
{
stencil[tr2].push_back(p1[i]);
stencil[tr2].push_back((p1[(i + 1) % 3] - p1[i]) * intr1[i][0] + p1[i]);
}
if (inRel1[(i + 1) % 3] == INSIDE_TR && intr1[i].size() == 1)
{
stencil[tr2].push_back(p1[(i + 1) % 3]);
stencil[tr2].push_back((p1[(i + 1) % 3] - p1[i]) * intr1[i][0] + p1[i]);
}
if (intr1[i].size() == 2)
{
stencil[tr2].push_back((p1[(i + 1) % 3] - p1[i]) * intr1[i][0] + p1[i]);
stencil[tr2].push_back((p1[(i + 1) % 3] - p1[i]) * intr1[i][1] + p1[i]);
}
}
}
for (uint32_t i = 0; i < 3; ++i)
{
if (inRel2[i] == INSIDE_TR && inRel2[(i + 1) % 3] == INSIDE_TR)
{
stencil[tr1].push_back(p2[i]);
stencil[tr1].push_back(p2[(i + 1) % 3]);
}
else
{
if (inRel2[i] == INSIDE_TR && intr2[i].size() == 1)
{
stencil[tr1].push_back(p2[i]);
stencil[tr1].push_back((p2[(i + 1) % 3] - p2[i]) * intr2[i][0] + p2[i]);
}
if (inRel2[(i + 1) % 3] == INSIDE_TR && intr2[i].size() == 1)
{
stencil[tr1].push_back(p2[(i + 1) % 3]);
stencil[tr1].push_back((p2[(i + 1) % 3] - p2[i]) * intr2[i][0] + p2[i]);
}
if (intr2[i].size() == 2)
{
stencil[tr1].push_back((p2[(i + 1) % 3] - p2[i]) * intr2[i][0] + p2[i]);
stencil[tr1].push_back((p2[(i + 1) % 3] - p2[i]) * intr2[i][1] + p2[i]);
}
}
}
}
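// Non-coplanar triangle-triangle intersection, essentially an exact-arithmetic
// variant of Möller's interval test: classify each triangle's vertices by signed
// distance to the other's plane, parameterize both crossing intervals along the
// plane-plane intersection line, and if the intervals overlap append the shared
// segment to both stencils. Returns 1 if a segment was produced, 0 otherwise.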
int32_t
getTriangleIntersection3d(uint32_t tr1, uint32_t tr2, std::vector<std::vector<RVec3> >& stencil, ProjectionDirections dr)
{
RatPlane pl1(stencil[tr1][0], stencil[tr1][1], stencil[tr1][3]);
if (pl1.n.isZero())
{
std::swap(tr1, tr2);
pl1 = RatPlane(stencil[tr1][0], stencil[tr1][1], stencil[tr1][3]);
if (pl1.n.isZero())
return 0;
}
cpp_rational d1 = pl1.distance(stencil[tr2][0]);
cpp_rational d2 = pl1.distance(stencil[tr2][1]);
cpp_rational d3 = pl1.distance(stencil[tr2][3]);
int32_t sd1 = d1.sign();
int32_t sd2 = d2.sign();
int32_t sd3 = d3.sign();
if (sd1 == 0 && sd2 == 0 && sd3 == 0)
{
getTriangleIntersectionCoplanar(tr1, tr2, stencil, dr);
return 0;
}
/**
No intersection: all vertices of the second triangle lie strictly on one
side of the first triangle's plane.
*/
if (sd1 < 0 && sd2 < 0 && sd3 < 0)
return 0;
if (sd1 > 0 && sd2 > 0 && sd3 > 0)
return 0;
RVec3 tb0 = stencil[tr2][0];
RVec3 tb1 = stencil[tr2][1];
RVec3 tb2 = stencil[tr2][3];
if (sd1 * sd3 > 0)
{
std::swap(tb1, tb2);
std::swap(d2, d3);
}
else
{
if (sd2 * sd3 > 0)
{
std::swap(tb0, tb2);
std::swap(d1, d3);
}
else
{
if (sd3 == 0 && sd1 * sd2 < 0)
{
std::swap(tb0, tb2);
std::swap(d1, d3);
}
}
}
RatPlane pl2(stencil[tr2][0], stencil[tr2][1], stencil[tr2][3]);
cpp_rational d21 = pl2.distance(stencil[tr1][0]);
cpp_rational d22 = pl2.distance(stencil[tr1][1]);
cpp_rational d23 = pl2.distance(stencil[tr1][3]);
int32_t sd21 = d21.sign();
int32_t sd22 = d22.sign();
int32_t sd23 = d23.sign();
if (sd21 < 0 && sd22 < 0 && sd23 < 0)
return 0;
if (sd21 > 0 && sd22 > 0 && sd23 > 0)
return 0;
RVec3 ta0 = stencil[tr1][0];
RVec3 ta1 = stencil[tr1][1];
RVec3 ta2 = stencil[tr1][3];
if (sd21 * sd23 > 0)
{
std::swap(ta1, ta2);
std::swap(d22, d23);
}
else
{
if (sd22 * sd23 > 0)
{
std::swap(ta0, ta2);
std::swap(d21, d23);
}
else
{
if (sd23 == 0 && sd21 * sd22 < 0)
{
std::swap(ta0, ta2);
std::swap(d21, d23);
}
}
}
//////////////////////////////////////////////////
RVec3 dir = ta2 - ta0;
cpp_rational dirPlaneDot = dir.dot(pl2.n);
RVec3 pointOnIntersectionLine;
if (dirPlaneDot != 0)
{
pointOnIntersectionLine = ta0 - dir * (d21 / dirPlaneDot);
}
else
{
pointOnIntersectionLine = ta0;
}
RVec3 interLineDir = pl1.n.cross(pl2.n);
cpp_rational sqd = interLineDir.dot(interLineDir);
if (sqd.is_zero())
return 0;
cpp_rational t1p2 = (ta1 - pointOnIntersectionLine).dot(interLineDir) / sqd;
cpp_rational t1p3 = (ta2 - pointOnIntersectionLine).dot(interLineDir) / sqd;
cpp_rational t1p2param = t1p2;
if (d22 != d23)
{
t1p2param = t1p2 + (t1p3 - t1p2) * (d22 / (d22 - d23));
}
t1p2 = (tb0 - pointOnIntersectionLine).dot(interLineDir) / sqd;
t1p3 = (tb2 - pointOnIntersectionLine).dot(interLineDir) / sqd;
cpp_rational t2p1param = t1p2;
if (d1 != d3)
{
t2p1param = t1p2 + (t1p3 - t1p2) * d1 / (d1 - d3);
}
t1p2 = (tb1 - pointOnIntersectionLine).dot(interLineDir) / sqd;
cpp_rational t2p2param = t1p2;
if (d2 != d3)
{
t2p2param = t1p2 + (t1p3 - t1p2) * d2 / (d2 - d3);
}
cpp_rational beg1 = 0;
if (t1p2param < 0)
{
std::swap(beg1, t1p2param);
}
if (t2p2param < t2p1param)
{
std::swap(t2p2param, t2p1param);
}
cpp_rational minEnd = std::min(t1p2param, t2p2param);
cpp_rational maxBeg = std::max(beg1, t2p1param);
if (minEnd > maxBeg)
{
RVec3 p1 = pointOnIntersectionLine + interLineDir * maxBeg;
RVec3 p2 = pointOnIntersectionLine + interLineDir * minEnd;
stencil[tr1].push_back(p1);
stencil[tr1].push_back(p2);
stencil[tr2].push_back(p1);
stencil[tr2].push_back(p2);
return 1;
}
return 0;
}
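// Project both 3D segments to 2D along 'dir' and intersect them. Parametric hits
// strictly interior to segment 1 (and within segment 2) are appended to t1v, and
// symmetrically for t2v; the collinear-overlap case instead contributes the
// parameters of the other segment's projected endpoints.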
int32_t intersectSegments(RVec3& s1, RVec3& e1, RVec3& s2, RVec3& e2, ProjectionDirections dir,
std::vector<cpp_rational>& t1v, std::vector<cpp_rational>& t2v)
{
RVec2 s1p = getProjectedPointWithWinding(s1, dir);
RVec2 e1p = getProjectedPointWithWinding(e1, dir);
RVec2 s2p = getProjectedPointWithWinding(s2, dir);
RVec2 e2p = getProjectedPointWithWinding(e2, dir);
RVec2 dir1 = e1p - s1p;
RVec2 dir2 = s2p - e2p;
cpp_rational crs = dir1.cross(dir2);
if (crs != 0)
{
cpp_rational c1 = s2p.x - s1p.x;
cpp_rational c2 = s2p.y - s1p.y;
cpp_rational det1 = c1 * dir2.y - c2 * dir2.x;
cpp_rational det2 = dir1.x * c2 - dir1.y * c1;
cpp_rational t1 = det1 / crs;
cpp_rational t2 = det2 / crs;
if (t1 > 0 && t1 < 1 && (t2 >= 0 && t2 <= 1))
{
t1v.push_back(t1);
}
if (t2 > 0 && t2 < 1 && (t1 >= 0 && t1 <= 1))
{
t2v.push_back(t2);
}
}
else
{
if (dir1.cross(s2p - s1p) == 0)
{
if (dir1.x != 0)
{
cpp_rational t1 = (s2p.x - s1p.x) / dir1.x;
cpp_rational t2 = (e2p.x - s1p.x) / dir1.x;
if (t1 > 0 && t1 < 1)
t1v.push_back(t1);
if (t2 > 0 && t2 < 1)
t1v.push_back(t2);
}
else
{
if (dir1.y != 0)
{
cpp_rational t1 = (s2p.y - s1p.y) / dir1.y;
cpp_rational t2 = (e2p.y - s1p.y) / dir1.y;
if (t1 > 0 && t1 < 1)
t1v.push_back(t1);
if (t2 > 0 && t2 < 1)
t1v.push_back(t2);
}
}
}
if (dir2.cross(s1p - s2p) == 0)
{
dir2 = e2p - s2p;
if (dir2.x != 0)
{
cpp_rational t1 = (s1p.x - s2p.x) / dir2.x;
cpp_rational t2 = (e1p.x - s2p.x) / dir2.x;
if (t1 > 0 && t1 < 1)
t2v.push_back(t1);
if (t2 > 0 && t2 < 1)
t2v.push_back(t2);
}
else
{
if (dir2.y != 0)
{
cpp_rational t1 = (s1p.y - s2p.y) / dir2.y;
cpp_rational t2 = (e1p.y - s2p.y) / dir2.y;
if (t1 > 0 && t1 < 1)
t2v.push_back(t1);
if (t2 > 0 && t2 < 1)
t2v.push_back(t2);
}
}
}
}
return 1;
}
struct RVec3Comparer
{
bool operator()(const RVec3& a, const RVec3& b) const
{
if (a.x < b.x)
return true;
if (a.x > b.x)
return false;
if (a.y < b.y)
return true;
if (a.y > b.y)
return false;
if (a.z < b.z)
return true;
return false;
}
};
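// Barycentric coordinates via the cross-product trick: with
// v1 = (b.x-a.x, c.x-a.x, a.x-p.x) and v2 = (b.y-a.y, c.y-a.y, a.y-p.y),
// r = v1 x v2 is proportional to (u, v, 1), so u = r.x / r.z and v = r.y / r.z.
// Degenerate (zero-area) triangles make r.z zero and must be avoided by callers.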
void getBarycentricCoords(NvVec2& a, NvVec2& b, NvVec2& c, NvVec2& p, float& u, float& v)
{
NvVec3 v1(b.x - a.x, c.x - a.x, a.x - p.x);
NvVec3 v2(b.y - a.y, c.y - a.y, a.y - p.y);
NvVec3 resl = v1.cross(v2);
u = resl.x / resl.z;
v = resl.y / resl.z;
}
Mesh* MeshCleanerImpl::cleanMesh(const Mesh* mesh)
{
/**
======= Get mesh data ===========
*/
std::vector<Vertex> vertices;
std::vector<Edge> edges;
std::vector<Facet> facets;
vertices.resize(mesh->getVerticesCount());
edges.resize(mesh->getEdgesCount());
facets.resize(mesh->getFacetCount());
nvidia::NvBounds3 bnd;
bnd.setEmpty();
for (uint32_t i = 0; i < mesh->getVerticesCount(); ++i)
{
vertices[i] = mesh->getVertices()[i];
bnd.include(toNvShared(vertices[i].p));
}
for (uint32_t i = 0; i < mesh->getEdgesCount(); ++i)
{
edges[i] = mesh->getEdges()[i];
}
for (uint32_t i = 0; i < mesh->getFacetCount(); ++i)
{
facets[i] = mesh->getFacetsBuffer()[i];
}
//======================================
/**
Transform vertices to fit the unit cube and snap them to a grid.
Snapping welds nearly coincident vertices and limits coordinate precision
before the exact-arithmetic phase below.
**/
float scale = 1.0f / bnd.getExtents().abs().maxElement();
int32_t gridSize = 10000; // Grid resolution to which vertex positions will be snapped.
for (uint32_t i = 0; i < mesh->getVerticesCount(); ++i)
{
vertices[i].p = (vertices[i].p - fromNvShared(bnd.minimum)) * scale;
vertices[i].p.x = std::floor(vertices[i].p.x * gridSize) / gridSize;
vertices[i].p.y = std::floor(vertices[i].p.y * gridSize) / gridSize;
vertices[i].p.z = std::floor(vertices[i].p.z * gridSize) / gridSize;
}
std::vector<std::vector<RVec3> > triangleStencil(facets.size());
std::vector<NvVec3> facetsNormals(facets.size());
std::vector<NvBounds3> facetBound(facets.size());
for (uint32_t tr1 = 0; tr1 < facets.size(); ++tr1)
{
if (facets[tr1].edgesCount != 3)
{
return nullptr;
}
int32_t fed = facets[tr1].firstEdgeNumber;
triangleStencil[tr1].push_back(vertices[edges[fed].s].p);
triangleStencil[tr1].push_back(vertices[edges[fed].e].p);
triangleStencil[tr1].push_back(vertices[edges[fed + 1].s].p);
triangleStencil[tr1].push_back(vertices[edges[fed + 1].e].p);
triangleStencil[tr1].push_back(vertices[edges[fed + 2].s].p);
triangleStencil[tr1].push_back(vertices[edges[fed + 2].e].p);
facetBound[tr1].setEmpty();
facetBound[tr1].include(toNvShared(vertices[edges[fed].s].p));
facetBound[tr1].include(toNvShared(vertices[edges[fed].e].p));
facetBound[tr1].include(toNvShared(vertices[edges[fed + 2].s].p));
facetBound[tr1].fattenFast(0.001f);
facetsNormals[tr1] = toNvShared(vertices[edges[fed + 1].s].p - vertices[edges[fed].s].p)
.cross(toNvShared(vertices[edges[fed + 2].s].p - vertices[edges[fed].s].p));
}
/**
Build intersections between all pairs of triangles.
*/
for (uint32_t tr1 = 0; tr1 < facets.size(); ++tr1)
{
if (triangleStencil[tr1].empty())
continue;
for (uint32_t tr2 = tr1 + 1; tr2 < facets.size(); ++tr2)
{
if (triangleStencil[tr2].empty())
continue;
if (facetBound[tr1].intersects(facetBound[tr2]) == false)
continue;
getTriangleIntersection3d(tr1, tr2, triangleStencil, getProjectionDirection(facetsNormals[tr1]));
}
}
/**
Re-intersect all stencil segments against each other and split them at
mutual intersection points.
*/
for (uint32_t tr = 0; tr < triangleStencil.size(); ++tr)
{
std::vector<RVec3>& ctr = triangleStencil[tr];
std::vector<std::vector<cpp_rational> > perSegmentInters(ctr.size() / 2);
for (uint32_t sg1 = 6; sg1 < ctr.size(); sg1 += 2)
{
for (uint32_t sg2 = sg1 + 2; sg2 < ctr.size(); sg2 += 2)
{
intersectSegments(ctr[sg1], ctr[sg1 + 1], ctr[sg2], ctr[sg2 + 1],
getProjectionDirection(facetsNormals[tr]), perSegmentInters[sg1 / 2],
perSegmentInters[sg2 / 2]);
}
}
std::vector<RVec3> newStencil;
newStencil.reserve(ctr.size());
for (uint32_t i = 0; i < ctr.size(); i += 2)
{
int32_t csm = i / 2;
if (perSegmentInters[csm].size() == 0)
{
newStencil.push_back(ctr[i]);
newStencil.push_back(ctr[i + 1]);
}
else
{
cpp_rational current = 0;
newStencil.push_back(ctr[i]);
std::sort(perSegmentInters[csm].begin(), perSegmentInters[csm].end());
for (size_t j = 0; j < perSegmentInters[csm].size(); ++j)
{
if (perSegmentInters[csm][j] > current)
{
current = perSegmentInters[csm][j];
RVec3 pnt = (ctr[i + 1] - ctr[i]) * current + ctr[i];
newStencil.push_back(pnt);
newStencil.push_back(pnt);
}
}
newStencil.push_back(ctr[i + 1]);
}
}
ctr = newStencil;
}
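/**
Weld identical rational points into a single index space and convert each
triangle's stencil segments into deduplicated edges.
*/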
std::vector<RVec3> finalPoints;
std::vector<std::vector<Edge> > tsten(facets.size());
{
std::map<RVec3, uint32_t, RVec3Comparer> mapping;
for (uint32_t tr1 = 0; tr1 < triangleStencil.size(); ++tr1)
{
for (uint32_t j = 0; j < triangleStencil[tr1].size(); j += 2)
{
auto it = mapping.find(triangleStencil[tr1][j]);
int32_t pt = 0;
if (it == mapping.end())
{
mapping[triangleStencil[tr1][j]] = finalPoints.size();
pt = finalPoints.size();
finalPoints.push_back(triangleStencil[tr1][j]);
}
else
{
pt = it->second;
}
Edge newed;
newed.s = pt;
it = mapping.find(triangleStencil[tr1][j + 1]);
if (it == mapping.end())
{
mapping[triangleStencil[tr1][j + 1]] = finalPoints.size();
pt = finalPoints.size();
finalPoints.push_back(triangleStencil[tr1][j + 1]);
}
else
{
pt = it->second;
}
newed.e = pt;
bool hasNewEdge = false;
for (uint32_t e = 0; e < tsten[tr1].size(); ++e)
{
if (tsten[tr1][e].s == newed.s && tsten[tr1][e].e == newed.e)
{
hasNewEdge = true;
break;
}
if (tsten[tr1][e].e == newed.s && tsten[tr1][e].s == newed.e)
{
hasNewEdge = true;
break;
}
}
if (!hasNewEdge)
tsten[tr1].push_back(newed);
}
}
}
/**
Build a constrained Delaunay triangulation for every facet whose stencil
gained extra edges.
*/
std::vector<DelTriangle> trs;
for (uint32_t i = 0; i < tsten.size(); ++i)
{
if (tsten[i].size() < 3)
continue;
if (tsten[i].size() > 3)
{
int32_t oldSize = trs.size();
buildCDT(finalPoints, tsten[i], trs, getProjectionDirection(facetsNormals[i]));
for (uint32_t k = oldSize; k < trs.size(); ++k)
trs[k].parentTriangle = i;
}
else
{
trs.push_back(DelTriangle());
trs.back().parentTriangle = i;
for (uint32_t v = 0; v < 3; ++v)
trs.back().p[v] = tsten[i][v].s;
}
}
/**
Remove 'deleted' triangles from array.
*/
{
std::vector<DelTriangle> trstemp;
trstemp.reserve(trs.size());
for (uint32_t i = 0; i < trs.size(); ++i)
{
if (trs[i].p[0] != -1)
trstemp.push_back(trs[i]);
}
trs = trstemp;
}
/**
Filter exterior surface
*/
std::vector<bool> fillingMask(trs.size(), false);
std::map<std::pair<int32_t, int32_t>, int32_t> edgeMap;
std::vector<std::vector<int32_t> > edgeToTriangleMapping;
for (uint32_t i = 0; i < trs.size(); ++i)
{
if (trs[i].p[0] == -1)
continue;
if (trs[i].p[0] == trs[i].p[1] || trs[i].p[2] == trs[i].p[1] || trs[i].p[2] == trs[i].p[0])
{
trs[i].p[0] = -1;
continue;
}
#if 0 // Filter null-area triangles.
if ((finalPoints[trs[i].p[1]] - finalPoints[trs[i].p[0]]).cross(finalPoints[trs[i].p[2]] - finalPoints[trs[i].p[0]]).isZero())
{
trs[i].p[0] = -1;
continue;
}
#endif
for (uint32_t k = 0; k < 3; ++k)
{
int32_t es = trs[i].p[k];
int32_t ee = trs[i].p[(k + 1) % 3];
if (es > ee)
{
std::swap(es, ee);
}
auto pr = std::make_pair(es, ee);
auto iter = edgeMap.find(pr);
if (iter == edgeMap.end())
{
edgeMap[pr] = edgeToTriangleMapping.size();
trs[i].n[k] = edgeToTriangleMapping.size();
edgeToTriangleMapping.resize(edgeToTriangleMapping.size() + 1);
edgeToTriangleMapping.back().push_back(i);
}
else
{
for (uint32_t j = 0; j < edgeToTriangleMapping[iter->second].size(); ++j)
{
if (trs[edgeToTriangleMapping[iter->second][j]].compare(trs[i]))
{
trs[i].p[0] = -1;
break;
}
}
if (trs[i].p[0] != -1)
{
trs[i].n[k] = iter->second;
edgeToTriangleMapping[iter->second].push_back(i);
}
}
}
}
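/**
Flood-fill the exterior surface: seed with the triangle touching the maximum
x-coordinate whose parent facet faces +x, then spread across manifold edges;
at non-manifold edges (more than two incident triangles) follow the neighbor
chosen by a dihedral-angle comparison around the shared edge.
*/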
std::queue<int32_t> trque;
float maxx = -1000;
int32_t best = 0;
for (uint32_t i = 0; i < trs.size(); ++i)
{
if (trs[i].p[0] == -1)
continue;
float m = std::max(
finalPoints[trs[i].p[0]].x.convert_to<float>(),
std::max(finalPoints[trs[i].p[1]].x.convert_to<float>(), finalPoints[trs[i].p[2]].x.convert_to<float>()));
if (m > maxx && facetsNormals[trs[i].parentTriangle].x > 0)
{
maxx = m;
best = i;
}
}
if (!trs.empty())
{
trque.push(best);
}
while (!trque.empty())
{
int32_t trid = trque.front();
fillingMask[trid] = true;
DelTriangle& tr = trs[trque.front()];
trque.pop();
for (uint32_t ed = 0; ed < 3; ++ed)
{
auto& tlist = edgeToTriangleMapping[tr.n[ed]];
if (tlist.size() == 2)
{
for (uint32_t k = 0; k < tlist.size(); ++k)
{
int32_t to = tlist[k];
if (to != trid && !fillingMask[to] && edgeToTriangleMapping[trs[to].n[0]].size() > 0 &&
edgeToTriangleMapping[trs[to].n[1]].size() > 0 && edgeToTriangleMapping[trs[to].n[2]].size() > 0)
{
trque.push(tlist[k]);
fillingMask[tlist[k]] = true;
}
}
}
if (tlist.size() > 2)
{
int32_t bestPath = (tlist[0] == trid) ? tlist[1] : tlist[0];
RVec3 start = finalPoints[trs[trid].p[ed]];
RVec3 axis = finalPoints[trs[trid].p[(ed + 1) % 3]] - start;
RVec3 nAxis = finalPoints[trs[trid].p[(ed + 2) % 3]] - start;
RVec3 normal = axis.cross(nAxis);
uint32_t op = trs[bestPath].getOppPoint(trs[trid].p[ed], trs[trid].p[(ed + 1) % 3]);
RVec3 dir2 = (finalPoints[op] - start);
RVec3 normal2 = dir2.cross(axis);
cpp_rational bestDir = normal.cross(normal2).dot(axis);
cpp_rational oldDist = normal2.dot(normal2);
for (uint32_t k = 0; k < tlist.size(); ++k)
{
if (tlist[k] == trid)
continue;
op = trs[tlist[k]].getOppPoint(trs[trid].p[ed], trs[trid].p[(ed + 1) % 3]);
dir2 = (finalPoints[op] - start);
normal2 = dir2.cross(axis);
cpp_rational newOne = normal.cross(normal2).dot(axis);
if (newOne * oldDist < bestDir * normal2.dot(normal2))
{
oldDist = normal2.dot(normal2);
bestPath = tlist[k];
bestDir = newOne;
}
}
if (!fillingMask[bestPath] && edgeToTriangleMapping[trs[bestPath].n[0]].size() > 0 &&
edgeToTriangleMapping[trs[bestPath].n[1]].size() > 0 &&
edgeToTriangleMapping[trs[bestPath].n[2]].size() > 0)
{
trque.push(bestPath);
fillingMask[bestPath] = true;
}
}
edgeToTriangleMapping[tr.n[ed]].clear();
}
}
for (uint32_t id = 0; id < trs.size(); ++id)
{
if (!fillingMask[id])
{
trs[id].p[0] = -1; // Remove triangle
}
}
/////////////////////////////////////////////////////////////////////////////////////////////
std::vector<NvVec3> newVertices;
newVertices.resize(finalPoints.size());
for (uint32_t i = 0; i < finalPoints.size(); ++i)
{
newVertices[i].x = finalPoints[i].x.convert_to<float>();
newVertices[i].y = finalPoints[i].y.convert_to<float>();
newVertices[i].z = finalPoints[i].z.convert_to<float>();
}
/**
Rescale mesh to initial coordinates.
*/
for (uint32_t i = 0; i < finalPoints.size(); ++i)
{
newVertices[i] = newVertices[i] * (1.0f / scale) + bnd.minimum;
}
for (uint32_t i = 0; i < vertices.size(); ++i)
{
vertices[i].p = vertices[i].p * (1.0f / scale) + fromNvShared(bnd.minimum);
}
std::vector<Triangle> result;
result.reserve(trs.size());
{
std::vector<NvVec2> projectedTriangles(facets.size() * 3);
std::vector<Vertex> normalTriangles(facets.size() * 3);
for (uint32_t i = 0; i < facets.size(); ++i)
{
for (uint32_t k = 0; k < 3; ++k)
{
normalTriangles[i * 3 + k] = vertices[edges[facets[i].firstEdgeNumber + k].s];
projectedTriangles[i * 3 + k] = getProjectedPointWithWinding(
vertices[edges[facets[i].firstEdgeNumber + k].s].p, getProjectionDirection(facetsNormals[i])).toVec2();
}
}
for (uint32_t i = 0; i < trs.size(); ++i)
{
if (trs[i].p[0] == -1)
continue;
int32_t id = 0;
int32_t parentTriangle = trs[i].parentTriangle;
float u = 0, v = 0;
result.resize(result.size() + 1);
result.back().materialId = facets[parentTriangle].materialId;
result.back().smoothingGroup = facets[parentTriangle].smoothingGroup;
for (auto vert : { &result.back().a, &result.back().b, &result.back().c })
{
toNvShared(vert->p) = newVertices[trs[i].p[id]];
NvVec2 p = getProjectedPointWithWinding(vert->p, getProjectionDirection(facetsNormals[parentTriangle])).toVec2();
getBarycentricCoords(projectedTriangles[parentTriangle * 3], projectedTriangles[parentTriangle * 3 + 1],
projectedTriangles[parentTriangle * 3 + 2], p, u, v);
vert->uv[0] = (1 - u - v) * normalTriangles[parentTriangle * 3].uv[0] +
u * normalTriangles[parentTriangle * 3 + 1].uv[0] +
v * normalTriangles[parentTriangle * 3 + 2].uv[0];
vert->n = (1 - u - v) * normalTriangles[parentTriangle * 3].n +
u * normalTriangles[parentTriangle * 3 + 1].n + v * normalTriangles[parentTriangle * 3 + 2].n;
++id;
}
}
}
/**
Serialize the result triangles into flat buffers and create the output Mesh.
*/
std::vector<NvcVec3> newMeshVertices(result.size() * 3);
std::vector<NvcVec3> newMeshNormals(result.size() * 3);
std::vector<NvcVec2> newMeshUvs(result.size() * 3);
std::vector<int32_t> newMaterialIds(result.size());
std::vector<int32_t> newSmoothingGroups(result.size());
for (uint32_t i = 0; i < result.size(); ++i)
{
Vertex* arr[3] = { &result[i].a, &result[i].b, &result[i].c };
for (uint32_t k = 0; k < 3; ++k)
{
newMeshVertices[i * 3 + k] = arr[k]->p;
newMeshNormals[i * 3 + k] = arr[k]->n;
newMeshUvs[i * 3 + k] = arr[k]->uv[0];
}
}
std::vector<uint32_t> serializedIndices;
serializedIndices.reserve(result.size() * 3);
int32_t cindex = 0;
for (uint32_t i = 0; i < result.size(); ++i)
{
newMaterialIds[i] = result[i].materialId;
newSmoothingGroups[i] = result[i].smoothingGroup;
for (uint32_t pi = 0; pi < 3; ++pi)
serializedIndices.push_back(cindex++);
}
MeshImpl* rMesh = new MeshImpl(newMeshVertices.data(), newMeshNormals.data(), newMeshUvs.data(),
static_cast<uint32_t>(newMeshVertices.size()), serializedIndices.data(),
static_cast<uint32_t>(serializedIndices.size()));
rMesh->setMaterialId(newMaterialIds.data());
rMesh->setSmoothingGroup(newSmoothingGroups.data());
return rMesh;
}
void MeshCleanerImpl::release()
{
delete this;
}
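// Illustrative usage sketch (hypothetical caller, not part of this file):
//
// MeshCleanerImpl cleaner;
// Mesh* cleaned = cleaner.cleanMesh(inputMesh); // returns nullptr if any facet is not a triangle
// if (cleaned)
// {
// /* consume the cleaned mesh, then release it */
// }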
|
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringMeshNoiser.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
#ifndef NVBLASTEXTAUTHORINGMESHNOISER_H
#define NVBLASTEXTAUTHORINGMESHNOISER_H
#include <vector>
#include <map>
#include "NvBlastExtAuthoringInternalCommon.h"
namespace Nv
{
namespace Blast
{
class SimplexNoise;
/**
Structure used during the tessellation stage. Maps an edge to its two neighboring triangles.
*/
struct EdgeToTriangles
{
int32_t tr[2];
int32_t c;
EdgeToTriangles()
{
c = 0;
}
/**
Add a triangle to the edge. Must not be called more than twice for one edge.
*/
void add(int32_t t)
{
tr[c] = t;
++c;
}
/**
Replaces mapping from one triangle to another.
*/
void replace(int32_t from, int32_t to)
{
if (tr[0] == from)
{
tr[0] = to;
}
else
{
if (c == 2 && tr[1] == from)
{
tr[1] = to;
}
}
}
/**
Get the triangle mapped by this edge whose index differs from the provided one.
*/
int32_t getNot(int32_t id)
{
if (tr[0] != id)
{
return tr[0];
}
if (c == 2 && tr[1] != id)
{
return tr[1];
}
return -1;
}
};
/**
Tool for tessellating a graphics mesh and adding noise to its internal surface. Each triangle must have an
initialized Triangle::userInfo field (0 for external-surface triangles, != 0 for internal ones).
*/
class MeshNoiser
{
public:
MeshNoiser()
{
reset();
}
void reset();
/**
Edge flags
*/
enum EdgeFlag { INTERNAL_EDGE, EXTERNAL_BORDER_EDGE, INTERNAL_BORDER_EDGE, EXTERNAL_EDGE, NONE };
/**
Set mesh to tesselate and apply noise
*/
void setMesh(const std::vector<Triangle>& mesh);
/**
Tesselate internal surface.
\param[in] maxLen - maximal length of edge on internal surface.
*/
void tesselateInternalSurface(float maxLen);
/**
Apply noise to the internal surface. Must be called only after tessellation.
\param[in] noise - noise generator
\param[in] falloff - damping of the noise near the external surface
\param[in] relaxIterations - number of smoothing iterations before applying noise
\param[in] relaxFactor - amount of smoothing before applying noise.
*/
void applyNoise(SimplexNoise& noise, float falloff, int32_t relaxIterations, float relaxFactor);
std::vector<Triangle> getMesh();
private:
nvidia::NvVec3 mOffset;
float mScale;
bool isTesselated;
/**
Mesh data
*/
std::vector<Vertex> mVertices;
std::vector<TriangleIndexed> mTriangles;
std::vector<Edge> mEdges;
std::map<Vertex, int32_t, VrtComp> mVertMap;
std::map<Edge, int32_t> mEdgeMap;
/**
Final triangles.
*/
std::vector<Triangle> mResultTriangles;
int32_t addVerticeIfNotExist(const Vertex& p);
int32_t addEdge(const Edge& e);
int32_t findEdge(const Edge& e);
void collapseEdge(int32_t id);
void divideEdge(int32_t id);
void updateVertEdgeInfo();
void updateEdgeTriangleInfo();
void relax(int32_t iterations, float factor, std::vector<Vertex>& vertices);
void recalcNoiseDirs();
std::vector<bool> mRestrictionFlag;
std::vector<EdgeFlag> mEdgeFlag;
std::vector<EdgeToTriangles> mTrMeshEdToTr;
std::vector<int32_t> mVertexValence;
std::vector<std::vector<int32_t> > mVertexToTriangleMap;
std::vector<float> mVerticesDistances;
std::vector<nvidia::NvVec3> mVerticesNormalsSmoothed;
std::vector<uint32_t> mPositionMappedVrt;
std::vector<std::vector<int32_t> > mGeometryGraph;
void prebuildEdgeFlagArray();
void computePositionedMapping();
void computeFalloffAndNormals();
void prebuildTesselatedTriangles();
};
} // namespace Blast
} // namespace Nv
#endif // ! NVBLASTEXTAUTHORINGMESHNOISER_H
|
NVIDIA-Omniverse/PhysX/blast/source/sdk/extensions/authoring/NvBlastExtAuthoringTriangulator.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2016-2023 NVIDIA Corporation. All rights reserved.
// This warning arises when using some stl containers with older versions of VC
// c:\program files (x86)\microsoft visual studio 12.0\vc\include\xtree(1826): warning C4702: unreachable code
#include "NvPreprocessor.h"
#if NV_VC && NV_VC < 14
#pragma warning(disable : 4702)
#endif
#include "NvBlastExtAuthoringTriangulator.h"
#include "NvBlastExtAuthoringMesh.h"
#include "NvBlastExtAuthoringTypes.h"
#include "NvPreprocessor.h"
#include "NvBlastExtAuthoringBooleanToolImpl.h"
#include <NvBlastAssert.h>
#include <NvBlastNvSharedHelpers.h>
#include <math.h>
#include <algorithm>
#include <list>
#include <queue>
#include <set>
#include <vector>
using nvidia::NvVec2;
using nvidia::NvVec3;
namespace Nv
{
namespace Blast
{
// used with the ear clipping algorithm to deal with floating-point precision artifacts for nearly collinear points
#define MIN_ANGLE (0.0001f)
// helper for ear clipping algorithm
// holds the vertex indices for the previous and next vertex in the facet
// along with the scaled area of the triangle defined by the 3 vertices
struct AdjVertInfo
{
uint32_t prev;
uint32_t next;
float scaledArea;
};
NV_FORCE_INLINE bool compareTwoFloats(float a, float b)
{
return std::abs(b - a) <= FLT_EPSILON * std::abs(b + a);
}
NV_FORCE_INLINE bool compareTwoVertices(const NvVec3& a, const NvVec3& b)
{
return compareTwoFloats(a.x, b.x) && compareTwoFloats(a.y, b.y) && compareTwoFloats(a.z, b.z);
}
NV_FORCE_INLINE bool compareTwoVertices(const NvVec2& a, const NvVec2& b)
{
return compareTwoFloats(a.x, b.x) && compareTwoFloats(a.y, b.y);
}
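// 2D cross product (the z-component of the 3D cross): positive if b is
// counter-clockwise from a, negative if clockwise, zero if parallel.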
NV_FORCE_INLINE float getRotation(const NvVec2& a, const NvVec2& b)
{
return a.x * b.y - a.y * b.x;
}
NV_FORCE_INLINE bool pointInside(
const NvVec2& ba, const NvVec2& cb, const NvVec2& ac,
const NvVec2& a, const NvVec2& b, const NvVec2& c,
const NvVec2& pnt
) {
// Co-positional verts are not considered inside because that would break the exterior of the facet
if (compareTwoVertices(a, pnt) || compareTwoVertices(b, pnt) || compareTwoVertices(c, pnt))
{
return false;
}
const float v1 = getRotation(ba, (pnt - a).getNormalized());
const float v2 = getRotation(cb, (pnt - b).getNormalized());
const float v3 = getRotation(ac, (pnt - c).getNormalized());
// If the sign of all angles match, then the point is inside
// A 0 angle is considered inside because otherwise verts would get dropped during triangulation
return (v1 >= -MIN_ANGLE && v2 >= -MIN_ANGLE && v3 >= -MIN_ANGLE) ||
(v1 <= MIN_ANGLE && v2 <= MIN_ANGLE && v3 <= MIN_ANGLE);
}
static void updatePotentialEar(
uint32_t curr,
const Vertex* vert,
const ProjectionDirections& dir,
const std::map<uint32_t, AdjVertInfo>& adjVertInfoMap,
const std::list<uint32_t>& reflexVerts,
std::list<uint32_t>& potentialEars
) {
// remove from potential list if it exists already
// it will be added back if it is still a valid potential ear
const auto itr = std::find(potentialEars.begin(), potentialEars.end(), curr);
if (itr != potentialEars.end())
{
potentialEars.erase(itr);
}
// doing it this way so the map can be passed as a const reference, but it should always be fully populated
const auto mapItr = adjVertInfoMap.find(curr);
if (mapItr == adjVertInfoMap.end())
{
NVBLAST_ASSERT_WITH_MESSAGE(false, "this should never happen");
return;
}
// only convex verts need to be considered for potential ears
const AdjVertInfo& adjVertInfo = mapItr->second;
if (adjVertInfo.scaledArea <= 0.0f)
{
return;
}
// only need to check against reflex verts to see if they are inside potential ears
// convex verts can't be inside potential ears
if (reflexVerts.size())
{
const Vertex cV = vert[curr];
const Vertex pV = vert[adjVertInfo.prev];
const Vertex nV = vert[adjVertInfo.next];
const NvVec2 cVp = getProjectedPoint(cV.p, dir);
const NvVec2 pVp = getProjectedPoint(pV.p, dir);
const NvVec2 nVp = getProjectedPoint(nV.p, dir);
// if there are no other verts inside, then it is a potential ear
const NvVec2 ba = (nVp - cVp).getNormalized();
const NvVec2 cb = (pVp - nVp).getNormalized();
const NvVec2 ac = (cVp - pVp).getNormalized();
for (uint32_t vrt : reflexVerts)
{
// ignore reflex verts that are part of the tri being tested
if (vrt == adjVertInfo.prev || vrt == adjVertInfo.next)
{
continue;
}
const NvVec2 pnt = getProjectedPoint(vert[vrt].p, dir);
if (pointInside(ba, cb, ac, cVp, nVp, pVp, pnt))
{
return;
}
}
}
potentialEars.push_back(curr);
}
static void updateVertData(
uint32_t curr,
uint32_t prev,
uint32_t next,
const Vertex* vert,
const ProjectionDirections& dir,
std::map<uint32_t, AdjVertInfo>& adjVertInfoMap,
std::list<uint32_t>& reflexVerts
) {
// remove the index from the reflex list if there is already an entry for it
// it will be added back if it is still a reflex vertex
const auto reflexItr = std::find(reflexVerts.begin(), reflexVerts.end(), curr);
if (reflexItr != reflexVerts.end())
{
reflexVerts.erase(reflexItr);
}
// if next == prev it isn't a valid triangle
// this will happen when the facet has less than 3 verts in it
// no need to add them as reflex verts at that point, the algorithm is finishing up the final pass
float scaledArea = 0.0f;
if (prev != next)
{
const Vertex cV = vert[curr];
const Vertex pV = vert[prev];
const Vertex nV = vert[next];
const NvVec2 cVp = getProjectedPoint(cV.p, dir);
const NvVec2 pVp = getProjectedPoint(pV.p, dir);
const NvVec2 nVp = getProjectedPoint(nV.p, dir);
const NvVec2 prevEdge = (cVp - pVp);
const NvVec2 nextEdge = (nVp - cVp);
// use normalized vectors to get a better calc for the angle between them
float rot = getRotation(prevEdge.getNormalized(), nextEdge.getNormalized());
if (dir & OPPOSITE_WINDING)
rot = -rot;
if (rot > MIN_ANGLE)
{
// this is a valid convex vertex, calculate 2 * area (used for sorting later)
// actual area isn't needed because it is only used to compare with other ears, so relative numbers are fine
scaledArea = getRotation(prevEdge, nextEdge);
if (dir & OPPOSITE_WINDING)
scaledArea = -scaledArea;
}
else
{
// the angle is roughly 180 or greater, consider it a reflex vertex
reflexVerts.push_back(curr);
}
}
// the scaled area will be used to sort potential ears later
adjVertInfoMap[curr] = {prev, next, scaledArea};
}
void Triangulator::triangulatePolygonWithEarClipping(const std::vector<uint32_t>& inputPolygon, const Vertex* vert,
const ProjectionDirections& dir)
{
uint32_t vCount = static_cast<uint32_t>(inputPolygon.size());
if (vCount < 3)
{
return;
}
// High level of ear clipping algorithm:
//
// - find potential ears (3 consecutive verts that form a triangle fully inside the facet with no other points from the facet inside or on an edge)
// while (potential ears)
// - sort the potential ears by area
// - add tri formed by largest ear to output and remove vert from the tip of the ear from the facet
// - update potential ears for remaining 2 verts in the tri
//
// This will ensure that no sliver triangles are created
// start by building up vertex data and a list of reflex (interior angle >= 180) verts
std::list<uint32_t> reflexVerts;
std::list<uint32_t> potentialEars;
std::map<uint32_t, AdjVertInfo> adjVertInfoMap;
for (uint32_t curr = 0; curr < vCount; curr++)
{
const uint32_t prev = (curr == 0) ? vCount - 1 : curr - 1;
const uint32_t next = (curr == vCount - 1) ? 0 : curr + 1;
const uint32_t currIdx = inputPolygon[curr];
const uint32_t prevIdx = inputPolygon[prev];
const uint32_t nextIdx = inputPolygon[next];
updateVertData(currIdx, prevIdx, nextIdx, vert, dir, adjVertInfoMap, reflexVerts);
}
// build the list of potential ears defined by convex verts by checking any reflex vert is inside
for (auto pair : adjVertInfoMap)
{
// if a vert is not a reflex, it must be convex and should be considered as an ear
const uint32_t currIdx = pair.first;
if (std::find(reflexVerts.begin(), reflexVerts.end(), currIdx) == reflexVerts.end())
{
updatePotentialEar(currIdx, vert, dir, adjVertInfoMap, reflexVerts, potentialEars);
}
}
// descending sort by scaled area
auto compArea = [&adjVertInfoMap](const uint32_t& a, const uint32_t& b) -> bool
{
return (adjVertInfoMap[a].scaledArea > adjVertInfoMap[b].scaledArea);
};
while (potentialEars.size())
{
// sort the potential ear list based on the area of the triangles they form
potentialEars.sort(compArea);
// add the largest triangle to the output
const uint32_t curr = potentialEars.front();
const AdjVertInfo& adjVertInfo = adjVertInfoMap[curr];
mBaseMeshTriangles.push_back(TriangleIndexed(curr, adjVertInfo.prev, adjVertInfo.next));
// remove the ear tip from the potential ear list
potentialEars.pop_front();
// update data for the other 2 verts involved
const uint32_t prevPrev = adjVertInfoMap[adjVertInfo.prev].prev;
const uint32_t nextNext = adjVertInfoMap[adjVertInfo.next].next;
// vert data must be updated first for both
updateVertData(adjVertInfo.prev, prevPrev, adjVertInfo.next, vert, dir, adjVertInfoMap, reflexVerts);
updateVertData(adjVertInfo.next, adjVertInfo.prev, nextNext, vert, dir, adjVertInfoMap, reflexVerts);
// then potential ear list
updatePotentialEar(adjVertInfo.prev, vert, dir, adjVertInfoMap, reflexVerts, potentialEars);
updatePotentialEar(adjVertInfo.next, vert, dir, adjVertInfoMap, reflexVerts, potentialEars);
}
}
struct LoopInfo
{
LoopInfo()
{
used = false;
}
NvVec3 normal;
float area;
int32_t index;
bool used;
bool operator<(const LoopInfo& b) const
{
return area < b.area;
}
};
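// Merge a hole loop into its containing outer loop by inserting a bridge, as in
// classic ear clipping with holes: take the hole vertex with maximum x, cast a
// ray toward +x to find the closest visible point on the outer loop, pick the
// bridge vertex, and splice the hole's vertex sequence into the outer loop with
// the two bridge vertices duplicated. Returns 0 on success, 1 on failure.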
int32_t unitePolygons(std::vector<uint32_t>& externalLoop, std::vector<uint32_t>& internalLoop, Vertex* vrx,
const ProjectionDirections& dir)
{
if (externalLoop.size() < 3 || internalLoop.size() < 3)
return 1;
/**
Find point with maximum x-coordinate
*/
float x_max = -MAXIMUM_EXTENT;
int32_t mIndex = -1;
for (uint32_t i = 0; i < internalLoop.size(); ++i)
{
float nx = getProjectedPoint(vrx[internalLoop[i]].p, dir).x;
if (nx > x_max)
{
mIndex = i;
x_max = nx;
}
}
if (mIndex == -1)
{
return 1;
}
/**
Search for base point on external loop
*/
float minX = MAXIMUM_EXTENT;
int32_t vrtIndex = -1;
bool isFromBuffer = 0;
NvVec2 holePoint = getProjectedPoint(vrx[internalLoop[mIndex]].p, dir);
NvVec2 computedPoint;
for (uint32_t i = 0; i < externalLoop.size(); ++i)
{
int32_t nx = (i + 1) % externalLoop.size();
NvVec2 pnt1 = getProjectedPoint(vrx[externalLoop[i]].p, dir);
NvVec2 pnt2 = getProjectedPoint(vrx[externalLoop[nx]].p, dir);
if (pnt1.x < x_max && pnt2.x < x_max)
{
continue;
}
NvVec2 vc = pnt2 - pnt1;
if (vc.y == 0 && pnt1.y == holePoint.y)
{
if (pnt1.x < minX && pnt1.x < pnt2.x && pnt1.x > x_max)
{
minX = pnt1.x;
vrtIndex = i;
isFromBuffer = true;
}
if (pnt2.x < minX && pnt2.x < pnt1.x && pnt2.x > x_max)
{
minX = pnt2.x;
vrtIndex = nx;
isFromBuffer = true;
}
}
else
{
float t = (holePoint.y - pnt1.y) / vc.y;
if (t <= 1 && t >= 0)
{
NvVec2 tempPoint = vc * t + pnt1;
if (tempPoint.x < minX && tempPoint.x > x_max)
{
minX = tempPoint.x;
vrtIndex = i;
isFromBuffer = false;
computedPoint = tempPoint;
}
}
}
}
if (vrtIndex == -1)
{
// std::cout << "Triangulation: base vertex for inner loop is not found..." << std::endl;
return 1;
}
int32_t bridgePoint = -1;
float bestAngle = 100;
if (!isFromBuffer)
{
NvVec2 ex1 = getProjectedPoint(vrx[externalLoop[vrtIndex]].p, dir);
NvVec2 ex2 = getProjectedPoint(vrx[externalLoop[(vrtIndex + 1) % externalLoop.size()]].p, dir);
if (ex1.x > ex2.x)
{
vrtIndex = (vrtIndex + 1) % externalLoop.size();
ex1 = ex2;
}
/* Check if some point is inside triangle */
bool notFound = true;
const NvVec2 ba = (ex1 - holePoint).getNormalized();
const NvVec2 cb = (computedPoint - ex1).getNormalized();
const NvVec2 ac = (holePoint - computedPoint).getNormalized();
for (int32_t i = 0; i < (int32_t)externalLoop.size(); ++i)
{
const NvVec2 tempPoint = getProjectedPoint(vrx[externalLoop[i]].p, dir);
if (pointInside(ba, cb, ac, holePoint, ex1, computedPoint, tempPoint))
{
notFound = false;
const NvVec2 cVp = getProjectedPoint(vrx[externalLoop[i]].p, dir);
const NvVec2 pVp =
getProjectedPoint(vrx[externalLoop[(i - 1 + externalLoop.size()) % externalLoop.size()]].p, dir);
const NvVec2 nVp = getProjectedPoint(vrx[externalLoop[(i + 1) % externalLoop.size()]].p, dir);
float rt = getRotation((cVp - pVp).getNormalized(), (nVp - pVp).getNormalized());
if (dir & OPPOSITE_WINDING)
rt = -rt;
if (rt < MIN_ANGLE)
continue;
const float tempAngle = NvVec2(1, 0).dot((tempPoint - holePoint).getNormalized());
if (bestAngle < tempAngle)
{
bestAngle = tempAngle;
bridgePoint = i;
}
}
}
if (notFound)
{
bridgePoint = vrtIndex;
}
if (bridgePoint == -1)
{
// std::cout << "Triangulation: bridge vertex for inner loop is not found..." << std::endl;
return 1;
}
}
else
{
bridgePoint = vrtIndex;
}
std::vector<uint32_t> temporal;
for (int32_t i = 0; i <= bridgePoint; ++i)
{
temporal.push_back(externalLoop[i]);
}
temporal.push_back(internalLoop[mIndex]);
for (int32_t i = (mIndex + 1) % internalLoop.size(); i != mIndex; i = (i + 1) % internalLoop.size())
{
temporal.push_back(internalLoop[i]);
}
temporal.push_back(internalLoop[mIndex]);
for (uint32_t i = bridgePoint; i < externalLoop.size(); ++i)
{
temporal.push_back(externalLoop[i]);
}
externalLoop = temporal;
return 0;
}
void Triangulator::buildPolygonAndTriangulate(std::vector<Edge>& edges, Vertex* vertices, int32_t userData,
int32_t materialId, int32_t smoothingGroup)
{
std::vector<std::vector<uint32_t> > serializedLoops;
std::set<int> visitedVertices;
std::vector<int> used(edges.size(), 0);
uint32_t collected = 0;
std::vector<int> edgesIds;
/**
Add first edge to polygon
*/
edgesIds.push_back(0);
visitedVertices.insert(edges[0].s);
visitedVertices.insert(edges[0].e);
used[0] = true;
collected = 1;
uint32_t lastEdge = 0;
bool successfulPass = false;
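/**
Chain edges end-to-start. Whenever the chain revisits a vertex a closed loop
has been formed; it is detached into serializedLoops and collection restarts
from any remaining unused edge.
*/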
while (collected < edges.size())
{
successfulPass = false;
for (uint32_t p = 0; p < edges.size(); ++p)
{
if (used[p] == 0 && edges[p].s == edges[lastEdge].e)
{
successfulPass = true;
collected++;
used[p] = true;
edgesIds.push_back(p);
lastEdge = p;
if (visitedVertices.find(edges[p].e) != visitedVertices.end()) // if we formed a loop, detach it and triangulate
{
serializedLoops.push_back(std::vector<uint32_t>());
std::vector<uint32_t>& serializedPositions = serializedLoops.back();
while (edgesIds.size() > 0)
{
serializedPositions.push_back(edges[edgesIds.back()].s);
visitedVertices.erase(edges[edgesIds.back()].s);
if (edges[edgesIds.back()].s == edges[p].e)
{
edgesIds.pop_back();
break;
}
edgesIds.pop_back();
}
if (edgesIds.size() > 0)
{
lastEdge = edgesIds.back();
}
else
{
for (uint32_t t = 0; t < edges.size(); ++t)
{
if (used[t] == 0)
{
edgesIds.push_back(t);
visitedVertices.insert(edges[t].s);
visitedVertices.insert(edges[t].e);
used[t] = true;
collected++;
lastEdge = t;
break;
}
}
}
}
else
{
visitedVertices.insert(edges[p].e);
}
}
}
if (!successfulPass)
{
break;
}
}
std::vector<LoopInfo> loopsInfo(serializedLoops.size());
// Compute the normal of the whole polygon and the area of each loop
// (magnitudes are twice the geometric areas, which is fine since areas are only compared)
NvVec3 wholeFacetNormal(0, 0, 0);
for (uint32_t loop = 0; loop < serializedLoops.size(); ++loop)
{
NvVec3 loopNormal(0, 0, 0);
const std::vector<uint32_t>& pos = serializedLoops[loop];
for (uint32_t vrt = 1; vrt + 1 < serializedLoops[loop].size(); ++vrt)
{
loopNormal += toNvShared(vertices[pos[vrt]].p - vertices[pos[0]].p)
.cross(toNvShared(vertices[pos[vrt + 1]].p - vertices[pos[0]].p));
}
loopsInfo[loop].area = loopNormal.magnitude();
loopsInfo[loop].normal = loopNormal;
loopsInfo[loop].index = loop;
wholeFacetNormal += loopNormal;
}
// Change areas signs according to winding direction
for (uint32_t loop = 0; loop < serializedLoops.size(); ++loop)
{
if (wholeFacetNormal.dot(loopsInfo[loop].normal) < 0)
{
loopsInfo[loop].area = -loopsInfo[loop].area;
}
}
const ProjectionDirections dir = getProjectionDirection(wholeFacetNormal);
std::sort(loopsInfo.begin(), loopsInfo.end());
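// Loops are processed in sorted order: each positive-area loop is an outer
// boundary; every unused hole (negative area, smaller magnitude) is bridged
// into it via unitePolygons() before ear clipping runs on the combined polygon.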
int32_t oldSize = static_cast<int32_t>(mBaseMeshTriangles.size());
for (uint32_t extPoly = 0; extPoly < loopsInfo.size(); ++extPoly)
{
if (loopsInfo[extPoly].area < 0)
{
continue; // A loop with negative area is a hole
}
int32_t baseLoop = loopsInfo[extPoly].index;
for (uint32_t intPoly = 0; intPoly < loopsInfo.size(); ++intPoly)
{
if (loopsInfo[intPoly].area > 0 || loopsInfo[intPoly].used ||
std::abs(loopsInfo[intPoly].area) > loopsInfo[extPoly].area)
{
continue;
}
int32_t holeLoop = loopsInfo[intPoly].index;
if (!unitePolygons(serializedLoops[baseLoop], serializedLoops[holeLoop], vertices, dir)) // returns 0 on success
{
loopsInfo[intPoly].used = true;
}
}
triangulatePolygonWithEarClipping(serializedLoops[baseLoop], vertices, dir);
}
for (uint32_t i = oldSize; i < mBaseMeshTriangles.size(); ++i)
{
mBaseMeshTriangles[i].userData = userData;
mBaseMeshTriangles[i].materialId = materialId;
mBaseMeshTriangles[i].smoothingGroup = smoothingGroup;
}
}
NV_FORCE_INLINE int32_t Triangulator::addVerticeIfNotExist(const Vertex& p)
{
auto it = mVertMap.find(p);
if (it == mVertMap.end())
{
mVertMap[p] = static_cast<int32_t>(mVertices.size());
mVertices.push_back(p);
return static_cast<int32_t>(mVertices.size()) - 1;
}
else
{
return it->second;
}
}
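/**
Registers an edge unless it cancels against its reverse edge from the same
parent facet: the earlier stored copy is tombstoned by setting its start to
kNotValidVertexIndex (a later duplicate revives the slot). Tombstoned edges
are filtered out at the end of prepare().
*/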
NV_FORCE_INLINE void Triangulator::addEdgeIfValid(EdgeWithParent& ed)
{
if (ed.s == ed.e)
return;
EdgeWithParent opposite(ed.e, ed.s, ed.parent);
auto it = mEdgeMap.find(opposite);
if (it == mEdgeMap.end())
{
mEdgeMap[ed] = static_cast<int32_t>(mBaseMeshEdges.size());
mBaseMeshEdges.push_back(ed);
}
else
{
if (mBaseMeshEdges[it->second].s == kNotValidVertexIndex)
{
mBaseMeshEdges[it->second].s = ed.s;
mBaseMeshEdges[it->second].e = ed.e;
}
else
{
mBaseMeshEdges[it->second].s = kNotValidVertexIndex;
}
}
}
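/**
Welds duplicate input vertices through addVerticeIfNotExist(), records the
old-to-new index mapping in mBaseMapping, and gathers each facet's edges,
discarding pairs that cancelled out in addEdgeIfValid().
*/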
void Triangulator::prepare(const Mesh* mesh)
{
const Edge* ed = mesh->getEdges();
const Vertex* vr = mesh->getVertices();
mBaseMapping.resize(mesh->getVerticesCount());
for (uint32_t i = 0; i < mesh->getFacetCount(); ++i)
{
const Facet* fc = mesh->getFacet(i);
for (uint32_t j = fc->firstEdgeNumber; j < fc->firstEdgeNumber + fc->edgesCount; ++j)
{
int32_t a = addVerticeIfNotExist(vr[ed[j].s]);
int32_t b = addVerticeIfNotExist(vr[ed[j].e]);
mBaseMapping[ed[j].s] = a;
mBaseMapping[ed[j].e] = b;
EdgeWithParent e(a, b, i);
addEdgeIfValid(e);
}
}
std::vector<EdgeWithParent> temp;
temp.reserve(mBaseMeshEdges.size());
for (uint32_t i = 0; i < mBaseMeshEdges.size(); ++i)
{
if (mBaseMeshEdges[i].s != kNotValidVertexIndex)
{
temp.push_back(mBaseMeshEdges[i]);
}
}
mBaseMeshEdges = temp;
}
void Triangulator::reset()
{
mVertices.clear();
mBaseMeshEdges.clear();
mVertMap.clear();
mEdgeMap.clear();
mBaseMeshTriangles.clear();
mBaseMeshResultTriangles.clear();
}
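/**
Entry point: welds the mesh, walks its edges grouped by parent facet, builds
and triangulates one polygon (possibly with holes) per facet, then emits the
surviving triangles and the position-based vertex mapping.

Minimal usage sketch (assumes the accessors declared on this class, e.g.
getBaseMesh() returning the result triangles):

    Triangulator triangulator;
    triangulator.triangulate(mesh);
    const std::vector<Triangle>& tris = triangulator.getBaseMesh();
*/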
void Triangulator::triangulate(const Mesh* mesh)
{
reset();
if (mesh == nullptr || !mesh->isValid())
{
return;
}
prepare(mesh);
if (mBaseMeshEdges.empty())
{
return;
}
std::vector<Edge> temp;
uint32_t fP = mBaseMeshEdges[0].parent;
for (uint32_t i = 0; i < mBaseMeshEdges.size(); ++i)
{
if (fP != mBaseMeshEdges[i].parent)
{
if (!temp.empty())
{
buildPolygonAndTriangulate(temp, mVertices.data(), mesh->getFacet(fP)->userData,
mesh->getFacet(fP)->materialId, mesh->getFacet(fP)->smoothingGroup);
}
temp.clear();
fP = mBaseMeshEdges[i].parent;
}
temp.push_back({ mBaseMeshEdges[i].s, mBaseMeshEdges[i].e });
}
buildPolygonAndTriangulate(temp, mVertices.data(), mesh->getFacet(fP)->userData, mesh->getFacet(fP)->materialId,
mesh->getFacet(fP)->smoothingGroup);
/* Build final triangles */
mBaseMeshResultTriangles.clear();
for (uint32_t i = 0; i < mBaseMeshTriangles.size(); ++i)
{
if (mBaseMeshTriangles[i].ea == kNotValidVertexIndex)
{
continue;
}
mBaseMeshResultTriangles.push_back({ mVertices[mBaseMeshTriangles[i].ea], mVertices[mBaseMeshTriangles[i].eb],
mVertices[mBaseMeshTriangles[i].ec], mBaseMeshTriangles[i].userData,
mBaseMeshTriangles[i].materialId, mBaseMeshTriangles[i].smoothingGroup });
}
mBaseMeshUVFittedTriangles = mBaseMeshResultTriangles; // Uvs will be fitted later, in FractureTool.
computePositionedMapping();
}
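/**
Builds mPositionMappedVrt so that all vertices sharing a position (but
possibly differing in normal or UV) map to one representative vertex index.
*/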
void Triangulator::computePositionedMapping()
{
std::map<NvcVec3, int32_t, VrtPositionComparator> posMap;
mPositionMappedVrt.clear();
mPositionMappedVrt.resize(mVertices.size());
for (uint32_t i = 0; i < mVertices.size(); ++i)
{
auto it = posMap.find(mVertices[i].p);
if (it == posMap.end())
{
posMap[mVertices[i].p] = i;
mPositionMappedVrt[i] = i;
}
else
{
mPositionMappedVrt[i] = it->second;
}
}
}
} // namespace Blast
} // namespace Nv