file_path
stringlengths 21
202
| content
stringlengths 19
1.02M
| size
int64 19
1.02M
| lang
stringclasses 8
values | avg_line_length
float64 5.88
100
| max_line_length
int64 12
993
| alphanum_fraction
float64 0.27
0.93
|
---|---|---|---|---|---|---|
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuIncrementalAABBPruner.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
// PT: TODO: this class isn't actually used at the moment
#define COMPILE_INCREMENTAL_AABB_PRUNER
#ifdef COMPILE_INCREMENTAL_AABB_PRUNER
#include "common/PxProfileZone.h"
#include "CmVisualization.h"
#include "foundation/PxBitUtils.h"
#include "GuIncrementalAABBPruner.h"
#include "GuIncrementalAABBTree.h"
#include "GuCallbackAdapter.h"
#include "GuAABBTree.h"
#include "GuAABBTreeQuery.h"
#include "GuSphere.h"
#include "GuBox.h"
#include "GuCapsule.h"
#include "GuQuery.h"
using namespace physx;
using namespace Gu;
// PT: TODO: this is copied from SqBounds.h, should be either moved to Gu and shared or passed as a user parameter
#define SQ_PRUNER_EPSILON 0.005f
#define SQ_PRUNER_INFLATION (1.0f + SQ_PRUNER_EPSILON) // pruner test shape inflation (not narrow phase shape)
#define PARANOIA_CHECKS 0
// Builds an empty pruner. No tree is created here: mAABBTree stays NULL until
// the first commit() triggers fullRebuildAABBTree().
IncrementalAABBPruner::IncrementalAABBPruner(PxU32 sceneLimit, PxU64 contextID) :
mAABBTree (NULL),
mPool (contextID, TRANSFORM_CACHE_GLOBAL),
mContextID (contextID)
{
// Pre-size the pool, the poolIndex->leaf mapping and the scratch leaf array so
// the first 'sceneLimit' objects do not trigger reallocations.
mMapping.resizeUninitialized(sceneLimit);
mPool.preallocate(sceneLimit);
mChangedLeaves.reserve(sceneLimit);
}
// Frees the tree (if any) via release(); the pool and mapping arrays clean
// themselves up through their own destructors.
IncrementalAABBPruner::~IncrementalAABBPruner()
{
release();
}
// Adds 'count' objects to the pool and, when a tree already exists, inserts each
// of them into the incremental AABB tree, refreshing the poolIndex->leaf mapping.
// Returns true if all objects were added (the pool can run out of memory).
bool IncrementalAABBPruner::addObjects(PrunerHandle* results, const PxBounds3* bounds, const PrunerPayload* data, const PxTransform* transforms, PxU32 count, bool )
{
	PX_PROFILE_ZONE("SceneQuery.prunerAddObjects", mContextID);

	if(!count)
		return true;

	// The pool writes one handle per successfully added object into 'results'.
	const PxU32 nbAdded = mPool.addObjects(results, bounds, data, transforms, count);

	// Without a tree (nothing committed yet) the pool alone is enough; the tree
	// will be built from the whole pool on the next commit().
	if(mAABBTree)
	{
		for(PxU32 k=0; k<nbAdded; k++)
		{
			const PoolIndex index = mPool.getIndex(results[k]);
			mChangedLeaves.clear();
			// insert() may split a leaf; any touched leaves are reported in mChangedLeaves.
			IncrementalAABBTreeNode* insertedNode = mAABBTree->insert(index, mPool.getCurrentWorldBoxes(), mChangedLeaves);
			updateMapping(index, insertedNode);
		}

#if PARANOIA_CHECKS
		test();
#endif
	}

	return nbAdded==count;
}
void IncrementalAABBPruner::updateMapping(const PoolIndex poolIndex, IncrementalAABBTreeNode* node)
{
// resize mapping if needed
if(mMapping.size() <= poolIndex)
{
mMapping.resize(mMapping.size() * 2);
}
// if a node was split we need to update the node indices and also the sibling indices
if(!mChangedLeaves.empty())
{
if(node && node->isLeaf())
{
for(PxU32 j = 0; j < node->getNbPrimitives(); j++)
{
mMapping[node->getPrimitives(NULL)[j]] = node;
}
}
for(PxU32 i = 0; i < mChangedLeaves.size(); i++)
{
IncrementalAABBTreeNode* changedNode = mChangedLeaves[i];
PX_ASSERT(changedNode->isLeaf());
for(PxU32 j = 0; j < changedNode->getNbPrimitives(); j++)
{
mMapping[changedNode->getPrimitives(NULL)[j]] = changedNode;
}
}
}
else
{
mMapping[poolIndex] = node;
}
}
// Updates object bounds in the pool and refits the corresponding tree leaves.
// 'boundsIndices' maps each handle to its slot in 'newBounds'/'newTransforms';
// 'inflation' is the epsilon used to fatten the stored bounds.
void IncrementalAABBPruner::updateObjects(const PrunerHandle* handles, PxU32 count, float inflation, const PxU32* boundsIndices, const PxBounds3* newBounds, const PxTransform32* newTransforms)
{
PX_PROFILE_ZONE("SceneQuery.prunerUpdateObjects", mContextID);
if(!count)
return;
// Write the new inflated bounds into the pool first; the tree refit below reads them back.
if(handles && boundsIndices && newBounds)
mPool.updateAndInflateBounds(handles, boundsIndices, newBounds, newTransforms, count, inflation);
// No tree yet (nothing committed) - the updated pool bounds suffice until the next commit().
if(!mAABBTree)
return;
const PxBounds3* poolBounds = mPool.getCurrentWorldBoxes();
// NOTE(review): 'handles' is dereferenced below even when the pool update above was
// skipped - presumably callers always pass valid handles here; confirm at call sites.
for(PxU32 i=0; i<count; i++)
{
const PrunerHandle h = handles[i];
const PoolIndex poolIndex = mPool.getIndex(h);
mChangedLeaves.clear();
// update() may move the primitive to a different leaf and/or split leaves.
IncrementalAABBTreeNode* node = mAABBTree->update(mMapping[poolIndex], poolIndex, poolBounds, mChangedLeaves);
// we removed node during update, need to update the mapping
updateMapping(poolIndex, node);
}
#if PARANOIA_CHECKS
test();
#endif
}
// Removes objects from the pool and the tree. The pool fills removal holes by
// swapping in its last object, so the poolIndex->leaf mapping (and the indices
// stored in the tree) must be patched after every removal.
void IncrementalAABBPruner::removeObjects(const PrunerHandle* handles, PxU32 count, PrunerPayloadRemovalCallback* removalCallback)
{
PX_PROFILE_ZONE("SceneQuery.prunerRemoveObjects", mContextID);
if(!count)
return;
for(PxU32 i=0; i<count; i++)
{
const PrunerHandle h = handles[i];
const PoolIndex poolIndex = mPool.getIndex(h); // save the pool index for removed object
const PoolIndex poolRelocatedLastIndex = mPool.removeObject(h, removalCallback); // save the lastIndex returned by removeObject
if(mAABBTree)
{
IncrementalAABBTreeNode* node = mAABBTree->remove(mMapping[poolIndex], poolIndex, mPool.getCurrentWorldBoxes());
// if node moved to its parent
if (node && node->isLeaf())
{
// The surviving leaf absorbed its sibling's primitives - remap all of them.
for (PxU32 j = 0; j < node->getNbPrimitives(); j++)
{
const PoolIndex index = node->getPrimitives(NULL)[j];
mMapping[index] = node;
}
}
// The last pool object now lives at 'poolIndex' - point that slot at its leaf.
mMapping[poolIndex] = mMapping[poolRelocatedLastIndex];
// fix indices if we made a swap
if(poolRelocatedLastIndex != poolIndex)
mAABBTree->fixupTreeIndices(mMapping[poolIndex], poolRelocatedLastIndex, poolIndex);
// Last object gone: drop the empty tree; commit() will rebuild when needed.
if(!mAABBTree->getNodes())
{
release();
}
}
}
#if PARANOIA_CHECKS
test();
#endif
}
// Runs an overlap query against the tree, dispatching on the query volume type.
// Returns the callback's "continue" flag (false means the query was aborted early).
bool IncrementalAABBPruner::overlap(const ShapeData& queryVolume, PrunerOverlapCallback& pcbArgName) const
{
bool again = true;
// An empty/absent tree reports no hits and leaves 'again' true.
if(mAABBTree && mAABBTree->getNodes())
{
OverlapCallbackAdapter pcb(pcbArgName, mPool);
switch(queryVolume.getType())
{
case PxGeometryType::eBOX:
{
// Axis-aligned boxes get the cheaper AABB-vs-AABB test.
if(queryVolume.isOBB())
{
const DefaultOBBAABBTest test(queryVolume);
again = AABBTreeOverlap<true, OBBAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, OverlapCallbackAdapter>()(mPool.getCurrentAABBTreeBounds(), *mAABBTree, test, pcb);
}
else
{
const DefaultAABBAABBTest test(queryVolume);
again = AABBTreeOverlap<true, AABBAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, OverlapCallbackAdapter>()(mPool.getCurrentAABBTreeBounds(), *mAABBTree, test, pcb);
}
}
break;
case PxGeometryType::eCAPSULE:
{
// Only the capsule test applies the pruner inflation factor here.
const DefaultCapsuleAABBTest test(queryVolume, SQ_PRUNER_INFLATION);
again = AABBTreeOverlap<true, CapsuleAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, OverlapCallbackAdapter>()(mPool.getCurrentAABBTreeBounds(), *mAABBTree, test, pcb);
}
break;
case PxGeometryType::eSPHERE:
{
const DefaultSphereAABBTest test(queryVolume);
again = AABBTreeOverlap<true, SphereAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, OverlapCallbackAdapter>()(mPool.getCurrentAABBTreeBounds(), *mAABBTree, test, pcb);
}
break;
case PxGeometryType::eCONVEXMESH:
{
// Convexes are culled with their OBB, same test as the OBB box path above.
const DefaultOBBAABBTest test(queryVolume);
again = AABBTreeOverlap<true, OBBAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, OverlapCallbackAdapter>()(mPool.getCurrentAABBTreeBounds(), *mAABBTree, test, pcb);
}
break;
default:
PX_ALWAYS_ASSERT_MESSAGE("unsupported overlap query volume geometry type");
}
}
return again;
}
bool IncrementalAABBPruner::sweep(const ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& pcbArgName) const
{
bool again = true;
if(mAABBTree && mAABBTree->getNodes())
{
const PxBounds3& aabb = queryVolume.getPrunerInflatedWorldAABB();
RaycastCallbackAdapter pcb(pcbArgName, mPool);
again = AABBTreeRaycast<true, true, IncrementalAABBTree, IncrementalAABBTreeNode, RaycastCallbackAdapter>()(mPool.getCurrentAABBTreeBounds(), *mAABBTree, aabb.getCenter(), unitDir, inOutDistance, aabb.getExtents(), pcb);
}
return again;
}
bool IncrementalAABBPruner::raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& pcbArgName) const
{
bool again = true;
if(mAABBTree && mAABBTree->getNodes())
{
RaycastCallbackAdapter pcb(pcbArgName, mPool);
again = AABBTreeRaycast<false, true, IncrementalAABBTree, IncrementalAABBTreeNode, RaycastCallbackAdapter>()(mPool.getCurrentAABBTreeBounds(), *mAABBTree, origin, unitDir, inOutDistance, PxVec3(0.0f), pcb);
}
return again;
}
// Not part of the pruner virtual interface: forces a full reset by releasing the
// tree; it is lazily rebuilt from the pool on the next commit().
// NOTE(review): original comment was inherited from AABBPruner (SqManager forced
// rebuild) - presumably the same caller applies here; confirm.
void IncrementalAABBPruner::purge()
{
release();
}
// Builds the tree lazily. When a tree already exists there is nothing to do:
// adds/updates/removes maintain it incrementally.
void IncrementalAABBPruner::commit()
{
	PX_PROFILE_ZONE("SceneQuery.prunerCommit", mContextID);

	if(mAABBTree)
		return;

	fullRebuildAABBTree();
}
// Builds a fresh incremental AABB tree over every active object in the pool and
// fills mMapping with each object's leaf node.
void IncrementalAABBPruner::fullRebuildAABBTree()
{
// Don't bother building a tree when the pool is empty.
const PxU32 nbObjects = mPool.getNbActiveObjects();
if (!nbObjects)
return;
// Mapping capacity is kept at a power of two, matching the growth in updateMapping().
const PxU32 indicesSize = PxNextPowerOfTwo(nbObjects);
if(indicesSize > mMapping.size())
{
mMapping.resizeUninitialized(indicesSize);
}
// Build the tree in place; build() also writes the poolIndex->leaf mapping.
mAABBTree = PX_NEW(IncrementalAABBTree)();
mAABBTree->build(AABBTreeBuildParams(INCR_NB_OBJECTS_PER_NODE, nbObjects, &mPool.getCurrentAABBTreeBounds()), mMapping);
#if PARANOIA_CHECKS
test();
#endif
}
void IncrementalAABBPruner::shiftOrigin(const PxVec3& shift)
{
mPool.shiftOrigin(shift);
if(mAABBTree)
mAABBTree->shiftOrigin(shift);
}
// Debug-draws the tree's node bounds with 'primaryColor'; the secondary color is
// currently unused (see the commented-out code below).
void IncrementalAABBPruner::visualize(PxRenderOutput& out, PxU32 primaryColor, PxU32 /*secondaryColor*/) const
{
// getAABBTree() asserts when pruner is dirty. NpScene::visualization() does not enforce flushUpdate. see DE7834
visualizeTree(out, primaryColor, mAABBTree);
// Render added objects not yet in the tree
//out << PxTransform(PxIdentity);
//out << PxU32(PxDebugColor::eARGB_WHITE);
}
// Destroys the tree only; the pool and mapping keep their contents so commit()
// can rebuild. (PX_DELETE presumably also nulls mAABBTree - PhysX convention.)
void IncrementalAABBPruner::release() // this can be called from purge()
{
PX_DELETE(mAABBTree);
}
void IncrementalAABBPruner::test()
{
if(mAABBTree)
{
mAABBTree->hierarchyCheck(mPool.getNbActiveObjects(), mPool.getCurrentWorldBoxes());
for(PxU32 i = 0; i < mPool.getNbActiveObjects(); i++)
{
mAABBTree->checkTreeLeaf(mMapping[i], i);
}
}
}
// Merging a precomputed pruning structure is not implemented for this pruner;
// the call is a no-op. The commented-out code below is the AABBPruner version
// kept as a reference for a future implementation.
void IncrementalAABBPruner::merge(const void* )
{
//const AABBPrunerMergeData& pruningStructure = *reinterpret_cast<const AABBPrunerMergeData*> (mergeParams);
//if(mAABBTree)
//{
// // index in pruning pool, where new objects were added
// const PxU32 pruningPoolIndex = mPool.getNbActiveObjects() - pruningStructure.mNbObjects;
// // create tree from given nodes and indices
// AABBTreeMergeData aabbTreeMergeParams(pruningStructure.mNbNodes, pruningStructure.mAABBTreeNodes,
// pruningStructure.mNbObjects, pruningStructure.mAABBTreeIndices, pruningPoolIndex);
// if (!mIncrementalRebuild)
// {
// // merge tree directly
// mAABBTree->mergeTree(aabbTreeMergeParams);
// }
// else
// {
// mBucketPruner.addTree(aabbTreeMergeParams, mTimeStamp);
// }
//}
}
// Returns the bounds of the whole tree (the root node's AABB), or an empty
// bounds when no tree exists or it has no nodes.
void IncrementalAABBPruner::getGlobalBounds(PxBounds3& bounds) const
{
	if(!(mAABBTree && mAABBTree->getNodes()))
	{
		bounds.setEmpty();
		return;
	}

	// getNodes() returns the root; its min/max enclose everything in the pruner.
	StoreBounds(bounds, mAABBTree->getNodes()->mBVMin, mAABBTree->getNodes()->mBVMax);
}
#endif
| 12,574 | C++ | 30.516291 | 222 | 0.745904 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuPruningPool.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_PRUNING_POOL_H
#define GU_PRUNING_POOL_H
#include "common/PxPhysXCommonConfig.h"
#include "GuPrunerTypedef.h"
#include "GuPrunerPayload.h"
#include "GuBounds.h"
#include "GuAABBTreeBounds.h"
namespace physx
{
namespace Gu
{
// Controls whether the pool keeps a per-object transform alongside each bounds
// entry (see PruningPool::mTransforms; NULL when caching is disabled).
enum TransformCacheMode
{
TRANSFORM_CACHE_UNUSED,	// no transforms cached
TRANSFORM_CACHE_LOCAL,	// NOTE(review): presumably shape-local transforms - confirm at call sites
TRANSFORM_CACHE_GLOBAL	// NOTE(review): presumably world-space transforms (used by IncrementalAABBPruner)
};
// This class is designed to maintain a two way mapping between pair(PrunerPayload/userdata,AABB) and PrunerHandle
// Internally there's also an index for handles (AP: can be simplified?)
// This class effectively stores bounded pruner payloads/userdata, returns a PrunerHandle and allows O(1)
// access to them using a PrunerHandle
// Supported operations are add, remove, update bounds
class PX_PHYSX_COMMON_API PruningPool : public PxUserAllocated
{
PX_NOCOPY(PruningPool)
public:
PruningPool(PxU64 contextID, TransformCacheMode mode/*=TRANSFORM_CACHE_UNUSED*/);
~PruningPool();
// Returns the payload stored for 'handle'. When 'data' is non-NULL it is also
// filled with pointers into the pool's bounds/transform arrays for that object.
PX_FORCE_INLINE const PrunerPayload& getPayloadData(PrunerHandle handle, PrunerPayloadData* data=NULL) const
{
const PoolIndex index = getIndex(handle);
if(data)
{
// NOTE(review): const_cast hands out a writable pointer to the internal bounds
// from a const method - callers can mutate pool state through it.
PxBounds3* wb = const_cast<PxBounds3*>(mWorldBoxes.getBounds());
data->mBounds = wb + index;
data->mTransform = mTransforms ? mTransforms + index : NULL;	// NULL when transform caching is off
}
return mObjects[index];
}
void shiftOrigin(const PxVec3& shift);
// PT: adds 'count' objects to the pool. Needs 'count' bounds and 'count' payloads passed as input. Writes out 'count' handles
// in 'results' array. Function returns number of successfully added objects, ideally 'count' but can be less in case we run
// out of memory.
PxU32 addObjects(PrunerHandle* results, const PxBounds3* bounds, const PrunerPayload* data, const PxTransform* transforms, PxU32 count);
// this function will swap the last object with the hole formed by removed PrunerHandle object
// and return the removed last object's index in the pool
PoolIndex removeObject(PrunerHandle h, PrunerPayloadRemovalCallback* removalCallback);
// Data access
PX_FORCE_INLINE PoolIndex getIndex(PrunerHandle h)const { return mHandleToIndex[h]; }
PX_FORCE_INLINE PrunerPayload* getObjects() const { return mObjects; }
PX_FORCE_INLINE const PxTransform* getTransforms() const { return mTransforms; }
PX_FORCE_INLINE PxTransform* getTransforms() { return mTransforms; }
// Writes the cached transform for 'handle'; returns false when the pool was
// created without a transform cache.
PX_FORCE_INLINE bool setTransform(PrunerHandle handle, const PxTransform& transform)
{
if(!mTransforms)
return false;
mTransforms[getIndex(handle)] = transform;
return true;
}
PX_FORCE_INLINE PxU32 getNbActiveObjects() const { return mNbObjects; }
PX_FORCE_INLINE const PxBounds3* getCurrentWorldBoxes() const { return mWorldBoxes.getBounds(); }
PX_FORCE_INLINE PxBounds3* getCurrentWorldBoxes() { return mWorldBoxes.getBounds(); }
PX_FORCE_INLINE const AABBTreeBounds& getCurrentAABBTreeBounds() const { return mWorldBoxes; }
void updateAndInflateBounds(const PrunerHandle* handles, const PxU32* boundsIndices, const PxBounds3* newBounds, const PxTransform32* newTransforms, PxU32 count, float epsilon);
void preallocate(PxU32 entries);
// protected:
PxU32 mNbObjects; //!< Current number of objects
PxU32 mMaxNbObjects; //!< Max. number of objects (capacity for mWorldBoxes, mObjects)
//!< these arrays are parallel
AABBTreeBounds mWorldBoxes; //!< List of world boxes, stores mNbObjects, capacity=mMaxNbObjects
PrunerPayload* mObjects; //!< List of objects, stores mNbObjects, capacity=mMaxNbObjects
PxTransform* mTransforms; //!< Optional transform cache, NULL when mode==TRANSFORM_CACHE_UNUSED
const TransformCacheMode mTransformCacheMode;
// private:
PoolIndex* mHandleToIndex; //!< Maps from PrunerHandle to internal index (payload/userData index in mObjects)
PrunerHandle* mIndexToHandle; //!< Inverse map from objectIndex to PrunerHandle
// this is the head of a list of holes formed in mHandleToIndex by removed handles
// the rest of the list is stored in holes in mHandleToIndex (in place)
PrunerHandle mFirstRecycledHandle;
PxU64 mContextID;
bool resize(PxU32 newCapacity);
};
}
}
#endif
| 6,129 | C | 46.153846 | 187 | 0.715614 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuWindingNumberT.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_WINDING_NUMBER_T_H
#define GU_WINDING_NUMBER_T_H
/** \addtogroup geomutils
@{
*/
#include "GuTriangle.h"
#include "foundation/PxArray.h"
#include "foundation/PxHashMap.h"
#include "foundation/PxVec3.h"
#include "GuBVH.h"
#include "GuAABBTreeQuery.h"
#include "GuAABBTreeNode.h"
#include "GuWindingNumberCluster.h"
namespace physx
{
namespace Gu
{
using Triangle = Gu::IndexedTriangleT<PxI32>;
// Cluster approximation extended with the area-weighted outer-product sum needed
// for the second-order (gradient) winding-number term; see
// secondOrderClusterApproximation() below.
template<typename R, typename V3>
struct SecondOrderClusterApproximationT : public ClusterApproximationT<R, V3>
{
PxMat33 WeightedOuterProductSum;
// Default ctor leaves members uninitialized (filled by approximateCluster()).
PX_FORCE_INLINE SecondOrderClusterApproximationT() {}
PX_FORCE_INLINE SecondOrderClusterApproximationT(R radius, R areaSum, const V3& weightedCentroid, const V3& weightedNormalSum, const PxMat33& weightedOuterProductSum) :
ClusterApproximationT<R, V3>(radius, areaSum, weightedCentroid, weightedNormalSum), WeightedOuterProductSum(weightedOuterProductSum)
{ }
};
//Evaluates a first order winding number approximation for a given cluster (cluster = bunch of triangles)
// First-order (dipole) winding-number approximation of a triangle cluster: the
// cluster is treated as a single point source at its area-weighted centroid.
template<typename R, typename V3>
PX_FORCE_INLINE R firstOrderClusterApproximation(const V3& weightedCentroid, const V3& weightedNormalSum,
	const V3& evaluationPoint)
{
	const V3 toCentroid = weightedCentroid - evaluationPoint;
	const R dist = toCentroid.magnitude();
	// 1/(4*pi) scaling of the dipole kernel, divided by dist^3.
	const R oneOver4Pi = R(0.25 / 3.141592653589793238462643383);
	return (oneOver4Pi / (dist * dist * dist)) * weightedNormalSum.dot(toCentroid);
}
// Convenience overload: evaluates the first-order approximation stored in 'c'.
template<typename R, typename V3>
PX_FORCE_INLINE R clusterApproximation(const ClusterApproximationT<R, V3>& c, const V3& evaluationPoint)
{
return firstOrderClusterApproximation(c.WeightedCentroid, c.WeightedNormalSum, evaluationPoint);
}
//Evaluates a second order winding number approximation for a given cluster (cluster = bunch of triangles)
//Evaluates a second order winding number approximation for a given cluster (cluster = bunch of triangles)
// Adds a correction term built from the cluster's weighted outer-product sum on
// top of the first-order (dipole) term.
template<typename R, typename V3>
PX_FORCE_INLINE R secondOrderClusterApproximation(const V3& weightedCentroid, const V3& weightedNormalSum,
const PxMat33& weightedOuterProductSum, const V3& evaluationPoint)
{
const V3 dir = weightedCentroid - evaluationPoint;
const R l = dir.magnitude();
const R l2 = l * l;
const R scaling = R(0.25 / 3.141592653589793238462643383) / (l2 * l);
const R firstOrder = scaling * weightedNormalSum.dot(dir);
// m__ form the (symmetric) matrix scaling*I - 3*scaling/l2 * dir*dir^T.
const R scaling2 = -R(3.0) * scaling / l2;
const R m11 = scaling + scaling2 * dir.x * dir.x, m12 = scaling2 * dir.x * dir.y, m13 = scaling2 * dir.x * dir.z;
const R m21 = scaling2 * dir.y * dir.x, m22 = scaling + scaling2 * dir.y * dir.y, m23 = scaling2 * dir.y * dir.z;
const R m31 = scaling2 * dir.z * dir.x, m32 = scaling2 * dir.z * dir.y, m33 = scaling + scaling2 * dir.z * dir.z;
// Frobenius inner product of the outer-product sum with that matrix.
return firstOrder + (weightedOuterProductSum.column0.x * m11 + weightedOuterProductSum.column1.x * m12 + weightedOuterProductSum.column2.x * m13 +
weightedOuterProductSum.column0.y * m21 + weightedOuterProductSum.column1.y * m22 + weightedOuterProductSum.column2.y * m23 +
weightedOuterProductSum.column0.z * m31 + weightedOuterProductSum.column1.z * m32 + weightedOuterProductSum.column2.z * m33);
}
// Convenience overload: evaluates the second-order approximation stored in 'c'.
template<typename R, typename V3>
PX_FORCE_INLINE R clusterApproximation(const SecondOrderClusterApproximationT<R, V3>& c, const V3& evaluationPoint)
{
return secondOrderClusterApproximation(c.WeightedCentroid, c.WeightedNormalSum, c.WeightedOuterProductSum, evaluationPoint);
}
//Computes parameters to approximately represent a cluster (cluster = bunch of triangles) to be used to compute a winding number approximation
// Builds a first-order cluster approximation for triangles triangleSet[start..end):
// area-weighted centroid, summed area-weighted normals, and a bounding radius
// around the centroid (max distance to any cluster vertex).
template<typename R, typename V3>
void approximateCluster(const PxArray<PxI32>& triangleSet, PxU32 start, PxU32 end, const PxU32* triangles, const V3* points,
const PxArray<R>& triangleAreas, const PxArray<V3>& triangleNormalsTimesTriangleArea, const PxArray<V3>& triangleCentroids, ClusterApproximationT<R, V3>& cluster)
{
V3 weightedCentroid(0., 0., 0.);
R areaSum = 0;
V3 weightedNormalSum(0., 0., 0.);
for (PxU32 i = start; i < end; ++i)
{
PxI32 triId = triangleSet[i];
areaSum += triangleAreas[triId];
weightedCentroid += triangleCentroids[triId] * triangleAreas[triId];
weightedNormalSum += triangleNormalsTimesTriangleArea[triId];
}
weightedCentroid = weightedCentroid / areaSum;
// Radius = distance from the centroid to the farthest triangle vertex.
R radiusSquared = 0;
for (PxU32 i = start; i < end; ++i)
{
PxI32 triId = triangleSet[i];
const PxU32* tri = &triangles[3 * triId];
R d2 = (weightedCentroid - points[tri[0]]).magnitudeSquared();
if (d2 > radiusSquared) radiusSquared = d2;
d2 = (weightedCentroid - points[tri[1]]).magnitudeSquared();
if (d2 > radiusSquared) radiusSquared = d2;
d2 = (weightedCentroid - points[tri[2]]).magnitudeSquared();
if (d2 > radiusSquared) radiusSquared = d2;
}
cluster = ClusterApproximationT<R, V3>(PxSqrt(radiusSquared), areaSum, weightedCentroid, weightedNormalSum/*, weightedOuterProductSum*/);
}
//Computes parameters to approximately represent a cluster (cluster = bunch of triangles) to be used to compute a winding number approximation
// Second-order variant of approximateCluster(): additionally accumulates the
// area-weighted outer products of (centroid offset, weighted normal) needed by
// secondOrderClusterApproximation().
template<typename R, typename V3>
void approximateCluster(const PxArray<PxI32>& triangleSet, PxU32 start, PxU32 end, const PxU32* triangles, const V3* points,
const PxArray<R>& triangleAreas, const PxArray<V3>& triangleNormalsTimesTriangleArea, const PxArray<V3>& triangleCentroids, SecondOrderClusterApproximationT<R, V3>& cluster)
{
V3 weightedCentroid(0., 0., 0.);
R areaSum = 0;
V3 weightedNormalSum(0., 0., 0.);
for (PxU32 i = start; i < end; ++i)
{
PxI32 triId = triangleSet[i];
areaSum += triangleAreas[triId];
weightedCentroid += triangleCentroids[triId] * triangleAreas[triId];
weightedNormalSum += triangleNormalsTimesTriangleArea[triId];
}
weightedCentroid = weightedCentroid / areaSum;
R radiusSquared = 0;
PxMat33 weightedOuterProductSum(PxZERO::PxZero);
for (PxU32 i = start; i < end; ++i)
{
PxI32 triId = triangleSet[i];
const PxU32* tri = &triangles[3 * triId];
// Radius = distance from the centroid to the farthest triangle vertex.
R d2 = (weightedCentroid - points[tri[0]]).magnitudeSquared();
if (d2 > radiusSquared) radiusSquared = d2;
d2 = (weightedCentroid - points[tri[1]]).magnitudeSquared();
if (d2 > radiusSquared) radiusSquared = d2;
d2 = (weightedCentroid - points[tri[2]]).magnitudeSquared();
if (d2 > radiusSquared) radiusSquared = d2;
weightedOuterProductSum = weightedOuterProductSum + PxMat33::outer(triangleCentroids[triId] - weightedCentroid, triangleNormalsTimesTriangleArea[triId]);
}
cluster = SecondOrderClusterApproximationT<R, V3>(PxSqrt(radiusSquared), areaSum, weightedCentroid, weightedNormalSum, weightedOuterProductSum);
}
//Exact winding number evaluation, needs to be called for every triangle close to the winding number query point
// Exact signed solid-angle contribution of one triangle (a,b,c) as seen from 'p',
// normalized so the contributions of a closed mesh sum to the winding number.
template<typename R, typename V3>
PX_FORCE_INLINE R evaluateExact(V3 a, V3 b, V3 c, const V3& p)
{
	// Translate the triangle so the query point sits at the origin.
	a -= p;
	b -= p;
	c -= p;

	const R la = a.magnitude();
	const R lb = b.magnitude();
	const R lc = c.magnitude();

	// Van Oosterom-Strackee solid-angle formula: atan2(det[a b c], denominator).
	// Expressions kept exactly as before so floating-point results are unchanged.
	const R y = a.x * b.y * c.z - a.x * b.z * c.y - a.y * b.x * c.z + a.y * b.z * c.x + a.z * b.x * c.y - a.z * b.y * c.x;
	const R x = (la * lb * lc + (a.x * b.x + a.y * b.y + a.z * b.z) * lc +
		(b.x * c.x + b.y * c.y + b.z * c.z) * la + (c.x * a.x + c.y * a.y + c.z * a.z) * lb);

	// atan2 returns half the solid angle; 2/(4*pi) converts it to a winding contribution.
	const R twoOver4PI = R(0.5 / 3.141592653589793238462643383);
	return twoOver4PI * PxAtan2(y, x);
}
// Half-open range [start, end) into the triIndices scratch array used by
// precomputeClusterInformation() below.
struct Section
{
PxI32 start;
PxI32 end;
Section(PxI32 s, PxI32 e) : start(s), end(e)
{}
};
//Helper method that recursively traverses the given BVH tree and computes a cluster approximation for every node and links it to the node
template<typename R, typename V3>
void precomputeClusterInformation(PxI32 nodeId, const BVHNode* tree, const PxU32* triangles, const PxU32 numTriangles,
const V3* points, PxHashMap<PxU32, ClusterApproximationT<R, V3>>& infos, const PxArray<R> triangleAreas,
const PxArray<V3>& triangleNormalsTimesTriangleArea, const PxArray<V3>& triangleCentroids)
{
PxArray<PxI32> stack;
stack.pushBack(nodeId);
PxArray<Section> returnStack;
PxArray<PxI32> triIndices;
triIndices.reserve(numTriangles);
infos.reserve(PxU32(1.2f*numTriangles));
while (stack.size() > 0)
{
nodeId = stack.popBack();
if (nodeId >= 0)
{
const BVHNode& node = tree[nodeId];
if (node.isLeaf())
{
triIndices.pushBack(node.getPrimitiveIndex());
returnStack.pushBack(Section(triIndices.size() - 1, triIndices.size()));
continue;
}
stack.pushBack(-nodeId - 1); //Marker for return index
stack.pushBack(node.getPosIndex());
stack.pushBack(node.getPosIndex() + 1);
}
else
{
Section trianglesA = returnStack.popBack();
Section trianglesB = returnStack.popBack();
Section sum(trianglesB.start, trianglesA.end);
nodeId = -nodeId - 1;
ClusterApproximationT<R, V3> c;
approximateCluster<R, V3>(triIndices, sum.start, sum.end, triangles, points, triangleAreas, triangleNormalsTimesTriangleArea, triangleCentroids, c);
infos.insert(PxU32(nodeId), c);
returnStack.pushBack(sum);
}
}
}
//Precomputes a cluster approximation for every node in the BVH tree
// Convenience entry point: derives per-triangle areas, area-scaled normals and
// centroids from the mesh, then runs the node-cluster precomputation above.
template<typename R, typename V3>
void precomputeClusterInformation(const BVHNode* tree, const PxU32* triangles, const PxU32 numTriangles,
const V3* points, PxHashMap<PxU32, ClusterApproximationT<R, V3>>& result, PxI32 rootNodeIndex)
{
PxArray<R> triangleAreas;
triangleAreas.resize(numTriangles);
PxArray<V3> triangleNormalsTimesTriangleArea;
triangleNormalsTimesTriangleArea.resize(numTriangles);
PxArray<V3> triangleCentroids;
triangleCentroids.resize(numTriangles);
for (PxU32 i = 0; i < numTriangles; ++i)
{
const PxU32* tri = &triangles[3 * i];
const V3& a = points[tri[0]];
const V3& b = points[tri[1]];
const V3& c = points[tri[2]];
// 0.5*cross = area-scaled normal; its magnitude is the triangle area.
triangleNormalsTimesTriangleArea[i] = (b - a).cross(c - a) * R(0.5);
triangleAreas[i] = triangleNormalsTimesTriangleArea[i].magnitude();
triangleCentroids[i] = (a + b + c) * R(1.0 / 3.0);
}
result.clear();
precomputeClusterInformation(rootNodeIndex, tree, triangles, numTriangles, points, result, triangleAreas, triangleNormalsTimesTriangleArea, triangleCentroids);
}
// BVH traversal controller that accumulates the winding number at mQueryPoint:
// exact per-triangle evaluation near the query point, cluster approximations for
// subtrees farther away than mDistanceThresholdBeta * cluster radius.
template<typename R, typename V3>
class WindingNumberTraversalController
{
public:
R mWindingNumber = 0;	// accumulated result, read after traversal
private:
const PxU32* mTriangles;
const V3* mPoints;
const PxHashMap<PxU32, ClusterApproximationT<R, V3>>& mClusters;
V3 mQueryPoint;
R mDistanceThresholdBeta;	// larger beta = more exact evaluations, higher accuracy/cost
public:
PX_FORCE_INLINE WindingNumberTraversalController(const PxU32* triangles, const V3* points,
const PxHashMap<PxU32, ClusterApproximationT<R, V3>>& clusters, const V3& queryPoint, R distanceThresholdBeta = 2)
: mTriangles(triangles), mPoints(points), mClusters(clusters), mQueryPoint(queryPoint), mDistanceThresholdBeta(distanceThresholdBeta)
{ }
PX_FORCE_INLINE Gu::TraversalControl::Enum analyze(const BVHNode& node, PxI32 nodeIndex)
{
if (node.isLeaf())
{
// The precomputation assumes one triangle per leaf; evaluate it exactly.
PX_ASSERT(node.getNbPrimitives() == 1);
const PxU32* tri = &mTriangles[3 * node.getPrimitiveIndex()];
mWindingNumber += evaluateExact<R, V3>(mPoints[tri[0]], mPoints[tri[1]], mPoints[tri[2]], mQueryPoint);
return Gu::TraversalControl::eDontGoDeeper;
}
// NOTE(review): find() result is dereferenced unchecked - assumes every internal
// node got a cluster during precomputeClusterInformation(); confirm.
const ClusterApproximationT<R, V3>& cluster = mClusters.find(nodeIndex)->second;
const R distSquared = (mQueryPoint - cluster.WeightedCentroid).magnitudeSquared();
const R threshold = mDistanceThresholdBeta * cluster.Radius;
// Far enough away: the whole subtree is well-approximated by its cluster dipole.
if (distSquared > threshold * threshold)
{
//mWindingNumber += secondOrderClusterApproximation(cluster.WeightedCentroid, cluster.WeightedNormalSum, cluster.WeightedOuterProductSum, mQueryPoint);
mWindingNumber += firstOrderClusterApproximation<R, V3>(cluster.WeightedCentroid, cluster.WeightedNormalSum, mQueryPoint); // secondOrderClusterApproximation(cluster.WeightedCentroid, cluster.WeightedNormalSum, cluster.WeightedOuterProductSum, mQueryPoint);
return Gu::TraversalControl::eDontGoDeeper;
}
return Gu::TraversalControl::eGoDeeper;
}
private:
PX_NOCOPY(WindingNumberTraversalController)
};
template<typename R, typename V3>
R computeWindingNumber(const BVHNode* tree, const V3& q, R beta, const PxHashMap<PxU32, ClusterApproximationT<R, V3>>& clusters,
const PxU32* triangles, const V3* points)
{
WindingNumberTraversalController<R, V3> c(triangles, points, clusters, q, beta);
traverseBVH<WindingNumberTraversalController<R, V3>>(tree, c);
return c.mWindingNumber;
}
}
}
/** @} */
#endif
| 14,073 | C | 41.264264 | 261 | 0.732822 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuAABBTree.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_AABBTREE_H
#define GU_AABBTREE_H
#include "foundation/PxMemory.h"
#include "foundation/PxArray.h"
#include "foundation/PxBounds3.h"
#include "foundation/PxUserAllocated.h"
#include "common/PxPhysXCommonConfig.h"
#include "GuPrunerTypedef.h"
namespace physx
{
namespace Gu
{
struct BVHNode;
struct SAH_Buffers;
class NodeAllocator;
struct BuildStats;
class AABBTreeBounds;
// PT: TODO: sometimes we export member functions, sometimes we export the whole class. What's the story here?
#if PX_VC
#pragma warning(push)
#pragma warning( disable : 4251 ) // class needs to have dll-interface to be used by clients of class
#endif
//! Contains AABB-tree build parameters
class PX_PHYSX_COMMON_API AABBTreeBuildParams : public PxUserAllocated
{
public:
// limit = max primitives per leaf, nb_prims = number of source primitives,
// bounds = app-controlled AABB array, bs = splitting strategy for the build.
AABBTreeBuildParams(PxU32 limit = 1, PxU32 nb_prims = 0, const AABBTreeBounds* bounds = NULL, BVHBuildStrategy bs = BVH_SPLATTER_POINTS) :
mLimit (limit),
mNbPrimitives (nb_prims),
mBounds (bounds),
mCache (NULL),
mBuildStrategy (bs)
{
}
~AABBTreeBuildParams()
{
reset();
}
// Clears counts/pointers and frees the internally-managed center cache.
PX_FORCE_INLINE void reset()
{
mLimit = mNbPrimitives = 0;
mBounds = NULL;
PX_FREE(mCache);
}
PxU32 mLimit; //!< Limit number of primitives / node. If limit is 1, build a complete tree (2*N-1 nodes)
PxU32 mNbPrimitives; //!< Number of (source) primitives.
const AABBTreeBounds* mBounds; //!< Shortcut to an app-controlled array of AABBs.
mutable PxVec3* mCache; //!< Cache for AABB centers - managed by build code.
BVHBuildStrategy mBuildStrategy; //!< Splitting strategy used during the build.
};
//! AABB tree node used for building
class PX_PHYSX_COMMON_API AABBTreeBuildNode : public PxUserAllocated
{
public:
PX_FORCE_INLINE AABBTreeBuildNode() {}
PX_FORCE_INLINE ~AABBTreeBuildNode() {}
PX_FORCE_INLINE const PxBounds3& getAABB() const { return mBV; }
PX_FORCE_INLINE const AABBTreeBuildNode* getPos() const { return mPos; }
// Children are stored as a contiguous pair, so the "negative" child is mPos+1.
PX_FORCE_INLINE const AABBTreeBuildNode* getNeg() const { const AABBTreeBuildNode* P = mPos; return P ? P + 1 : NULL; }
// A node is a leaf iff it has no children.
PX_FORCE_INLINE bool isLeaf() const { return !getPos(); }
PxBounds3 mBV; //!< Global bounding-volume enclosing all the node-related primitives
const AABBTreeBuildNode* mPos; //!< "Positive" & "Negative" children
PxU32 mNodeIndex; //!< Index of node-related primitives (in the tree's mIndices array)
PxU32 mNbPrimitives; //!< Number of primitives for this node
PX_FORCE_INLINE PxU32 getNbPrimitives() const { return mNbPrimitives; }
PX_FORCE_INLINE PxU32 getNbRuntimePrimitives() const { return mNbPrimitives; }
PX_FORCE_INLINE void setNbRunTimePrimitives(PxU32 val) { mNbPrimitives = val; }
// Primitive indices are a contiguous slice [mNodeIndex, mNodeIndex+mNbPrimitives) of the tree's index array.
PX_FORCE_INLINE const PxU32* getPrimitives(const PxU32* base) const { return base + mNodeIndex; }
PX_FORCE_INLINE PxU32* getPrimitives(PxU32* base) { return base + mNodeIndex; }
// Build-time subdivision entry points (implemented out of line).
void subdivide(const AABBTreeBuildParams& params, BuildStats& stats, NodeAllocator& allocator, PxU32* const indices);
void subdivideSAH(const AABBTreeBuildParams& params, SAH_Buffers& sah, BuildStats& stats, NodeAllocator& allocator, PxU32* const indices);
void _buildHierarchy(const AABBTreeBuildParams& params, BuildStats& stats, NodeAllocator& allocator, PxU32* const indices);
void _buildHierarchySAH(const AABBTreeBuildParams& params, SAH_Buffers& sah, BuildStats& stats, NodeAllocator& allocator, PxU32* const indices);
};
//! For complete trees we can predict the final number of nodes and preallocate them. For incomplete trees we can't.
//! But we don't want to allocate nodes one by one (which would be quite slow), so we use this helper class to
//! allocate N nodes at once, while minimizing the amount of nodes allocated for nothing. An initial amount of
//! nodes is estimated using the max number for a complete tree, and the user-defined number of primitives per leaf.
//! In ideal cases this estimated number will be quite close to the final number of nodes. When that number is not
//! enough though, slabs of N=1024 extra nodes are allocated until the build is complete.
class PX_PHYSX_COMMON_API NodeAllocator : public PxUserAllocated
{
public:
NodeAllocator();
~NodeAllocator();
void release();
// Estimates and preallocates the initial slab from the primitive count and per-leaf limit.
void init(PxU32 nbPrimitives, PxU32 limit);
// Returns a new pair of sibling nodes (children are contiguous, cf. AABBTreeBuildNode::getNeg).
AABBTreeBuildNode* getBiNode();
AABBTreeBuildNode* mPool;
// A contiguous chunk of preallocated build nodes.
struct Slab
{
PX_FORCE_INLINE Slab() {}
PX_FORCE_INLINE Slab(AABBTreeBuildNode* pool, PxU32 nbUsedNodes, PxU32 maxNbNodes) : mPool(pool), mNbUsedNodes(nbUsedNodes), mMaxNbNodes(maxNbNodes) {}
AABBTreeBuildNode* mPool; //!< Slab memory
PxU32 mNbUsedNodes; //!< Number of nodes already handed out from this slab
PxU32 mMaxNbNodes; //!< Slab capacity
};
PxArray<Slab> mSlabs;
PxU32 mCurrentSlabIndex;
PxU32 mTotalNbNodes;
};
#if PX_VC
#pragma warning(pop)
#endif
/*
* \brief Builds AABBtree from given parameters.
* \param params [in/out] AABBTree build params
* \param nodeAllocator [in/out] Node allocator
* \param stats [out] Statistics
* \return Indices buffer allocated during build, or NULL if failed
*/
PX_PHYSX_COMMON_API PxU32* buildAABBTree(const AABBTreeBuildParams& params, NodeAllocator& nodeAllocator, BuildStats& stats);
// PT: TODO: explain how users should call these functions and maybe revisit this
// Flattens the build-time node hierarchy into a linear array of runtime BVHNodes.
PX_PHYSX_COMMON_API void flattenTree(const NodeAllocator& nodeAllocator, BVHNode* dest, const PxU32* remap = NULL);
// Convenience entry point: builds a tree for the given bounds and returns it as a flattened node array.
PX_PHYSX_COMMON_API void buildAABBTree(PxU32 nbBounds, const AABBTreeBounds& bounds, PxArray<BVHNode>& tree);
// In-place partition of prims around splitValue along the given axis (build split helper) —
// see implementation for the exact return semantics.
PxU32 reshuffle(PxU32 nb, PxU32* const PX_RESTRICT prims, const PxVec3* PX_RESTRICT centers, float splitValue, PxU32 axis);
// Simple growable bit array; storage is allocated in 32-bit words.
// NOTE(review): the class owns mBits but relies on the implicit copy ctor/assignment;
// copying an initialized BitArray would double-free mBits — confirm instances are never copied.
class BitArray
{
public:
BitArray() : mBits(NULL), mSize(0) {}
BitArray(PxU32 nb_bits) { init(nb_bits); }
~BitArray() { PX_FREE(mBits); }
bool init(PxU32 nb_bits);
// Data management
PX_FORCE_INLINE void setBit(PxU32 bit_number)
{
mBits[bit_number>>5] |= 1<<(bit_number&31);
}
PX_FORCE_INLINE void clearBit(PxU32 bit_number)
{
mBits[bit_number>>5] &= ~(1<<(bit_number&31));
}
PX_FORCE_INLINE void toggleBit(PxU32 bit_number)
{
mBits[bit_number>>5] ^= 1<<(bit_number&31);
}
// mSize is in dwords, hence mSize*4 bytes.
PX_FORCE_INLINE void clearAll() { PxMemZero(mBits, mSize*4); }
PX_FORCE_INLINE void setAll() { PxMemSet(mBits, 0xff, mSize*4); }
void resize(PxU32 maxBitNumber);
// Data access
PX_FORCE_INLINE PxIntBool isSet(PxU32 bit_number) const
{
return PxIntBool(mBits[bit_number>>5] & (1<<(bit_number&31)));
}
PX_FORCE_INLINE const PxU32* getBits() const { return mBits; }
PX_FORCE_INLINE PxU32 getSize() const { return mSize; }
protected:
PxU32* mBits; //!< Array of bits
PxU32 mSize; //!< Size of the array in dwords
};
//! Contains AABB-tree merge parameters
// Read-only view of a source tree (nodes + primitive indices) to be merged into another tree.
class AABBTreeMergeData
{
public:
AABBTreeMergeData(PxU32 nbNodes, const BVHNode* nodes, PxU32 nbIndices, const PxU32* indices, PxU32 indicesOffset) :
mNbNodes(nbNodes), mNodes(nodes), mNbIndices(nbIndices), mIndices(indices), mIndicesOffset(indicesOffset)
{
}
~AABBTreeMergeData() {}
// Root is the first node of the array.
PX_FORCE_INLINE const BVHNode& getRootNode() const { return *mNodes; }
public:
PxU32 mNbNodes; //!< Number of nodes of AABB tree merge
const BVHNode* mNodes; //!< Nodes of AABB tree merge
PxU32 mNbIndices; //!< Number of indices of AABB tree merge
const PxU32* mIndices; //!< Indices of AABB tree merge
PxU32 mIndicesOffset; //!< Indices offset from pruning pool
};
// Progressive building
class FIFOStack;
//~Progressive building
// PT: base class used to share some data and code between Gu::AABBtree and Gu::BVH. This is WIP and subject to change.
// Design dictated by refactoring necessities rather than a grand vision of something.
class BVHCoreData : public PxUserAllocated
{
public:
BVHCoreData() : mNbIndices(0), mNbNodes(0), mNodes(NULL), mIndices(NULL) {}
PX_FORCE_INLINE PxU32 getNbIndices() const { return mNbIndices; }
PX_FORCE_INLINE const PxU32* getIndices() const { return mIndices; }
PX_FORCE_INLINE PxU32* getIndices() { return mIndices; }
PX_FORCE_INLINE void setIndices(PxU32* indices) { mIndices = indices; }
PX_FORCE_INLINE PxU32 getNbNodes() const { return mNbNodes; }
PX_FORCE_INLINE const BVHNode* getNodes() const { return mNodes; }
PX_FORCE_INLINE BVHNode* getNodes() { return mNodes; }
// Refits the entire tree against the given (updated) object bounds.
PX_PHYSX_COMMON_API void fullRefit(const PxBounds3* boxes);
// PT: I'm leaving the above accessors here to avoid refactoring the SQ code using them, but members became public.
PxU32 mNbIndices; //!< Nb indices
PxU32 mNbNodes; //!< Number of nodes in the tree.
BVHNode* mNodes; //!< Linear pool of nodes.
PxU32* mIndices; //!< Indices in the app list. Indices are reorganized during build (permutation).
};
// Adds partial-refit support on top of BVHCoreData: nodes can be marked dirty and
// later refitted in one pass via refitMarkedNodes.
class BVHPartialRefitData : public BVHCoreData
{
public:
PX_PHYSX_COMMON_API BVHPartialRefitData();
PX_PHYSX_COMMON_API ~BVHPartialRefitData();
PX_PHYSX_COMMON_API void releasePartialRefitData(bool clearRefitMap);
// adds node[index] to a list of nodes to refit when refitMarkedNodes is called
// Note that this includes updating the hierarchy up the chain
PX_PHYSX_COMMON_API void markNodeForRefit(TreeNodeIndex nodeIndex);
PX_PHYSX_COMMON_API void refitMarkedNodes(const PxBounds3* boxes);
PX_FORCE_INLINE PxU32* getUpdateMap() { return mUpdateMap; }
protected:
PxU32* mParentIndices; //!< PT: hot/cold split, keep parent data in separate array
PxU32* mUpdateMap; //!< PT: Local index to tree node index
BitArray mRefitBitmask; //!< bit is set for each node index in markForRefit
PxU32 mRefitHighestSetWord;
// Lazily builds/returns mParentIndices.
PxU32* getParentIndices();
public:
void createUpdateMap(PxU32 nbObjects);
};
//! AABB-tree, N primitives/leaf
// PT: TODO: each PX_PHYSX_COMMON_API is a cross-DLL call, should we split that class in Gu/Sq parts to minimize this?
class AABBTree : public BVHPartialRefitData
{
public:
PX_PHYSX_COMMON_API AABBTree();
PX_PHYSX_COMMON_API ~AABBTree();
// Build
PX_PHYSX_COMMON_API bool build(const AABBTreeBuildParams& params, NodeAllocator& nodeAllocator);
// Progressive building
// Performs a bounded amount of build work per call; progress/limit drive the incremental stepping.
PX_PHYSX_COMMON_API PxU32 progressiveBuild(const AABBTreeBuildParams& params, NodeAllocator& nodeAllocator, BuildStats& stats, PxU32 progress, PxU32 limit);
//~Progressive building
PX_PHYSX_COMMON_API void release(bool clearRefitMap=true);
// Merge tree with another one
PX_PHYSX_COMMON_API void mergeTree(const AABBTreeMergeData& tree);
// Initialize tree from given merge data
PX_PHYSX_COMMON_API void initTree(const AABBTreeMergeData& tree);
// Data access
PX_FORCE_INLINE PxU32 getTotalPrims() const { return mTotalPrims; }
PX_PHYSX_COMMON_API void shiftOrigin(const PxVec3& shift);
// Shift indices of the tree by offset. Used for merged trees, when initial indices needs to be shifted to match indices in current pruning pool
PX_PHYSX_COMMON_API void shiftIndices(PxU32 offset);
#if PX_DEBUG
void validate() {}
#endif
private:
PxU32 mTotalPrims; //!< Copy of final BuildStats::mTotalPrims
// Progressive building
FIFOStack* mStack; //!< Pending-node stack used between progressiveBuild calls
//~Progressive building
bool buildInit(const AABBTreeBuildParams& params, NodeAllocator& nodeAllocator, BuildStats& stats);
void buildEnd(const AABBTreeBuildParams& params, NodeAllocator& nodeAllocator, const BuildStats& stats);
// tree merge
void mergeRuntimeNode(BVHNode& targetNode, const AABBTreeMergeData& tree, PxU32 targetNodeIndex);
void mergeRuntimeLeaf(BVHNode& targetNode, const AABBTreeMergeData& tree, PxU32 targetNodeIndex);
void addRuntimeChilds(PxU32& nodeIndex, const AABBTreeMergeData& tree);
void traverseRuntimeNode(BVHNode& targetNode, const AABBTreeMergeData& tree, PxU32 nodeIndex);
};
} // namespace Gu
}
#endif // GU_AABBTREE_H
| 14,093 | C | 41.197605 | 161 | 0.698858 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuMaverickNode.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuMaverickNode.h"
using namespace physx;
using namespace Gu;
// Identity remap table shared by all MaverickNode instances (16 entries —
// presumably matching FREE_PRUNER_SIZE; confirm against GuMaverickNode.h).
const PxU32 MaverickNode::mIndices[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
bool MaverickNode::addObject(const PrunerPayload& object, PrunerHandle handle, const PxBounds3& worldAABB, const PxTransform& transform, PxU32 timeStamp)
{
if(mNbFree<FREE_PRUNER_SIZE)
{
const PxU32 index = mNbFree++;
mFreeObjects[index] = object;
mFreeHandles[index] = handle;
mFreeBounds[index] = worldAABB;
mFreeTransforms[index] = transform;
mFreeStamps[index] = timeStamp;
return true;
}
return false;
}
// Refreshes the cached bounds/transform of the entry matching 'object'.
// Returns false when the payload is not stored here.
bool MaverickNode::updateObject(const PrunerPayload& object, const PxBounds3& worldAABB, const PxTransform& transform)
{
	// Linear scan is fine: at most FREE_PRUNER_SIZE entries.
	PxU32 i = 0;
	while(i<mNbFree)
	{
		if(mFreeObjects[i]==object)
		{
			mFreeTransforms[i] = transform;
			mFreeBounds[i] = worldAABB;
			return true;
		}
		i++;
	}
	return false;
}
// Handle-based variant: refreshes the cached bounds/transform of the entry
// owning 'handle'. Returns false when the handle is not stored here.
bool MaverickNode::updateObject(PrunerHandle handle, const PxBounds3& worldAABB, const PxTransform& transform)
{
	for(PxU32 slot=0; slot<mNbFree; slot++)
	{
		if(!(mFreeHandles[slot]==handle))
			continue;

		mFreeBounds[slot] = worldAABB;
		mFreeTransforms[slot] = transform;
		return true;
	}
	return false;
}
// O(1) unordered removal: shrink the count and, unless the removed entry was
// already the last one, move the last entry into the vacated slot.
void MaverickNode::remove(PxU32 index)
{
	const PxU32 last = --mNbFree;
	if(index==last)
		return;

	mFreeBounds[index]		= mFreeBounds[last];
	mFreeTransforms[index]	= mFreeTransforms[last];
	mFreeObjects[index]		= mFreeObjects[last];
	mFreeHandles[index]		= mFreeHandles[last];
	mFreeStamps[index]		= mFreeStamps[last];
}
bool MaverickNode::removeObject(const PrunerPayload& object, PxU32& timeStamp)
{
for(PxU32 i=0;i<mNbFree;i++)
{
if(mFreeObjects[i]==object)
{
// We found the object we want to remove. Close the gap as usual.
timeStamp = mFreeStamps[i];
remove(i);
return true;
}
}
return false;
}
// Handle-based variant: removes the entry owning 'handle', reporting its
// timestamp through 'timeStamp'. Returns false when the handle is not stored here.
bool MaverickNode::removeObject(PrunerHandle handle, PxU32& timeStamp)
{
	PxU32 slot = 0;
	while(slot<mNbFree)
	{
		if(mFreeHandles[slot]==handle)
		{
			// Found it: report its timestamp, then close the gap as usual.
			timeStamp = mFreeStamps[slot];
			remove(slot);
			return true;
		}
		slot++;
	}
	return false;
}
// Removes every entry whose timestamp equals 'timeStamp'; returns how many were removed.
PxU32 MaverickNode::removeMarkedObjects(PxU32 timeStamp)
{
	PxU32 removedCount = 0;
	for(PxU32 i=0; i<mNbFree; /*advance inside*/)
	{
		if(mFreeStamps[i]!=timeStamp)
		{
			i++;
			continue;
		}
		// remove() swaps the last entry into slot i, so re-examine the same slot.
		remove(i);
		removedCount++;
	}
	return removedCount;
}
// Translates all cached bounds and transform positions by -shift
// (world-origin shift support).
void MaverickNode::shiftOrigin(const PxVec3& shift)
{
	PxU32 i = mNbFree;
	while(i--)
	{
		mFreeTransforms[i].p	-= shift;
		mFreeBounds[i].minimum	-= shift;
		mFreeBounds[i].maximum	-= shift;
	}
}
| 4,206 | C++ | 27.619047 | 153 | 0.725392 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuBucketPruner.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_BUCKET_PRUNER_H
#define GU_BUCKET_PRUNER_H
#include "common/PxPhysXCommonConfig.h"
#include "GuPruner.h"
#include "GuSqInternal.h"
#include "GuPruningPool.h"
#include "foundation/PxHash.h"
#define FREE_PRUNER_SIZE 16
//#define USE_REGULAR_HASH_MAP
#ifdef USE_REGULAR_HASH_MAP
#include "foundation/PxHashMap.h"
#endif
namespace physx
{
class PxRenderOutput;
namespace Gu
{
typedef PxU32 BucketWord;
#if PX_VC
#pragma warning(push)
#pragma warning( disable : 4324 ) // Padding was added at the end of a structure because of a __declspec(align) value.
#endif
// 16-byte-aligned AABB stored as center/extents; the two spare dwords carry
// integer-encoded min/max values along the current sorting axis.
PX_ALIGN_PREFIX(16) struct BucketBox
{
PxVec3 mCenter;
PxU32 mData0; // Integer-encoded min value along sorting axis
PxVec3 mExtents;
PxU32 mData1; // Integer-encoded max value along sorting axis
#ifdef _DEBUG
// PT: we need the original min value for debug checks. Using the center/extents version
// fails because recomputing the min from them introduces FPU accuracy errors in the values.
float mDebugMin;
#endif
PX_FORCE_INLINE PxVec3 getMin() const
{
return mCenter - mExtents;
}
PX_FORCE_INLINE PxVec3 getMax() const
{
return mCenter + mExtents;
}
// Negative extents mark the box as empty.
PX_FORCE_INLINE void setEmpty()
{
mCenter = PxVec3(0.0f);
mExtents = PxVec3(-PX_MAX_BOUNDS_EXTENTS);
#ifdef _DEBUG
mDebugMin = PX_MAX_BOUNDS_EXTENTS;
#endif
}
}PX_ALIGN_SUFFIX(16);
// One level of the bucket pruner: partitions a set of boxes into 5 child buckets
// (classifyBoxes) and records per-bucket counts, offsets and bounds.
PX_ALIGN_PREFIX(16) struct BucketPrunerNode
{
BucketPrunerNode();
// Sorts the input boxes/objects/transforms into the 5 buckets defined by the
// limitX/limitZ split values, writing the bucket-ordered results to the sorted arrays.
void classifyBoxes( float limitX, float limitZ,
PxU32 nb,
BucketBox* PX_RESTRICT boxes,
const PrunerPayload* PX_RESTRICT objects,
const PxTransform* PX_RESTRICT transforms,
BucketBox* PX_RESTRICT sortedBoxes,
PrunerPayload* PX_RESTRICT sortedObjects,
PxTransform* PX_RESTRICT sortedTransforms,
bool isCrossBucket, PxU32 sortAxis);
PX_FORCE_INLINE void initCounters()
{
for(PxU32 i=0;i<5;i++)
mCounters[i] = 0;
for(PxU32 i=0;i<5;i++)
mOffsets[i] = 0;
}
BucketWord mCounters[5]; // Number of objects in each of the 5 children
BucketWord mOffsets[5]; // Start index of objects for each of the 5 children
BucketBox mBucketBox[5]; // AABBs around objects for each of the 5 children
PxU16 mOrder[8]; // PNS: 5 children => 3 bits/index => 3*5=15 bits total, for each of the 8 canonical directions
}PX_ALIGN_SUFFIX(16);
// Hashes a payload by folding its two pointer-sized words into one 64-bit value.
// On 64-bit platforms only the low 32 bits of each word are kept before folding.
PX_FORCE_INLINE PxU32 PxComputeHash(const PrunerPayload& payload)
{
#if PX_P64_FAMILY
// const PxU32 h0 = PxHash((const void*)payload.data[0]);
// const PxU32 h1 = PxHash((const void*)payload.data[1]);
const PxU32 h0 = PxU32(PX_MAX_U32 & payload.data[0]);
const PxU32 h1 = PxU32(PX_MAX_U32 & payload.data[1]);
return physx::PxComputeHash(PxU64(h0)|(PxU64(h1)<<32));
#else
return physx::PxComputeHash(PxU64(payload.data[0])|(PxU64(payload.data[1])<<32));
#endif
}
#ifdef USE_REGULAR_HASH_MAP
// Variant built on the stock PxHashMap: maps a payload to its core-array index + timestamp.
struct BucketPrunerPair : public PxUserAllocated
{
PX_FORCE_INLINE BucketPrunerPair() {}
PX_FORCE_INLINE BucketPrunerPair(PxU32 index, PxU32 stamp) : mCoreIndex(index), mTimeStamp(stamp) {}
PxU32 mCoreIndex; // index in mCoreObjects
PxU32 mTimeStamp;
};
typedef PxHashMap<PrunerPayload, BucketPrunerPair> BucketPrunerMap;
#else
// Hand-rolled variant: the payload key is stored inline with the value.
struct BucketPrunerPair : public PxUserAllocated
{
PrunerPayload mData;
PxU32 mCoreIndex; // index in mCoreObjects
PxU32 mTimeStamp;
};
// Custom hash-map - currently faster than the regular hash-map (PxHashMap), in particular for 'find-and-erase' operations.
class BucketPrunerMap : public PxUserAllocated
{
public:
BucketPrunerMap();
~BucketPrunerMap();
void purge();
void shrinkMemory();
BucketPrunerPair* addPair (const PrunerPayload& payload, PxU32 coreIndex, PxU32 timeStamp);
bool removePair (const PrunerPayload& payload, PxU32& coreIndex, PxU32& timeStamp);
const BucketPrunerPair* findPair (const PrunerPayload& payload) const;
// Recovers a pair's index from its address within the mActivePairs array.
PX_FORCE_INLINE PxU32 getPairIndex(const BucketPrunerPair* pair) const
{
return (PxU32((size_t(pair) - size_t(mActivePairs)))/sizeof(BucketPrunerPair));
}
// NOTE(review): mHashTable/mNext look like a bucket-head + collision-chain layout — confirm in the implementation.
PxU32 mHashSize;
PxU32 mMask;
PxU32 mNbActivePairs;
PxU32* mHashTable;
PxU32* mNext;
BucketPrunerPair* mActivePairs;
PxU32 mReservedMemory;
PX_FORCE_INLINE BucketPrunerPair* findPair(const PrunerPayload& payload, PxU32 hashValue) const;
void removePairInternal(const PrunerPayload& payload, PxU32 hashValue, PxU32 pairIndex);
void reallocPairs();
void reserveMemory(PxU32 memSize);
};
#endif
// Core bucket-pruner data structure: objects live in "core" arrays (plus a small
// "free" overflow array) and are lazily re-sorted into buckets by classifyBoxes().
class BucketPrunerCore : public PxUserAllocated
{
public:
PX_PHYSX_COMMON_API BucketPrunerCore(bool externalMemory=true);
PX_PHYSX_COMMON_API ~BucketPrunerCore();
void release();
// Points the core arrays at app-owned memory instead of allocating internally.
void setExternalMemory(PxU32 nbObjects, PxBounds3* boxes, PrunerPayload* objects, PxTransform* transforms);
PX_PHYSX_COMMON_API bool addObject(const PrunerPayload& object, const PxBounds3& worldAABB, const PxTransform& transform, PxU32 timeStamp=0);
bool removeObject(const PrunerPayload& object, PxU32& timeStamp);
bool updateObject(const PxBounds3& worldAABB, const PrunerPayload& object, const PxTransform& transform);
// PT: look for objects marked with input timestamp everywhere in the structure, and remove them. This is the same
// as calling 'removeObject' individually for all these objects, but much more efficient. Returns number of removed objects.
PxU32 removeMarkedObjects(PxU32 timeStamp);
PX_PHYSX_COMMON_API bool raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback&) const;
PX_PHYSX_COMMON_API bool overlap(const ShapeData& queryVolume, PrunerOverlapCallback&) const;
PX_PHYSX_COMMON_API bool sweep(const ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback&) const;
void getGlobalBounds(PxBounds3& bounds) const;
void shiftOrigin(const PxVec3& shift);
void visualize(PxRenderOutput& out, PxU32 color) const;
// Rebuild entry point: re-sorts the core arrays into buckets.
PX_FORCE_INLINE void build() { classifyBoxes(); }
#ifdef FREE_PRUNER_SIZE
PX_FORCE_INLINE PxU32 getNbObjects() const { return mNbFree + mCoreNbObjects; }
#else
PX_FORCE_INLINE PxU32 getNbObjects() const { return mCoreNbObjects; }
#endif
// private:
PxU32 mCoreNbObjects; // Current number of objects in core arrays
PxU32 mCoreCapacity; // Capacity of core arrays
PxBounds3* mCoreBoxes; // Core array
PrunerPayload* mCoreObjects; // Core array
PxTransform* mCoreTransforms;
PxU32* mCoreRemap; // Remaps core index to sorted index, i.e. sortedIndex = mCoreRemap[coreIndex]
BucketBox* mSortedWorldBoxes; // Sorted array
PrunerPayload* mSortedObjects; // Sorted array
PxTransform* mSortedTransforms;
#ifdef FREE_PRUNER_SIZE
PxU32 mNbFree; // Current number of objects in the "free array" (mFreeObjects/mFreeBounds)
PrunerPayload mFreeObjects[FREE_PRUNER_SIZE]; // mNbFree objects are stored here
PxBounds3 mFreeBounds[FREE_PRUNER_SIZE]; // mNbFree object bounds are stored here
PxTransform mFreeTransforms[FREE_PRUNER_SIZE]; // mNbFree transforms are stored here
PxU32 mFreeStamps[FREE_PRUNER_SIZE];
#endif
BucketPrunerMap mMap; // Maps (PrunerPayload) object to corresponding index in core array.
// Objects in the free array do not appear in this map.
PxU32 mSortedNb;
PxU32 mSortedCapacity;
PxU32 mSortAxis;
BucketBox mGlobalBox; // Global bounds around all objects in the structure (except the ones in the "free" array)
// Three-level bucket hierarchy: 1 root node, 5 children, 25 grandchildren.
BucketPrunerNode mLevel1;
BucketPrunerNode mLevel2[5];
BucketPrunerNode mLevel3[5][5];
bool mDirty; // Set by mutations; cleared when the structure is rebuilt
bool mOwnMemory;
private:
PX_PHYSX_COMMON_API void classifyBoxes();
void allocateSortedMemory(PxU32 nb);
void resizeCore();
PX_FORCE_INLINE void addObjectInternal(const PrunerPayload& object, const PxBounds3& worldAABB, const PxTransform& transform, PxU32 timeStamp);
};
#if PX_VC
#pragma warning(pop)
#endif
// Pruner-interface adapter: combines a BucketPrunerCore with a PruningPool.
class BucketPruner : public Pruner
{
public:
PX_PHYSX_COMMON_API BucketPruner(PxU64 contextID);
virtual ~BucketPruner();
// BasePruner
DECLARE_BASE_PRUNER_API
//~BasePruner
// Pruner
DECLARE_PRUNER_API_COMMON
//~Pruner
private:
BucketPrunerCore mCore;
PruningPool mPool;
};
}
}
#endif
| 10,356 | C | 35.46831 | 149 | 0.708285 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuRaycastTests.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "geometry/PxSphereGeometry.h"
#include "geometry/PxConvexMeshGeometry.h"
#include "geometry/PxTetrahedronMeshGeometry.h"
#include "geometry/PxCustomGeometry.h"
#include "GuMidphaseInterface.h"
#include "GuInternal.h"
#include "GuIntersectionRayCapsule.h"
#include "GuIntersectionRaySphere.h"
#include "GuIntersectionRayPlane.h"
#include "GuHeightFieldUtil.h"
#include "GuDistancePointSegment.h"
#include "GuConvexMesh.h"
#include "CmScaling.h"
using namespace physx;
using namespace Gu;
////////////////////////////////////////////////// raycasts //////////////////////////////////////////////////////////////////
// Raycast against a box geometry. The ray is transformed into the box's local
// frame, intersected against the centered AABB, then results are mapped back
// to world space. Returns the number of hits written (0 or 1).
PxU32 raycast_box(GU_RAY_FUNC_PARAMS)
{
PX_ASSERT(geom.getType() == PxGeometryType::eBOX);
PX_ASSERT(maxHits && hits);
PX_UNUSED(threadContext);
PX_UNUSED(maxHits);
PX_UNUSED(stride);
const PxBoxGeometry& boxGeom = static_cast<const PxBoxGeometry&>(geom);
const PxTransform& absPose = pose;
// Move the ray into the box's local frame.
PxVec3 localOrigin = rayOrigin - absPose.p;
localOrigin = absPose.q.rotateInv(localOrigin);
const PxVec3 localDir = absPose.q.rotateInv(rayDir);
PxVec3 localImpact;
PxReal t;
// rval is 0 on miss, otherwise (hit axis index + 1) — see the rval-- below.
PxU32 rval = rayAABBIntersect2(-boxGeom.halfExtents, boxGeom.halfExtents, localOrigin, localDir, localImpact, t);
if(!rval)
return 0;
if(t>maxDist)
return 0;
hits->distance = t; //worldRay.orig.distance(hit.worldImpact); //should be the same, assuming ray dir was normalized!!
hits->faceIndex = 0xffffffff;
hits->u = 0.0f;
hits->v = 0.0f;
PxHitFlags outFlags = PxHitFlags(0);
if((hitFlags & PxHitFlag::ePOSITION))
{
outFlags |= PxHitFlag::ePOSITION;
// t==0 means the ray started inside the box: report the ray origin itself.
if(t!=0.0f)
hits->position = absPose.transform(localImpact);
else
hits->position = rayOrigin;
}
// Compute additional information if needed
if(hitFlags & PxHitFlag::eNORMAL)
{
outFlags |= PxHitFlag::eNORMAL;
//Because rayAABBIntersect2 set t = 0 if start point inside shape
if(t == 0)
{
hits->normal = -rayDir;
}
else
{
//local space normal is:
// Decode the hit axis and pick the face normal pointing towards the impact.
rval--;
PxVec3 n(0.0f);
n[rval] = PxReal((localImpact[rval] > 0.0f) ? 1.0f : -1.0f);
hits->normal = absPose.q.rotate(n);
}
}
else
{
hits->normal = PxVec3(0.0f);
}
hits->flags = outFlags;
return 1;
}
// Raycast against a sphere geometry. Returns the number of hits written (0 or 1).
// The impact position is always computed since the normal needs it anyway.
PxU32 raycast_sphere(GU_RAY_FUNC_PARAMS)
{
	PX_ASSERT(geom.getType() == PxGeometryType::eSPHERE);
	PX_ASSERT(maxHits && hits);
	PX_UNUSED(threadContext);
	PX_UNUSED(maxHits);
	PX_UNUSED(stride);

	const PxSphereGeometry& sphereGeom = static_cast<const PxSphereGeometry&>(geom);

	if(!intersectRaySphere(rayOrigin, rayDir, maxDist, pose.p, sphereGeom.radius, hits->distance, &hits->position))
		return 0;

	hits->faceIndex	= 0xffffffff;
	hits->u			= 0.0f;
	hits->v			= 0.0f;

	PxHitFlags outFlags = PxHitFlag::ePOSITION;
	if(hitFlags & PxHitFlag::eNORMAL)
	{
		outFlags |= PxHitFlag::eNORMAL;
		// A zero distance means the ray started inside the sphere:
		// report the reversed ray direction as the normal.
		if(hits->distance != 0.0f)
		{
			hits->normal = hits->position - pose.p;
			hits->normal.normalize();
		}
		else
		{
			hits->normal = -rayDir;
		}
	}
	else
	{
		hits->normal = PxVec3(0.0f);
	}
	hits->flags = outFlags;
	return 1;
}
// Raycast against a capsule geometry. Returns the number of hits written (0 or 1).
PxU32 raycast_capsule(GU_RAY_FUNC_PARAMS)
{
PX_ASSERT(geom.getType() == PxGeometryType::eCAPSULE);
PX_ASSERT(maxHits && hits);
PX_UNUSED(threadContext);
PX_UNUSED(maxHits);
PX_UNUSED(stride);
const PxCapsuleGeometry& capsuleGeom = static_cast<const PxCapsuleGeometry&>(geom);
// TODO: PT: could we simplify this ?
// Build the world-space capsule (core segment + radius) from the pose.
Capsule capsule;
getCapsuleSegment(pose, capsuleGeom, capsule);
capsule.radius = capsuleGeom.radius;
PxReal t = 0.0f;
if(!intersectRayCapsule(rayOrigin, rayDir, capsule, t))
return 0;
// Reject hits behind the origin or beyond the allowed distance.
if(t<0.0f || t>maxDist)
return 0;
// PT: we can't avoid computing the position here since it's needed to compute the normal anyway
hits->position = rayOrigin + rayDir*t; // PT: will be rayOrigin for t=0.0f (i.e. what the spec wants)
hits->distance = t;
hits->faceIndex = 0xffffffff;
hits->u = 0.0f;
hits->v = 0.0f;
// Compute additional information if needed
PxHitFlags outFlags = PxHitFlag::ePOSITION;
if(hitFlags & PxHitFlag::eNORMAL)
{
outFlags |= PxHitFlag::eNORMAL;
// t==0 means the ray started inside the capsule: return the reversed ray direction.
if(t==0.0f)
{
hits->normal = -rayDir;
}
else
{
// Normal = direction from the closest point on the core segment to the impact point.
PxReal capsuleT;
distancePointSegmentSquared(capsule, hits->position, &capsuleT);
capsule.computePoint(hits->normal, capsuleT);
hits->normal = hits->position - hits->normal; //this should never be zero. It should have a magnitude of the capsule radius.
hits->normal.normalize();
}
}
else
{
hits->normal = PxVec3(0.0f);
}
hits->flags = outFlags;
return 1;
}
// Raycast against a plane shape.
// Performs backface culling (rays travelling with the plane normal miss) so that
// objects located beyond planes can still be picked.
// Returns the number of hits written (0 or 1).
PxU32 raycast_plane(GU_RAY_FUNC_PARAMS)
{
	PX_ASSERT(geom.getType() == PxGeometryType::ePLANE);
	PX_ASSERT(maxHits && hits);
	PX_UNUSED(threadContext);
	PX_UNUSED(hitFlags);
	PX_UNUSED(maxHits);
	PX_UNUSED(stride);
	PX_UNUSED(geom);
//	const PxPlaneGeometry& planeGeom = static_cast<const PxPlaneGeometry&>(geom);

	// Perform backface culling so that we can pick objects beyond planes
	const PxPlane plane = getPlane(pose);
	if(rayDir.dot(plane.n)>=0.0f)
		return 0;	// PT: fixed: was "return false" - the function returns a hit count (PxU32), not a bool

	PxReal distanceAlongLine;
	if(!intersectRayPlane(rayOrigin, rayDir, plane, distanceAlongLine, &hits->position))
		return 0;
/*
	PxReal test = worldRay.orig.distance(hit.worldImpact);

	PxReal dd;
	PxVec3 pp;
	PxSegmentPlaneIntersect(worldRay.orig, worldRay.orig+worldRay.dir*1000.0f, plane, dd, pp);
*/
	if(distanceAlongLine<0.0f)
		return 0;

	if(distanceAlongLine>maxDist)
		return 0;

	hits->distance	= distanceAlongLine;
	hits->faceIndex	= 0xffffffff;
	hits->u			= 0.0f;
	hits->v			= 0.0f;
	// The plane normal is available for free, so position & normal are always reported.
	hits->flags		= PxHitFlag::ePOSITION|PxHitFlag::eNORMAL;
	hits->normal	= plane.n;
	return 1;
}
// Raycast against a convex mesh.
//
// Purely convex planes based algorithm. Iterate all planes of the convex, with the following rules:
// * determine if the ray origin is inside them all or not.
// * planes parallel to the ray direction are an immediate early out if we're on the outside side
//   (the plane normal is a separating axis)
// * else
//   - for all planes the ray direction "enters" from the front side, track the one furthest along the ray direction (A)
//   - for all planes the ray direction "exits" from the back side, track the one furthest along the negative ray direction (B)
//   If the ray origin is outside the convex and if, along the ray, A comes before B, the directed line stabs the convex at A.
//
// Returns the number of hits written (0 or 1).
PxU32 raycast_convexMesh(GU_RAY_FUNC_PARAMS)
{
	PX_ASSERT(geom.getType() == PxGeometryType::eCONVEXMESH);
	PX_ASSERT(maxHits && hits);
	PX_ASSERT(PxAbs(rayDir.magnitudeSquared()-1)<1e-4f);
	PX_UNUSED(threadContext);
	PX_UNUSED(maxHits);
	PX_UNUSED(stride);

	const PxConvexMeshGeometry& convexGeom = static_cast<const PxConvexMeshGeometry&>(geom);
	ConvexMesh* convexMesh = static_cast<ConvexMesh*>(convexGeom.convexMesh);

	PxGeomRaycastHit& hit = *hits;

	//scaling: transform the ray to vertex space
	const PxMat34 world2vertexSkew = convexGeom.scale.getInverse() * pose.getInverse();

	const PxU32 nPolys = convexMesh->getNbPolygonsFast();
	const HullPolygonData* PX_RESTRICT polys = convexMesh->getPolygons();

	const PxVec3 vrayOrig = world2vertexSkew.transform(rayOrigin);
	const PxVec3 vrayDir = world2vertexSkew.rotate(rayDir);

	bool originInsideAllPlanes = true;
	PxReal latestEntry = -FLT_MAX;
	PxReal earliestExit = FLT_MAX;
	hit.faceIndex = 0xffffffff;

	for(PxU32 i=0;i<nPolys;i++)
	{
		const HullPolygonData& poly = polys[i];
		const PxPlane& vertSpacePlane = poly.mPlane;

		const PxReal distToPlane = vertSpacePlane.distance(vrayOrig);
		const PxReal dn = vertSpacePlane.n.dot(vrayDir);

		if(distToPlane > 0.0f)
			originInsideAllPlanes = false;	//origin not behind plane == ray starts outside the convex.

		// PT: the division by dn is now performed per-branch, only where |dn| > 1e-7,
		// which removes the potential divide by zero the previous code had.
		if(dn > 1E-7f)	//the ray direction "exits" from the back side
		{
			const PxReal distAlongRay = -distToPlane/dn;
			earliestExit = physx::intrinsics::selectMin(earliestExit, distAlongRay);
		}
		else if(dn < -1E-7f)	//the ray direction "enters" from the front side
		{
			const PxReal distAlongRay = -distToPlane/dn;
			if(distAlongRay > latestEntry)
			{
				latestEntry = distAlongRay;
				hit.faceIndex = i;
			}
		}
		else
		{
			//plane normal and ray dir are orthogonal
			if(distToPlane > 0.0f)
				return 0;	//a plane is parallel with ray -- and we're outside the ray -- we definitely miss the entire convex!
		}
	}

	if(originInsideAllPlanes)	//ray starts inside convex
	{
		hit.distance	= 0.0f;
		hit.faceIndex	= 0xffffffff;
		hit.u			= 0.0f;
		hit.v			= 0.0f;
		hit.position	= rayOrigin;
		hit.normal		= -rayDir;
		hit.flags		= PxHitFlag::eNORMAL|PxHitFlag::ePOSITION;
		return 1;
	}

	// AP: changed to latestEntry < maxDist-1e-5f so that we have a conservatively negative result near end of ray
	if(latestEntry < earliestExit && latestEntry > 0.0f && latestEntry < maxDist-1e-5f)
	{
		PxHitFlags outFlags = PxHitFlag::eFACE_INDEX;
		if(hitFlags & PxHitFlag::ePOSITION)
		{
			outFlags |= PxHitFlag::ePOSITION;
			const PxVec3 pointOnPlane = vrayOrig + latestEntry * vrayDir;
			hit.position = pose.transform(Cm::toMat33(convexGeom.scale) * pointOnPlane);
		}
		hit.distance	= latestEntry;
		hit.u			= 0.0f;
		hit.v			= 0.0f;
		hit.normal		= PxVec3(0.0f);

		// Compute additional information if needed
		if(hitFlags & PxHitFlag::eNORMAL)
		{
			outFlags |= PxHitFlag::eNORMAL;
			//when we have nonuniform scaling we actually have to transform by the transpose of the inverse of vertex2worldSkew.M == transpose of world2vertexSkew:
			hit.normal = world2vertexSkew.rotateTranspose(polys[hit.faceIndex].mPlane.n);
			hit.normal.normalize();
		}
		hit.flags = outFlags;
		return 1;
	}
	return 0;
}
// Particle systems are not raycastable: this stub always reports zero hits.
PxU32 raycast_particlesystem(GU_RAY_FUNC_PARAMS)
{
	PX_ASSERT(geom.getType() == PxGeometryType::ePARTICLESYSTEM);
	PX_ASSERT(PxAbs(rayDir.magnitudeSquared() - 1)<1e-4f);
	PX_UNUSED(threadContext);
	PX_UNUSED(geom);
	PX_UNUSED(pose);
	PX_UNUSED(rayOrigin);
	PX_UNUSED(rayDir);
	PX_UNUSED(maxDist);
	PX_UNUSED(hitFlags);
	PX_UNUSED(maxHits);
	PX_UNUSED(hits);
	PX_UNUSED(stride);
	return 0;
}
// Softbody (tetrahedron mesh) raycast: not implemented yet, always reports zero hits.
//ML: need to implement raycastTetrahedronMesh
PxU32 raycast_softbody(GU_RAY_FUNC_PARAMS)
{
	PX_ASSERT(geom.getType() == PxGeometryType::eTETRAHEDRONMESH);
	PX_ASSERT(PxAbs(rayDir.magnitudeSquared() - 1)<1e-4f);

	const PxTetrahedronMeshGeometry& meshGeom = static_cast<const PxTetrahedronMeshGeometry&>(geom);
	PX_UNUSED(meshGeom);

	PX_UNUSED(threadContext);
	PX_UNUSED(pose);
	PX_UNUSED(rayOrigin);
	PX_UNUSED(rayDir);
	PX_UNUSED(maxDist);
	PX_UNUSED(hitFlags);
	PX_UNUSED(maxHits);
	PX_UNUSED(hits);
	PX_UNUSED(stride);
	return 0;
}
// Raycast against a triangle mesh: delegates the whole query to the midphase structure.
PxU32 raycast_triangleMesh(GU_RAY_FUNC_PARAMS)
{
	PX_ASSERT(geom.getType() == PxGeometryType::eTRIANGLEMESH);
	PX_ASSERT(PxAbs(rayDir.magnitudeSquared()-1)<1e-4f);
	PX_UNUSED(threadContext);

	const PxTriangleMeshGeometry& meshGeom = static_cast<const PxTriangleMeshGeometry&>(geom);
	TriangleMesh* const mesh = static_cast<TriangleMesh*>(meshGeom.triangleMesh);

	return Midphase::raycastTriangleMesh(mesh, meshGeom, pose, rayOrigin, rayDir, maxDist, hitFlags, maxHits, hits, stride);
}
// Hair systems are not raycastable: this stub always reports zero hits.
PxU32 raycast_hairsystem(GU_RAY_FUNC_PARAMS)
{
	PX_ASSERT(geom.getType() == PxGeometryType::eHAIRSYSTEM);
	PX_ASSERT(PxAbs(rayDir.magnitudeSquared() - 1)<1e-4f);
	PX_UNUSED(threadContext);
	PX_UNUSED(geom);
	PX_UNUSED(pose);
	PX_UNUSED(rayOrigin);
	PX_UNUSED(rayDir);
	PX_UNUSED(maxDist);
	PX_UNUSED(hitFlags);
	PX_UNUSED(maxHits);
	PX_UNUSED(hits);
	PX_UNUSED(stride);
	return 0;
}
namespace
{
	// Callback used by HeightFieldUtil::traceSegment() during raycast_heightField().
	// Receives triangle hits in traversal order (strictly sorted along the ray) and writes
	// PxGeomRaycastHit entries into a strided user buffer, up to mMaxHits entries.
	struct HFTraceSegmentCallback
	{
		PX_NOCOPY(HFTraceSegmentCallback)
		public:
		PxU8*					mHits;			// current write cursor into the strided hit buffer
		const PxU32				mMaxHits;		// capacity of the hit buffer (in hits)
		const PxU32				mStride;		// byte stride between consecutive hits
		PxU32					mNbHits;		// number of hits written so far
		const HeightFieldUtil&	mUtil;			// heightfield helper, used to fetch surface normals
		const PxTransform&		mPose;			// heightfield world pose
		const PxVec3&			mRayDir;		// world-space ray direction
		const PxVec3&			mLocalRayDir;	// heightfield-local ray direction
		const PxVec3&			mLocalRayOrig;	// heightfield-local ray origin
		const PxHitFlags		mHitFlags;		// which optional hit fields the caller wants
		const bool				mIsDoubleSided;	// heightfield geometry has the double-sided flag set

		HFTraceSegmentCallback(	PxGeomRaycastHit* hits, PxU32 maxHits, PxU32 stride, const PxHitFlags hitFlags, const HeightFieldUtil& hfUtil, const PxTransform& pose,
								const PxVec3& rayDir, const PxVec3& localRayDir, const PxVec3& localRayOrig,
								bool isDoubleSided) :
			mHits			(reinterpret_cast<PxU8*>(hits)),
			mMaxHits		(maxHits),
			mStride			(stride),
			mNbHits			(0),
			mUtil			(hfUtil),
			mPose			(pose),
			mRayDir			(rayDir),
			mLocalRayDir	(localRayDir),
			mLocalRayOrig	(localRayOrig),
			mHitFlags		(hitFlags),
			mIsDoubleSided	(isDoubleSided)
		{
			PX_ASSERT(maxHits > 0);
		}

		// Cell-event notification during traversal; returning true continues the traversal.
		PX_FORCE_INLINE	bool	onEvent(PxU32, const PxU32*)
		{
			return true;
		}

		// Hit on the underside of the heightfield; returning true continues the traversal.
		PX_FORCE_INLINE	bool	underFaceHit(const HeightFieldUtil&, const PxVec3&, const PxVec3&, PxF32, PxF32, PxF32, PxU32)
		{
			return true;	// true means continue traversal
		}

		// Triangle hit: fills the next PxGeomRaycastHit in the buffer.
		// aHitPoint is in heightfield-local space; it is converted to world space at the end
		// (after the distance computation, which is done in local space).
		PxAgain	faceHit(const HeightFieldUtil&, const PxVec3& aHitPoint, PxU32 aTriangleIndex, PxReal u, PxReal v)
		{
			// traversal is strictly sorted so there's no need to sort hits
			if(mNbHits >= mMaxHits)
				return false; // false = stop traversal

			PxGeomRaycastHit& hit = *reinterpret_cast<PxGeomRaycastHit*>(mHits);
			mNbHits++;
			mHits += mStride;
			hit.position	= aHitPoint;
			hit.faceIndex	= aTriangleIndex;
			hit.u			= u;
			hit.v			= v;
			hit.flags		= PxHitFlag::eUV | PxHitFlag::eFACE_INDEX; // UVs and face index are always set

			if(mHitFlags & PxHitFlag::eNORMAL)
			{
				// We need the normal for the dot product.
				PxVec3 normal = mPose.q.rotate(mUtil.getNormalAtShapePoint(hit.position.x, hit.position.z));
				normal.normalize();
				if(mIsDoubleSided && normal.dot(mRayDir) > 0.0f) // comply with normal spec for double sided (should always face opposite rayDir)
					hit.normal = -normal;
				else
					hit.normal = normal;
				hit.flags |= PxHitFlag::eNORMAL;
			}

			// Distance is computed in local space, while hit.position still holds the local point.
			hit.distance = physx::intrinsics::selectMax(0.f, (hit.position - mLocalRayOrig).dot(mLocalRayDir));

			if(mHitFlags & PxHitFlag::ePOSITION)
			{
				hit.position = mPose.transform(hit.position);
				hit.flags |= PxHitFlag::ePOSITION;
			}

			return (mNbHits < mMaxHits); // true = continue traversal, false = stop traversal
		}
	};
}
// Raycast against a heightfield shape.
// The ray is moved into heightfield-local space, clipped against the (slightly inflated)
// local bounds to limit precision issues with a large maxDist, then traced by
// HeightFieldUtil::traceSegment() which reports triangles through HFTraceSegmentCallback.
// Returns the number of hits written; hits are reported in sorted order along the ray.
PxU32 raycast_heightField(GU_RAY_FUNC_PARAMS)
{
	PX_ASSERT(geom.getType() == PxGeometryType::eHEIGHTFIELD);
	PX_ASSERT(maxHits && hits);
	PX_UNUSED(threadContext);

	const PxHeightFieldGeometry& hfGeom = static_cast<const PxHeightFieldGeometry&>(geom);

	// Move the ray into heightfield-local space.
	const PxTransform invAbsPose = pose.getInverse();
	const PxVec3 localRayOrig = invAbsPose.transform(rayOrigin);
	const PxVec3 localRayDir = invAbsPose.rotate(rayDir);

	const bool isDoubleSided = hfGeom.heightFieldFlags.isSet(PxMeshGeometryFlag::eDOUBLE_SIDED);
	const bool bothSides = isDoubleSided || (hitFlags & PxHitFlag::eMESH_BOTH_SIDES);

	const HeightFieldTraceUtil hfUtil(hfGeom);
	PxVec3 normRayDir = localRayDir;
	normRayDir.normalizeSafe(); // nothing will happen if length is < PX_NORMALIZATION_EPSILON

	// pretest if we intersect HF bounds. If no early exit, if yes move the origin and shorten the maxDist
	// to deal with precision issues with large maxDist
	PxBounds3 hfLocalBounds;
	hfUtil.computeLocalBounds(hfLocalBounds);
	// PT: inflate the bounds like we do in the scene-tree (see PX-1179)
	const PxVec3 center = hfLocalBounds.getCenter();
	const PxVec3 extents = hfLocalBounds.getExtents() * 1.01f;	//SQ_PRUNER_INFLATION;
	hfLocalBounds.minimum = center - extents;
	hfLocalBounds.maximum = center + extents;

	PxVec3 localImpact;
	PxReal t;	// closest intersection, t==0 hit inside
	PxU32 rval = rayAABBIntersect2(hfLocalBounds.minimum, hfLocalBounds.maximum, localRayOrig, localRayDir, localImpact, t);
	// early exit we miss the AABB
	if (!rval)
		return 0;
	if (t > maxDist)
		return 0;

	// PT: if eMESH_ANY is used then eMESH_MULTIPLE won't be, and we'll stop the query after 1 hit is found. There is no difference
	// between 'any hit' and 'closest hit' for HFs since hits are reported in order.
	HFTraceSegmentCallback callback(hits, hitFlags.isSet(PxHitFlag::eMESH_MULTIPLE) ? maxHits : 1, stride, hitFlags, hfUtil, pose,
		rayDir, localRayDir, localRayOrig, isDoubleSided); // make sure we return only 1 hit without eMESH_MULTIPLE

	PxReal offset = 0.0f;
	PxReal maxDistOffset = maxDist;
	PxVec3 localRayOrigOffset = localRayOrig;

	// if we don't start inside the AABB box, offset the start pos, because of precision issues with large maxDist
	if(t > 0.0f)
	{
		// GU_RAY_SURFACE_OFFSET backs the start point off slightly so the entry surface is not missed.
		offset = t - GU_RAY_SURFACE_OFFSET;
		// move the rayOrig to offset start pos
		localRayOrigOffset = localRayOrig + normRayDir*offset;
	}

	// shorten the maxDist of the offset that was cut off and clip it
	// we pick either the original maxDist, if maxDist is huge we clip it
	maxDistOffset = PxMin(maxDist - offset, GU_RAY_SURFACE_OFFSET + 2.0f * PxMax(hfLocalBounds.maximum.x - hfLocalBounds.minimum.x, PxMax(hfLocalBounds.maximum.y - hfLocalBounds.minimum.y, hfLocalBounds.maximum.z - hfLocalBounds.minimum.z)));

	hfUtil.traceSegment<HFTraceSegmentCallback, false, false>(localRayOrigOffset, normRayDir, maxDistOffset,
		&callback, hfLocalBounds, !bothSides);
	return callback.mNbHits;
}
// Raycast against a user-defined custom geometry: forwards the query to the user callbacks.
// Invalid custom geometries report zero hits.
static PxU32 raycast_custom(GU_RAY_FUNC_PARAMS)
{
	const PxCustomGeometry& customGeom = static_cast<const PxCustomGeometry&>(geom);
	if(!customGeom.isValid())
		return 0;
	return customGeom.callbacks->raycast(rayOrigin, rayDir, geom, pose, maxDist, hitFlags, maxHits, hits, stride, threadContext);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// PT: table is not static because it's accessed as 'extern' within Gu (bypassing the function call).
// Raycast dispatch table, indexed by PxGeometryType::Enum. The entry order must match the
// enum declaration order; the compile-time assert below enforces the entry count.
RaycastFunc gRaycastMap[] =
{
	raycast_sphere,				// sphere
	raycast_plane,				// plane
	raycast_capsule,			// capsule
	raycast_box,				// box
	raycast_convexMesh,			// convex mesh
	raycast_particlesystem,		// particle system (stub, no hits)
	raycast_softbody,			// softbody / tetrahedron mesh (stub, not implemented)
	raycast_triangleMesh,		// triangle mesh
	raycast_heightField,		// heightfield
	raycast_hairsystem,			// hair system (stub, no hits)
	raycast_custom				// custom geometry (user callbacks)
};
PX_COMPILE_TIME_ASSERT(sizeof(gRaycastMap) / sizeof(gRaycastMap[0]) == PxGeometryType::eGEOMETRY_COUNT);
// PT: the function is used by external modules (Np, CCT, Sq)
// Returns the per-geometry-type raycast function table (see gRaycastMap above).
const Gu::GeomRaycastTable& Gu::getRaycastFuncTable()
{
	return gRaycastMap;
}
| 20,109 | C++ | 30.619497 | 239 | 0.718385 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuAABBTree.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuAABBTreeBounds.h"
#include "GuAABBTree.h"
#include "GuAABBTreeBuildStats.h"
#include "GuBounds.h"
#include "GuAABBTreeNode.h"
#include "GuSAH.h"
#include "foundation/PxMathUtils.h"
#include "foundation/PxFPU.h"
using namespace physx;
using namespace Gu;
///////////////////////////////////////////////////////////////////////////////
void AABBTreeBounds::init(PxU32 nbBounds, const PxBounds3* bounds)
{
PX_FREE(mBounds);
// PT: we always allocate one extra box, to make sure we can safely use V4 loads on the array
mBounds = PX_ALLOCATE(PxBounds3, (nbBounds + 1), "AABBTreeBounds");
if(bounds)
PxMemCopy(mBounds, bounds, nbBounds*sizeof(PxBounds3));
}
void AABBTreeBounds::resize(PxU32 newSize, PxU32 previousSize)
{
PxBounds3* newBounds = PX_ALLOCATE(PxBounds3, (newSize + 1), "AABBTreeBounds");
if(mBounds && previousSize)
PxMemCopy(newBounds, mBounds, sizeof(PxBounds3)*previousSize);
PX_FREE(mBounds);
mBounds = newBounds;
}
void AABBTreeBounds::release()
{
if(!mUserAllocated)
PX_FREE(mBounds);
}
///////////////////////////////////////////////////////////////////////////////
NodeAllocator::NodeAllocator() : mPool(NULL), mCurrentSlabIndex(0), mTotalNbNodes(0)
{
}
NodeAllocator::~NodeAllocator()
{
release();
}
void NodeAllocator::release()
{
const PxU32 nbSlabs = mSlabs.size();
for (PxU32 i = 0; i<nbSlabs; i++)
{
Slab& s = mSlabs[i];
PX_DELETE_ARRAY(s.mPool);
}
mSlabs.reset();
mCurrentSlabIndex = 0;
mTotalNbNodes = 0;
}
void NodeAllocator::init(PxU32 nbPrimitives, PxU32 limit)
{
const PxU32 maxSize = nbPrimitives * 2 - 1; // PT: max possible #nodes for a complete tree
const PxU32 estimatedFinalSize = maxSize <= 1024 ? maxSize : maxSize / limit;
mPool = PX_NEW(AABBTreeBuildNode)[estimatedFinalSize];
PxMemZero(mPool, sizeof(AABBTreeBuildNode)*estimatedFinalSize);
// Setup initial node. Here we have a complete permutation of the app's primitives.
mPool->mNodeIndex = 0;
mPool->mNbPrimitives = nbPrimitives;
mSlabs.pushBack(Slab(mPool, 1, estimatedFinalSize));
mCurrentSlabIndex = 0;
mTotalNbNodes = 1;
}
// PT: TODO: inline this?
AABBTreeBuildNode* NodeAllocator::getBiNode()
{
mTotalNbNodes += 2;
Slab& currentSlab = mSlabs[mCurrentSlabIndex];
if (currentSlab.mNbUsedNodes + 2 <= currentSlab.mMaxNbNodes)
{
AABBTreeBuildNode* biNode = currentSlab.mPool + currentSlab.mNbUsedNodes;
currentSlab.mNbUsedNodes += 2;
return biNode;
}
else
{
// Allocate new slab
const PxU32 size = 1024;
AABBTreeBuildNode* pool = PX_NEW(AABBTreeBuildNode)[size];
PxMemZero(pool, sizeof(AABBTreeBuildNode)*size);
mSlabs.pushBack(Slab(pool, 2, size));
mCurrentSlabIndex++;
return pool;
}
}
///////////////////////////////////////////////////////////////////////////////
PxU32 Gu::reshuffle(PxU32 nb, PxU32* const PX_RESTRICT prims, const PxVec3* PX_RESTRICT centers, float splitValue, PxU32 axis)
{
// PT: to avoid calling the unsafe [] operator
const size_t ptrValue = size_t(centers) + axis*sizeof(float);
const PxVec3* PX_RESTRICT centersX = reinterpret_cast<const PxVec3*>(ptrValue);
// Loop through all node-related primitives. Their indices range from mNodePrimitives[0] to mNodePrimitives[mNbPrimitives-1].
// Those indices map the global list in the tree builder.
PxU32 nbPos = 0;
for(PxU32 i=0; i<nb; i++)
{
// Get index in global list
const PxU32 index = prims[i];
// Test against the splitting value. The primitive value is tested against the enclosing-box center.
// [We only need an approximate partition of the enclosing box here.]
const float primitiveValue = centersX[index].x;
PX_ASSERT(primitiveValue == centers[index][axis]);
// Reorganize the list of indices in this order: positive - negative.
if (primitiveValue > splitValue)
{
// Swap entries
prims[i] = prims[nbPos];
prims[nbPos] = index;
// Count primitives assigned to positive space
nbPos++;
}
}
return nbPos;
}
static PxU32 split(const PxBounds3& box, PxU32 nb, PxU32* const PX_RESTRICT prims, PxU32 axis, const AABBTreeBuildParams& params)
{
// Get node split value
float splitValue = 0.0f;
//float defaultSplitValue = box.getCenter(axis);
//(void)defaultSplitValue;
if(params.mBuildStrategy==BVH_SPLATTER_POINTS_SPLIT_GEOM_CENTER)
{
// PT: experimental attempt at replicating BV4_SPLATTER_POINTS_SPLIT_GEOM_CENTER, but with boxes instead of triangles.
const PxBounds3* bounds = params.mBounds->getBounds();
for(PxU32 i=0;i<nb;i++)
{
const PxBounds3& current = bounds[prims[i]];
splitValue += current.getCenter(axis);
// splitValue += (*VP.Vertex[0])[axis];
// splitValue += (*VP.Vertex[1])[axis];
// splitValue += (*VP.Vertex[2])[axis];
}
// splitValue /= float(nb*3);
splitValue /= float(nb);
}
else
{
// Default split value = middle of the axis (using only the box)
splitValue = box.getCenter(axis);
}
return reshuffle(nb, prims, params.mCache, splitValue, axis);
}
void AABBTreeBuildNode::subdivide(const AABBTreeBuildParams& params, BuildStats& stats, NodeAllocator& allocator, PxU32* const indices)
{
PxU32* const PX_RESTRICT primitives = indices + mNodeIndex;
const PxU32 nbPrims = mNbPrimitives;
// Compute global box & means for current node. The box is stored in mBV.
Vec4V meansV;
{
const PxBounds3* PX_RESTRICT boxes = params.mBounds->getBounds();
PX_ASSERT(boxes);
PX_ASSERT(primitives);
PX_ASSERT(nbPrims);
Vec4V minV = V4LoadU(&boxes[primitives[0]].minimum.x);
Vec4V maxV = V4LoadU(&boxes[primitives[0]].maximum.x);
meansV = V4LoadU(¶ms.mCache[primitives[0]].x);
for (PxU32 i = 1; i<nbPrims; i++)
{
const PxU32 index = primitives[i];
const Vec4V curMinV = V4LoadU(&boxes[index].minimum.x);
const Vec4V curMaxV = V4LoadU(&boxes[index].maximum.x);
meansV = V4Add(meansV, V4LoadU(¶ms.mCache[index].x));
minV = V4Min(minV, curMinV);
maxV = V4Max(maxV, curMaxV);
}
StoreBounds(mBV, minV, maxV);
const float coeff = 1.0f / float(nbPrims);
meansV = V4Scale(meansV, FLoad(coeff));
}
// Check the user-defined limit. Also ensures we stop subdividing if we reach a leaf node.
if (nbPrims <= params.mLimit)
return;
bool validSplit = true;
PxU32 nbPos;
{
// Compute variances
Vec4V varsV = V4Zero();
for (PxU32 i = 0; i<nbPrims; i++)
{
const PxU32 index = primitives[i];
Vec4V centerV = V4LoadU(¶ms.mCache[index].x);
centerV = V4Sub(centerV, meansV);
centerV = V4Mul(centerV, centerV);
varsV = V4Add(varsV, centerV);
}
const float coeffNb1 = 1.0f / float(nbPrims - 1);
varsV = V4Scale(varsV, FLoad(coeffNb1));
PX_ALIGN(16, PxVec4) vars;
V4StoreA(varsV, &vars.x);
// Choose axis with greatest variance
const PxU32 axis = PxLargestAxis(PxVec3(vars.x, vars.y, vars.z));
// Split along the axis
nbPos = split(mBV, nbPrims, primitives, axis, params);
// Check split validity
if (!nbPos || nbPos == nbPrims)
validSplit = false;
}
// Check the subdivision has been successful
if (!validSplit)
{
// Here, all boxes lie in the same sub-space. Two strategies:
// - if we are over the split limit, make an arbitrary 50-50 split
// - else stop subdividing
if (nbPrims>params.mLimit)
{
nbPos = nbPrims >> 1;
}
else return;
}
// Now create children and assign their pointers.
mPos = allocator.getBiNode();
stats.increaseCount(2);
// Assign children
PX_ASSERT(!isLeaf());
AABBTreeBuildNode* Pos = const_cast<AABBTreeBuildNode*>(mPos);
AABBTreeBuildNode* Neg = Pos + 1;
Pos->mNodeIndex = mNodeIndex;
Pos->mNbPrimitives = nbPos;
Neg->mNodeIndex = mNodeIndex + nbPos;
Neg->mNbPrimitives = mNbPrimitives - nbPos;
}
void AABBTreeBuildNode::_buildHierarchy(const AABBTreeBuildParams& params, BuildStats& stats, NodeAllocator& nodeBase, PxU32* const indices)
{
// Subdivide current node
subdivide(params, stats, nodeBase, indices);
// Recurse
if (!isLeaf())
{
AABBTreeBuildNode* Pos = const_cast<AABBTreeBuildNode*>(getPos());
PX_ASSERT(Pos);
AABBTreeBuildNode* Neg = Pos + 1;
Pos->_buildHierarchy(params, stats, nodeBase, indices);
Neg->_buildHierarchy(params, stats, nodeBase, indices);
}
stats.mTotalPrims += mNbPrimitives;
}
void AABBTreeBuildNode::subdivideSAH(const AABBTreeBuildParams& params, SAH_Buffers& buffers, BuildStats& stats, NodeAllocator& allocator, PxU32* const indices)
{
PxU32* const PX_RESTRICT primitives = indices + mNodeIndex;
const PxU32 nbPrims = mNbPrimitives;
// Compute global box for current node. The box is stored in mBV.
computeGlobalBox(mBV, nbPrims, params.mBounds->getBounds(), primitives);
// Check the user-defined limit. Also ensures we stop subdividing if we reach a leaf node.
if (nbPrims <= params.mLimit)
return;
/////
PxU32 leftCount;
if(!buffers.split(leftCount, nbPrims, primitives, params.mBounds->getBounds(), params.mCache))
{
// Invalid split => fallback to previous strategy
subdivide(params, stats, allocator, indices);
return;
}
/////
// Now create children and assign their pointers.
mPos = allocator.getBiNode();
stats.increaseCount(2);
// Assign children
PX_ASSERT(!isLeaf());
AABBTreeBuildNode* Pos = const_cast<AABBTreeBuildNode*>(mPos);
AABBTreeBuildNode* Neg = Pos + 1;
Pos->mNodeIndex = mNodeIndex;
Pos->mNbPrimitives = leftCount;
Neg->mNodeIndex = mNodeIndex + leftCount;
Neg->mNbPrimitives = mNbPrimitives - leftCount;
}
void AABBTreeBuildNode::_buildHierarchySAH(const AABBTreeBuildParams& params, SAH_Buffers& sah, BuildStats& stats, NodeAllocator& nodeBase, PxU32* const indices)
{
// Subdivide current node
subdivideSAH(params, sah, stats, nodeBase, indices);
// Recurse
if (!isLeaf())
{
AABBTreeBuildNode* Pos = const_cast<AABBTreeBuildNode*>(getPos());
PX_ASSERT(Pos);
AABBTreeBuildNode* Neg = Pos + 1;
Pos->_buildHierarchySAH(params, sah, stats, nodeBase, indices);
Neg->_buildHierarchySAH(params, sah, stats, nodeBase, indices);
}
stats.mTotalPrims += mNbPrimitives;
}
///////////////////////////////////////////////////////////////////////////////
static PxU32* initAABBTreeBuild(const AABBTreeBuildParams& params, NodeAllocator& nodeAllocator, BuildStats& stats)
{
const PxU32 numPrimitives = params.mNbPrimitives;
if(!numPrimitives)
return NULL;
// Init stats
stats.setCount(1);
// Initialize indices. This list will be modified during build.
PxU32* indices = PX_ALLOCATE(PxU32, numPrimitives, "AABB tree indices");
// Identity permutation
for(PxU32 i=0;i<numPrimitives;i++)
indices[i] = i;
// Allocate a pool of nodes
nodeAllocator.init(numPrimitives, params.mLimit);
// Compute box centers only once and cache them
params.mCache = PX_ALLOCATE(PxVec3, (numPrimitives+1), "cache");
const PxBounds3* PX_RESTRICT boxes = params.mBounds->getBounds();
const float half = 0.5f;
const FloatV halfV = FLoad(half);
for(PxU32 i=0;i<numPrimitives;i++)
{
const Vec4V curMinV = V4LoadU(&boxes[i].minimum.x);
const Vec4V curMaxV = V4LoadU(&boxes[i].maximum.x);
const Vec4V centerV = V4Scale(V4Add(curMaxV, curMinV), halfV);
V4StoreU(centerV, ¶ms.mCache[i].x);
}
return indices;
}
PxU32* Gu::buildAABBTree(const AABBTreeBuildParams& params, NodeAllocator& nodeAllocator, BuildStats& stats)
{
// initialize the build first
PxU32* indices = initAABBTreeBuild(params, nodeAllocator, stats);
if(!indices)
return NULL;
// Build the hierarchy
if(params.mBuildStrategy==BVH_SAH)
{
SAH_Buffers buffers(params.mNbPrimitives);
nodeAllocator.mPool->_buildHierarchySAH(params, buffers, stats, nodeAllocator, indices);
}
else
nodeAllocator.mPool->_buildHierarchy(params, stats, nodeAllocator, indices);
return indices;
}
void Gu::flattenTree(const NodeAllocator& nodeAllocator, BVHNode* dest, const PxU32* remap)
{
// PT: gathers all build nodes allocated so far and flatten them to a linear destination array of smaller runtime nodes
PxU32 offset = 0;
const PxU32 nbSlabs = nodeAllocator.mSlabs.size();
for(PxU32 s=0;s<nbSlabs;s++)
{
const NodeAllocator::Slab& currentSlab = nodeAllocator.mSlabs[s];
AABBTreeBuildNode* pool = currentSlab.mPool;
for(PxU32 i=0;i<currentSlab.mNbUsedNodes;i++)
{
dest[offset].mBV = pool[i].mBV;
if(pool[i].isLeaf())
{
PxU32 index = pool[i].mNodeIndex;
if(remap)
index = remap[index];
const PxU32 nbPrims = pool[i].getNbPrimitives();
PX_ASSERT(nbPrims<16);
dest[offset].mData = (index<<5)|((nbPrims&15)<<1)|1;
}
else
{
PX_ASSERT(pool[i].mPos);
PxU32 localNodeIndex = 0xffffffff;
PxU32 nodeBase = 0;
for(PxU32 j=0;j<nbSlabs;j++)
{
if(pool[i].mPos >= nodeAllocator.mSlabs[j].mPool && pool[i].mPos < nodeAllocator.mSlabs[j].mPool + nodeAllocator.mSlabs[j].mNbUsedNodes)
{
localNodeIndex = PxU32(pool[i].mPos - nodeAllocator.mSlabs[j].mPool);
break;
}
nodeBase += nodeAllocator.mSlabs[j].mNbUsedNodes;
}
const PxU32 nodeIndex = nodeBase + localNodeIndex;
dest[offset].mData = nodeIndex << 1;
}
offset++;
}
}
}
void Gu::buildAABBTree(PxU32 nbBounds, const AABBTreeBounds& bounds, PxArray<BVHNode>& tree)
{
PX_SIMD_GUARD
// build the BVH
BuildStats stats;
NodeAllocator nodeAllocator;
PxU32* indices = buildAABBTree(AABBTreeBuildParams(1, nbBounds, &bounds), nodeAllocator, stats);
PX_ASSERT(indices);
// store the computed hierarchy
tree.resize(stats.getCount());
PX_ASSERT(tree.size() == nodeAllocator.mTotalNbNodes);
// store the results into BVHNode list
flattenTree(nodeAllocator, tree.begin(), indices);
PX_FREE(indices); // PT: we don't need the indices for a complete tree
}
///////////////////////////////////////////////////////////////////////////////
// Progressive building
class Gu::FIFOStack : public PxUserAllocated
{
public:
FIFOStack() : mStack("SQFIFOStack"), mCurIndex(0) {}
~FIFOStack() {}
PX_FORCE_INLINE PxU32 getNbEntries() const { return mStack.size(); }
PX_FORCE_INLINE void push(AABBTreeBuildNode* entry) { mStack.pushBack(entry); }
bool pop(AABBTreeBuildNode*& entry);
private:
PxArray<AABBTreeBuildNode*> mStack;
PxU32 mCurIndex; //!< Current index within the container
};
bool Gu::FIFOStack::pop(AABBTreeBuildNode*& entry)
{
const PxU32 NbEntries = mStack.size(); // Get current number of entries
if (!NbEntries)
return false; // Can be NULL when no value has been pushed. This is an invalid pop call.
entry = mStack[mCurIndex++]; // Get oldest entry, move to next one
if (mCurIndex == NbEntries)
{
// All values have been poped
mStack.clear();
mCurIndex = 0;
}
return true;
}
//~Progressive building
///////////////////////////////////////////////////////////////////////////////
BVHPartialRefitData::BVHPartialRefitData() : mParentIndices(NULL), mUpdateMap(NULL), mRefitHighestSetWord(0)
{
}
BVHPartialRefitData::~BVHPartialRefitData()
{
releasePartialRefitData(true);
}
void BVHPartialRefitData::releasePartialRefitData(bool clearRefitMap)
{
PX_FREE(mParentIndices);
PX_FREE(mUpdateMap);
if(clearRefitMap)
mRefitBitmask.clearAll();
mRefitHighestSetWord = 0;
}
static void createParentArray(PxU32 totalNbNodes, PxU32* parentIndices, const BVHNode* parentNode, const BVHNode* currentNode, const BVHNode* root)
{
const PxU32 parentIndex = PxU32(parentNode - root);
const PxU32 currentIndex = PxU32(currentNode - root);
PX_ASSERT(parentIndex<totalNbNodes);
PX_ASSERT(currentIndex<totalNbNodes);
PX_UNUSED(totalNbNodes);
parentIndices[currentIndex] = parentIndex;
if(!currentNode->isLeaf())
{
createParentArray(totalNbNodes, parentIndices, currentNode, currentNode->getPos(root), root);
createParentArray(totalNbNodes, parentIndices, currentNode, currentNode->getNeg(root), root);
}
}
PxU32* BVHPartialRefitData::getParentIndices()
{
// PT: lazy-create parent array. Memory is not wasted for purely static trees, or dynamic trees that only do "full refit".
if(!mParentIndices)
{
mParentIndices = PX_ALLOCATE(PxU32, mNbNodes, "AABB parent indices");
createParentArray(mNbNodes, mParentIndices, mNodes, mNodes, mNodes);
}
return mParentIndices;
}
void BVHPartialRefitData::createUpdateMap(PxU32 nbObjects)
{
// PT: we need an "update map" for PxBVH
// PT: TODO: consider refactoring with the AABBtree version
PX_FREE(mUpdateMap);
if(!nbObjects)
return;
mUpdateMap = PX_ALLOCATE(PxU32, nbObjects, "UpdateMap");
PxMemSet(mUpdateMap, 0xff, sizeof(PxU32)*nbObjects);
const PxU32 nbNodes = mNbNodes;
const BVHNode* nodes = mNodes;
const PxU32* indices = mIndices;
for(TreeNodeIndex i=0;i<nbNodes;i++)
{
if(nodes[i].isLeaf())
{
const PxU32 nbPrims = nodes[i].getNbRuntimePrimitives();
if(indices)
{
// PT: with multiple primitives per node, several mapping entries will point to the same node.
PX_ASSERT(nbPrims<16);
for(PxU32 j=0;j<nbPrims;j++)
{
const PxU32 index = nodes[i].getPrimitives(indices)[j];
PX_ASSERT(index<nbObjects);
mUpdateMap[index] = i;
}
}
else
{
PX_ASSERT(nbPrims==1);
const PxU32 index = nodes[i].getPrimitiveIndex();
PX_ASSERT(index<nbObjects);
mUpdateMap[index] = i;
}
}
}
}
///////////////////////////////////////////////////////////////////////////////
static PX_FORCE_INLINE PxU32 BitsToDwords(PxU32 nb_bits)
{
return (nb_bits>>5) + ((nb_bits&31) ? 1 : 0);
}
bool BitArray::init(PxU32 nb_bits)
{
mSize = BitsToDwords(nb_bits);
// Get ram for n bits
PX_FREE(mBits);
mBits = PX_ALLOCATE(PxU32, mSize, "BitArray::mBits");
// Set all bits to 0
clearAll();
return true;
}
// Grow the array so it can hold at least maxBitNumber bits. Growth-only: a
// smaller or equal request is a no-op. Existing words are preserved, the
// newly added words are zeroed.
void BitArray::resize(PxU32 maxBitNumber)
{
	const PxU32 newSize = BitsToDwords(maxBitNumber);
	if (newSize <= mSize)
		return;
	PxU32* newBits = PX_ALLOCATE(PxU32, newSize, "BitArray::mBits");
	// Zero only the tail; the head is overwritten by the copy below.
	PxMemZero(newBits + mSize, (newSize - mSize) * sizeof(PxU32));
	PxMemCopy(newBits, mBits, mSize*sizeof(PxU32));
	PX_FREE(mBits);
	mBits = newBits;
	mSize = newSize;
}
///////////////////////////////////////////////////////////////////////////////
// Decoding helpers for BVHNode::mData:
// bit 0 = leaf flag; for leaves, bits 1-4 = primitive count and bits 5+ = start
// index into the primitives array; for internal nodes, bits 1+ = index of the
// positive child (the negative child is stored right after it, at pos+1).
static PX_FORCE_INLINE PxU32 getNbPrimitives(PxU32 data) { return (data>>1)&15; }
static PX_FORCE_INLINE const PxU32* getPrimitives(const PxU32* base, PxU32 data) { return base + (data>>5); }
static PX_FORCE_INLINE const BVHNode* getPos(const BVHNode* base, PxU32 data) { return base + (data>>1); }
static PX_FORCE_INLINE PxU32 isLeaf(PxU32 data) { return data&1; }
// Recompute one node's AABB: for a leaf, the union of its primitives' bounds;
// for an internal node, the union of its two children's bounds.
// Fix: the final V4StoreU lines were corrupted by an HTML-entity mangling
// ("&curren" + "t" turned into the currency sign), restoring "&current->...".
template<const bool hasIndices>
static PX_FORCE_INLINE void refitNode(BVHNode* PX_RESTRICT current, const PxBounds3* PX_RESTRICT boxes, const PxU32* PX_RESTRICT indices, BVHNode* PX_RESTRICT const nodeBase)
{
	// PT: we can safely use V4 loads on both boxes and nodes here:
	// - it's safe on boxes because we allocated one extra box in the pruning pool
	// - it's safe on nodes because there's always some data within the node, after the BV
	const PxU32 data = current->mData;

	Vec4V resultMinV, resultMaxV;
	if(isLeaf(data))
	{
		const PxU32 nbPrims = getNbPrimitives(data);
		if(nbPrims)
		{
			if(hasIndices)
			{
				// Union of all referenced primitives' bounds.
				const PxU32* primitives = getPrimitives(indices, data);
				resultMinV = V4LoadU(&boxes[*primitives].minimum.x);
				resultMaxV = V4LoadU(&boxes[*primitives].maximum.x);
				if(nbPrims>1)
				{
					const PxU32* last = primitives + nbPrims;
					primitives++;
					while(primitives!=last)
					{
						resultMinV = V4Min(resultMinV, V4LoadU(&boxes[*primitives].minimum.x));
						resultMaxV = V4Max(resultMaxV, V4LoadU(&boxes[*primitives].maximum.x));
						primitives++;
					}
				}
			}
			else
			{
				// No remap table => single primitive index stored in the node.
				PX_ASSERT(nbPrims==1);
				const PxU32 primIndex = data>>5;
				resultMinV = V4LoadU(&boxes[primIndex].minimum.x);
				resultMaxV = V4LoadU(&boxes[primIndex].maximum.x);
			}
		}
		else
		{
			// Might happen after a node has been invalidated
			const float max = GU_EMPTY_BOUNDS_EXTENTS;
			resultMinV = V4Load(max);
			resultMaxV = V4Load(-max);
		}
	}
	else
	{
		const BVHNode* pos = getPos(nodeBase, data);
		const BVHNode* neg = pos+1;
		const PxBounds3& posBox = pos->mBV;
		const PxBounds3& negBox = neg->mBV;
		resultMinV = V4Min(V4LoadU(&posBox.minimum.x), V4LoadU(&negBox.minimum.x));
//		resultMaxV = V4Max(V4LoadU(&posBox.maximum.x), V4LoadU(&negBox.maximum.x));
#if PX_INTEL_FAMILY && !defined(PX_SIMD_DISABLED)
		// Load (min.z, max.x, max.y, max.z) then rotate the lanes so the max
		// components line up in the low three lanes for the V4Max.
		Vec4V posMinV = V4LoadU(&posBox.minimum.z);
		Vec4V negMinV = V4LoadU(&negBox.minimum.z);
		posMinV = _mm_shuffle_ps(posMinV, posMinV, _MM_SHUFFLE(0, 3, 2, 1));
		negMinV = _mm_shuffle_ps(negMinV, negMinV, _MM_SHUFFLE(0, 3, 2, 1));
		resultMaxV = V4Max(posMinV, negMinV);
#else
		// PT: fixes the perf issue but not really convincing
		resultMaxV = Vec4V_From_Vec3V(V3Max(V3LoadU(&posBox.maximum.x), V3LoadU(&negBox.maximum.x)));
#endif
	}
	// PT: the V4 stores overwrite the data after the BV, but we just put it back afterwards
	V4StoreU(resultMinV, &current->mBV.minimum.x);
	V4StoreU(resultMaxV, &current->mBV.maximum.x);
	current->mData = data;
}
template<const bool hasIndices>
static void refitLoop(const PxBounds3* PX_RESTRICT boxes, BVHNode* const PX_RESTRICT nodeBase, const PxU32* PX_RESTRICT indices, PxU32 nbNodes)
{
PX_ASSERT(boxes);
PX_ASSERT(nodeBase);
// Bottom-up update
PxU32 index = nbNodes;
while(index--)
{
BVHNode* current = nodeBase + index;
if(index)
PxPrefetch(current - 1);
// PxBounds3 before = current->mBV;
if(hasIndices)
refitNode<1>(current, boxes, indices, nodeBase);
else
refitNode<0>(current, boxes, indices, nodeBase);
// if(current->mBV.minimum==before.minimum && current->mBV.maximum==before.maximum)
// break;
}
}
void BVHCoreData::fullRefit(const PxBounds3* boxes)
{
if(mIndices)
refitLoop<1>(boxes, mNodes, mIndices, mNbNodes);
else
refitLoop<0>(boxes, mNodes, mIndices, mNbNodes);
}
// Mark the given node and all of its ancestors as needing a refit; the marks
// are consumed later by refitMarkedNodes(). Also maintains the highest-dirty-
// word watermark so the refit pass only scans the used part of the bitmask.
// Fix: replaced the goto/label construct with a plain 'break' - the label sat
// directly after the loop, so both exits are equivalent structured control flow.
void BVHPartialRefitData::markNodeForRefit(TreeNodeIndex nodeIndex)
{
	BitArray* PX_RESTRICT refitBitmask = &mRefitBitmask;
	// Lazily create the bitmask on first use.
	if(!refitBitmask->getBits())
		refitBitmask->init(mNbNodes);

	PX_ASSERT(nodeIndex<mNbNodes);

	const PxU32* PX_RESTRICT parentIndices = getParentIndices();

	PxU32 refitHighestSetWord = mRefitHighestSetWord;

	PxU32 currentIndex = nodeIndex;
	while(1)
	{
		PX_ASSERT(currentIndex<mNbNodes);
		// If this node is already marked, all its ancestors are too: early exit.
		if(refitBitmask->isSet(currentIndex))
			break;

		refitBitmask->setBit(currentIndex);
		const PxU32 currentMarkedWord = currentIndex>>5;
		refitHighestSetWord = PxMax(refitHighestSetWord, currentMarkedWord);

		const PxU32 parentIndex = parentIndices[currentIndex];
		PX_ASSERT(parentIndex == 0 || parentIndex < currentIndex);
		// The root is its own parent: we just marked it, so we are done.
		if(currentIndex == parentIndex)
			break;
		currentIndex = parentIndex;
	}
	mRefitHighestSetWord = refitHighestSetWord;
}
#define FIRST_VERSION
#ifdef FIRST_VERSION
// Refit only the nodes whose bit is set in 'bits', scanning words from the
// highest dword down and bits from high to low so nodes are visited in
// decreasing index order (children before parents - see the
// parentIndex < currentIndex invariant asserted in markNodeForRefit).
// Each processed word is cleared, consuming the marks.
template<const bool hasIndices>
static void refitMarkedLoop(const PxBounds3* PX_RESTRICT boxes, BVHNode* const PX_RESTRICT nodeBase, const PxU32* PX_RESTRICT indices, PxU32* PX_RESTRICT bits, PxU32 nbToGo)
{
#ifdef _DEBUG
	PxU32 nbRefit=0;
#endif
	PxU32 size = nbToGo;
	while(size--)
	{
		// Test 32 bits at a time
		const PxU32 currentBits = bits[size];
		if(!currentBits)
			continue;

		// Walk this word's 32 node indices from highest to lowest.
		PxU32 index = (size+1)<<5;
		PxU32 mask = PxU32(1<<((index-1)&31));
		PxU32 count=32;
		while(count--)
		{
			index--;
			PxPrefetch(nodeBase + index);
			PX_ASSERT(size==index>>5);
			PX_ASSERT(mask==PxU32(1<<(index&31)));
			if(currentBits & mask)
			{
				if(hasIndices)
					refitNode<1>(nodeBase + index, boxes, indices, nodeBase);
				else
					refitNode<0>(nodeBase + index, boxes, indices, nodeBase);
#ifdef _DEBUG
				nbRefit++;
#endif
			}
			mask>>=1;
		}
		// Consume the marks for this word.
		bits[size] = 0;
	}
}
// Refit only the nodes previously flagged by markNodeForRefit(), then reset
// the dirty-word watermark. The bitmask words themselves are cleared by
// refitMarkedLoop as it consumes them, so no full clearAll() is needed.
void BVHPartialRefitData::refitMarkedNodes(const PxBounds3* boxes)
{
	if(!mRefitBitmask.getBits())
		return;	// No refit needed
	{
		/*const*/ PxU32* bits = const_cast<PxU32*>(mRefitBitmask.getBits());
		PxU32 size = mRefitHighestSetWord+1;
#ifdef _DEBUG
		// Sanity check: every word above the watermark must already be clear.
		if(1)
		{
			const PxU32 totalSize = mRefitBitmask.getSize();
			for(PxU32 i=size;i<totalSize;i++)
			{
				PX_ASSERT(!bits[i]);
			}
		}
#endif
		if(mIndices)
			refitMarkedLoop<1>(boxes, mNodes, mIndices, bits, size);
		else
			refitMarkedLoop<0>(boxes, mNodes, mIndices, bits, size);

		mRefitHighestSetWord = 0;
//		mRefitBitmask.clearAll();
	}
}
#endif
//#define SECOND_VERSION
#ifdef SECOND_VERSION
// NOTE(review): this alternative implementation is compiled out (the
// SECOND_VERSION define above is commented out) and is unfinished as written:
// 'size' and 'nbRefit' are used without being declared, and the braces do not
// balance. Kept for reference only; do not enable without completing it.
void BVHPartialRefitData::refitMarkedNodes(const PxBounds3* boxes)
{
	/*const*/ PxU32* bits = const_cast<PxU32*>(mRefitBitmask.getBits());
	if(!bits)
		return;	// No refit needed

	const PxU32 lastSetBit = mRefitBitmask.findLast();

	const PxU32* indices = mIndices;
	BVHNode* const nodeBase = mNodes;

	// PT: ### bitmap iterator pattern
	for(PxU32 w = 0; w <= lastSetBit >> 5; ++w)
	{
		for(PxU32 b = bits[w]; b; b &= b-1)
		{
			const PxU32 index = (PxU32)(w<<5|PxLowestSetBit(b));
			while(size--)
			{
				// Test 32 bits at a time
				const PxU32 currentBits = bits[size];
				if(!currentBits)
					continue;

				PxU32 index = (size+1)<<5;
				PxU32 mask = PxU32(1<<((index-1)&31));
				PxU32 count=32;
				while(count--)
				{
					index--;
					PxPrefetch(nodeBase + index);
					PX_ASSERT(size==index>>5);
					PX_ASSERT(mask==PxU32(1<<(index&31)));
					if(currentBits & mask)
					{
						refitNode(nodeBase + index, boxes, indices, nodeBase);
#ifdef _DEBUG
						nbRefit++;
#endif
					}
					mask>>=1;
				}
				bits[size] = 0;
			}
			mRefitHighestSetWord = 0;
	//		mRefitBitmask.clearAll();
		}
	}
}
#endif
///////////////////////////////////////////////////////////////////////////////
// The progressive-build stack (mStack) starts out unallocated; it is created
// on demand by progressiveBuild() and destroyed in release().
AABBTree::AABBTree() : mTotalPrims(0)
{
	// Progressive building
	mStack = NULL;
	//~Progressive building
}
AABBTree::~AABBTree()
{
	// clearRefitMap=false: no point clearing the refit map while destroying.
	release(false);
}
// Free all tree data (build stack, refit data, nodes, indices) and reset the
// counters. clearRefitMap is forwarded to releasePartialRefitData().
void AABBTree::release(bool clearRefitMap)
{
	// Progressive building
	PX_DELETE(mStack);
	//~Progressive building
	releasePartialRefitData(clearRefitMap);
	// PT: TODO: move some to BVHCoreData dtor
	PX_DELETE_ARRAY(mNodes);
	PX_FREE(mIndices);
	mNbNodes = 0;
	mNbIndices = 0;
}
// Initialize nodes/indices from the input tree merge data
// Deep-copies the merge data's indices and nodes into this tree.
// Must be called on an empty tree (see the asserts below).
void AABBTree::initTree(const AABBTreeMergeData& tree)
{
	PX_ASSERT(mIndices == NULL);
	PX_ASSERT(mNodes == NULL);
	PX_ASSERT(mParentIndices == NULL);
	// allocate,copy indices
	mIndices = PX_ALLOCATE(PxU32, tree.mNbIndices, "AABB tree indices");
	mNbIndices = tree.mNbIndices;
	PxMemCopy(mIndices, tree.mIndices, sizeof(PxU32)*tree.mNbIndices);
	// allocate,copy nodes
	mNodes = PX_NEW(BVHNode)[tree.mNbNodes];
	mNbNodes = tree.mNbNodes;
	PxMemCopy(mNodes, tree.mNodes, sizeof(BVHNode)*tree.mNbNodes);
}
// Shift indices of the tree by offset. Used for merged trees, when initial indices needs to be shifted to match indices in current pruning pool
void AABBTree::shiftIndices(PxU32 offset)
{
	// Rebase every primitive index by 'offset', e.g. to align a merged tree's
	// indices with the current pruning pool.
	PxU32* indices = mIndices;
	const PxU32 nb = mNbIndices;
	for(PxU32 i=0; i<nb; i++)
		indices[i] += offset;
}
// First stage of a progressive build: validate input, release any previous
// tree, and create the primitive index list that the build will reorder.
// Returns false when there is nothing to build or initialization failed.
bool AABBTree::buildInit(const AABBTreeBuildParams& params, NodeAllocator& nodeAllocator, BuildStats& stats)
{
	// Checkings
	const PxU32 nbPrimitives = params.mNbPrimitives;
	if(!nbPrimitives)
		return false;
	// Release previous tree
	release();
	// Initialize indices. This list will be modified during build.
	mNbIndices = nbPrimitives;
	PxU32* indices = initAABBTreeBuild(params, nodeAllocator, stats);
	if(!indices)
		return false;
	PX_ASSERT(!mIndices);
	mIndices = indices;
	return true;
}
// Final stage of a build: flatten the builder's node pool into the contiguous
// runtime node array and release the temporary build data.
void AABBTree::buildEnd(const AABBTreeBuildParams& params, NodeAllocator& nodeAllocator, const BuildStats& stats)
{
	PX_FREE(params.mCache);
	// Get back total number of nodes
	mNbNodes = stats.getCount();
	mTotalPrims = stats.mTotalPrims;
	mNodes = PX_NEW(BVHNode)[mNbNodes];
	PX_ASSERT(mNbNodes==nodeAllocator.mTotalNbNodes);
	flattenTree(nodeAllocator, mNodes);
	nodeAllocator.release();
}
// One-shot (non-progressive) build. Returns false when there are no
// primitives or the tree build failed.
bool AABBTree::build(const AABBTreeBuildParams& params, NodeAllocator& nodeAllocator)
{
	const PxU32 nbPrimitives = params.mNbPrimitives;
	if(!nbPrimitives)
		return false;
	// Release previous tree
	release();
	BuildStats stats;
	mNbIndices = nbPrimitives;
	mIndices = buildAABBTree(params, nodeAllocator, stats);
	if(!mIndices)
		return false;
	buildEnd(params, nodeAllocator, stats);
	return true;
}
void AABBTree::shiftOrigin(const PxVec3& shift)
{
BVHNode* const nodeBase = mNodes;
const PxU32 totalNbNodes = mNbNodes;
for(PxU32 i=0; i<totalNbNodes; i++)
{
BVHNode& current = nodeBase[i];
if((i+1) < totalNbNodes)
PxPrefetch(nodeBase + i + 1);
current.mBV.minimum -= shift;
current.mBV.maximum -= shift;
}
}
// Progressive building
// Subdivide one build node and queue its children (if any) for later steps.
// Returns the number of primitives handled by this node, which the caller
// accumulates against its per-step budget.
static PxU32 incrementalBuildHierarchy(FIFOStack& stack, AABBTreeBuildNode* node, const AABBTreeBuildParams& params, BuildStats& stats, NodeAllocator& nodeBase, PxU32* const indices)
{
	node->subdivide(params, stats, nodeBase, indices);
	if(!node->isLeaf())
	{
		// Children are allocated as a pair: neg lives right after pos.
		AABBTreeBuildNode* pos = const_cast<AABBTreeBuildNode*>(node->getPos());
		PX_ASSERT(pos);
		AABBTreeBuildNode* neg = pos + 1;
		stack.push(neg);
		stack.push(pos);
	}
	stats.mTotalPrims += node->mNbPrimitives;
	return node->mNbPrimitives;
}
// Runs one step of a progressive build.
// progress==0: initialization stage. progress==1: subdivision stage, budgeted
// to roughly 'limit' primitives per call. Returns PX_INVALID_U32 on failure
// and 0 once the build has completed.
// NOTE(review): 'return progress++' returns the pre-increment value (0) from
// the init stage; call sites appear to drive the stage value themselves -
// confirm against callers before relying on the return as the next stage.
PxU32 AABBTree::progressiveBuild(const AABBTreeBuildParams& params, NodeAllocator& nodeAllocator, BuildStats& stats, PxU32 progress, PxU32 limit)
{
	if(progress==0)
	{
		if(!buildInit(params, nodeAllocator, stats))
			return PX_INVALID_U32;
		// Seed the work stack with the root build node.
		mStack = PX_NEW(FIFOStack);
		mStack->push(nodeAllocator.mPool);
		return progress++;
	}
	else if(progress==1)
	{
		PxU32 stackCount = mStack->getNbEntries();
		if(stackCount)
		{
			// Pop and subdivide nodes until the per-step primitive budget is hit.
			PxU32 Total = 0;
			const PxU32 Limit = limit;
			while(Total<Limit)
			{
				AABBTreeBuildNode* Entry;
				if(mStack->pop(Entry))
					Total += incrementalBuildHierarchy(*mStack, Entry, params, stats, nodeAllocator, mIndices);
				else
					break;
			}
			return progress;
		}
		// Stack exhausted: finalize the tree and free the build stack.
		buildEnd(params, nodeAllocator, stats);
		PX_DELETE(mStack);
		return 0; // Done!
	}
	return PX_INVALID_U32;
}
//~Progressive building
// Encode leaf data for a merged-in node: rebase the primitive start index by
// indicesOffset and repack it with the primitive count and the leaf flag
// (bit 0 = leaf, bits 1-4 = count, bits 5+ = start index).
PX_FORCE_INLINE static void setLeafData(PxU32& leafData, const BVHNode& node, const PxU32 indicesOffset)
{
	const PxU32 index = indicesOffset + (node.mData >> 5);
	const PxU32 nbPrims = node.getNbPrimitives();
	PX_ASSERT(nbPrims < 16);
	leafData = (index << 5) | ((nbPrims & 15) << 1) | 1;
}
// Copy the tree into nodes. Update node indices, leaf indices.
// Append the merge tree's nodes starting at 'nodeIndex', rebasing internal
// child links to this tree's node array, rebasing leaf primitive indices by
// mNbIndices, and recording parent links for the copied children.
// 'nodeIndex' is advanced past the copied nodes on return.
void AABBTree::addRuntimeChilds(PxU32& nodeIndex, const AABBTreeMergeData& treeParams)
{
	PX_ASSERT(nodeIndex < mNbNodes + treeParams.mNbNodes + 1);
	const PxU32 baseNodeIndex = nodeIndex;
	// copy the src tree into dest tree nodes, update its data
	for (PxU32 i = 0; i < treeParams.mNbNodes; i++)
	{
		PX_ASSERT(nodeIndex < mNbNodes + treeParams.mNbNodes + 1);
		mNodes[nodeIndex].mBV = treeParams.mNodes[i].mBV;
		if (treeParams.mNodes[i].isLeaf())
		{
			// Leaf: shift its primitive start index into the combined index array.
			setLeafData(mNodes[nodeIndex].mData, treeParams.mNodes[i], mNbIndices);
		}
		else
		{
			// Internal node: rebase the child link and record both children's parent.
			const PxU32 srcNodeIndex = baseNodeIndex + (treeParams.mNodes[i].getPosIndex());
			mNodes[nodeIndex].mData = srcNodeIndex << 1;
			mParentIndices[srcNodeIndex] = nodeIndex;
			mParentIndices[srcNodeIndex + 1] = nodeIndex;
		}
		nodeIndex++;
	}
}
// Merge tree into targetNode, where target node is a leaf
// 1. Allocate new nodes/parent, copy all the nodes/parents
// 2. Create new node at the end, copy the data from target node
// 3. Copy the merge tree after the new node, create the parent map for them, update the leaf indices
// Schematic view:
// Target Nodes: ...Tn...
// Input tree: R1->Rc0, Rc1...
// Merged tree: ...Tnc->...->Nc0,R1->Rc0,Rc1...
// where new node: Nc0==Tn and Tnc is not a leaf anymore and points to Nc0
void AABBTree::mergeRuntimeLeaf(BVHNode& targetNode, const AABBTreeMergeData& treeParams, PxU32 targetMergeNodeIndex)
{
	PX_ASSERT(mParentIndices);
	PX_ASSERT(targetNode.isLeaf());
	// 1. Allocate new nodes/parent, copy all the nodes/parents
	// allocate new runtime pool with max combine number of nodes
	// we allocate only 1 additional node each merge
	BVHNode* newRuntimePool = PX_NEW(BVHNode)[mNbNodes + treeParams.mNbNodes + 1];
	PxU32* newParentIndices = PX_ALLOCATE(PxU32, (mNbNodes + treeParams.mNbNodes + 1), "AABB parent indices");
	// copy the whole target nodes, we will add the new node at the end together with the merge tree
	PxMemCopy(newRuntimePool, mNodes, sizeof(BVHNode)*(mNbNodes));
	PxMemCopy(newParentIndices, mParentIndices, sizeof(PxU32)*(mNbNodes));
	// 2. Create new node at the end, copy the data from target node
	PxU32 nodeIndex = mNbNodes;
	// copy the targetNode at the end of the new nodes
	newRuntimePool[nodeIndex].mBV = targetNode.mBV;
	newRuntimePool[nodeIndex].mData = targetNode.mData;
	// update the parent information
	newParentIndices[nodeIndex] = targetMergeNodeIndex;
	// mark for refit
	// if the target node was marked dirty, its relocated copy must be too
	if (mRefitBitmask.getBits() && mRefitBitmask.isSet(targetMergeNodeIndex))
	{
		mRefitBitmask.setBit(nodeIndex);
		const PxU32 currentMarkedWord = nodeIndex >> 5;
		mRefitHighestSetWord = PxMax(mRefitHighestSetWord, currentMarkedWord);
	}
	// swap pointers
	PX_DELETE_ARRAY(mNodes);
	mNodes = newRuntimePool;
	PX_FREE(mParentIndices);
	mParentIndices = newParentIndices;
	// 3. Copy the merge tree after the new node, create the parent map for them, update the leaf indices
	nodeIndex++;
	addRuntimeChilds(nodeIndex, treeParams);
	PX_ASSERT(nodeIndex == mNbNodes + 1 + treeParams.mNbNodes);
	// update the parent information for the input tree root node
	// (mNbNodes is still the old node count here, so mNbNodes+1 is the copied root)
	mParentIndices[mNbNodes + 1] = targetMergeNodeIndex;
	// fix the child information for the target node, was a leaf before
	// (points at index mNbNodes, i.e. the copied leaf Nc0; bit 0 clear => internal)
	mNodes[targetMergeNodeIndex].mData = mNbNodes << 1;
	// update the total number of nodes
	mNbNodes = mNbNodes + 1 + treeParams.mNbNodes;
}
// Merge tree into targetNode, where target node is not a leaf
// 1. Allocate new nodes/parent, copy the nodes/parents till targetNodePosIndex
// 2. Create new node , copy the data from target node
// 3. Copy the rest of the target tree nodes/parents at the end -> targetNodePosIndex + 1 + treeParams.mNbNodes
// 4. Copy the merge tree after the new node, create the parent map for them, update the leaf indices
// 5. Go through the nodes copied at the end and fix the parents/childs
// Schematic view:
// Target Nodes: ...Tn->...->Tc0,Tc1...
// Input tree: R1->Rc0, Rc1...
// Merged tree: ...Tn->...->Nc0,R1->Rc0,Rc1...,Tc0,Tc1...
// where new node: Nc0->...->Tc0,Tc1
void AABBTree::mergeRuntimeNode(BVHNode& targetNode, const AABBTreeMergeData& treeParams, PxU32 targetMergeNodeIndex)
{
	PX_ASSERT(mParentIndices);
	PX_ASSERT(!targetNode.isLeaf());
	// Get the target node child pos, this is where we insert the new node and the input tree
	const PxU32 targetNodePosIndex = targetNode.getPosIndex();
	// 1. Allocate new nodes/parent, copy the nodes/parents till targetNodePosIndex
	// allocate new runtime pool with max combine number of nodes
	// we allocate only 1 additional node each merge
	BVHNode* newRuntimePool = PX_NEW(BVHNode)[mNbNodes + treeParams.mNbNodes + 1];
	PxU32* newParentIndices = PX_ALLOCATE(PxU32, (mNbNodes + treeParams.mNbNodes + 1), "AABB parent indices");
	// copy the untouched part of the nodes and parents
	PxMemCopy(newRuntimePool, mNodes, sizeof(BVHNode)*(targetNodePosIndex));
	PxMemCopy(newParentIndices, mParentIndices, sizeof(PxU32)*(targetNodePosIndex));
	PxU32 nodeIndex = targetNodePosIndex;
	// 2. Create new node , copy the data from target node
	// new node Nc0 keeps targetNode's bounds; its child link is targetNode's old
	// child index shifted by the 1 + treeParams.mNbNodes inserted nodes
	newRuntimePool[nodeIndex].mBV = targetNode.mBV;
	newRuntimePool[nodeIndex].mData = ((targetNode.mData >> 1) + 1 + treeParams.mNbNodes) << 1;
	// update parent information
	newParentIndices[nodeIndex] = targetMergeNodeIndex;
	// handle mark for refit
	// if the target node was marked dirty, the new node must be too
	if(mRefitBitmask.getBits() && mRefitBitmask.isSet(targetMergeNodeIndex))
	{
		mRefitBitmask.setBit(nodeIndex);
		const PxU32 currentMarkedWord = nodeIndex >> 5;
		mRefitHighestSetWord = PxMax(mRefitHighestSetWord, currentMarkedWord);
	}
	// 3. Copy the rest of the target tree nodes/parents at the end -> targetNodePosIndex + 1 + treeParams.mNbNodes
	if(mNbNodes - targetNodePosIndex)
	{
		PX_ASSERT(mNbNodes - targetNodePosIndex > 0);
		PxMemCopy(newRuntimePool + targetNodePosIndex + 1 + treeParams.mNbNodes, mNodes + targetNodePosIndex, sizeof(BVHNode)*(mNbNodes - targetNodePosIndex));
		PxMemCopy(newParentIndices + targetNodePosIndex + 1 + treeParams.mNbNodes, mParentIndices + targetNodePosIndex, sizeof(PxU32)*(mNbNodes - targetNodePosIndex));
	}
	// swap the pointers, release the old memory
	PX_DELETE_ARRAY(mNodes);
	mNodes = newRuntimePool;
	PX_FREE(mParentIndices);
	mParentIndices = newParentIndices;
	// 4. Copy the merge tree after the new node, create the parent map for them, update the leaf indices
	nodeIndex++;
	addRuntimeChilds(nodeIndex, treeParams);
	PX_ASSERT(nodeIndex == targetNodePosIndex + 1 + treeParams.mNbNodes);
	// update the total number of nodes
	mNbNodes = mNbNodes + 1 + treeParams.mNbNodes;
	// update the parent information for the input tree root node
	mParentIndices[targetNodePosIndex + 1] = targetMergeNodeIndex;
	// 5. Go through the nodes copied at the end and fix the parents/childs
	for (PxU32 i = targetNodePosIndex + 1 + treeParams.mNbNodes; i < mNbNodes; i++)
	{
		// check if the parent is the targetNode, if yes update the parent to new node
		if(mParentIndices[i] == targetMergeNodeIndex)
		{
			mParentIndices[i] = targetNodePosIndex;
		}
		else
		{
			// if parent node has been moved, update the parent node
			if(mParentIndices[i] >= targetNodePosIndex)
			{
				mParentIndices[i] = mParentIndices[i] + 1 + treeParams.mNbNodes;
			}
			else
			{
				// if parent has not been moved, update its child information
				const PxU32 parentIndex = mParentIndices[i];
				// update the child information to point to Pos child
				// (only touch the parent once, when visiting its odd/neg slot)
				if(i % 2 != 0)
				{
					const PxU32 srcNodeIndex = mNodes[parentIndex].getPosIndex();
					// if child index points to a node that has been moved, update the child index
					PX_ASSERT(!mNodes[parentIndex].isLeaf());
					PX_ASSERT(srcNodeIndex > targetNodePosIndex);
					mNodes[parentIndex].mData = (1 + treeParams.mNbNodes + srcNodeIndex) << 1;
				}
			}
		}
		if(!mNodes[i].isLeaf())
		{
			// update the child node index
			const PxU32 srcNodeIndex = 1 + treeParams.mNbNodes + mNodes[i].getPosIndex();
			mNodes[i].mData = srcNodeIndex << 1;
		}
	}
}
// traverse the target node, the tree is inside the targetNode, and find the best place where merge the tree
// Traverse the target tree to find the smallest node that still fully contains
// the incoming tree's root AABB, then merge the incoming tree there.
// Fix: the previous code called getPos/getNeg on leaf nodes too - a leaf's
// mData encodes a primitive index/count, not a child offset, so it read bogus
// "child" bounds (potentially out of bounds). Only descend on internal nodes.
void AABBTree::traverseRuntimeNode(BVHNode& targetNode, const AABBTreeMergeData& treeParams, PxU32 nodeIndex)
{
	const BVHNode& srcNode = treeParams.getRootNode();
	PX_ASSERT(srcNode.mBV.isInside(targetNode.mBV));

	// Check if the srcNode(tree) can fit inside any of the target childs. If yes, traverse the target tree child
	if(!targetNode.isLeaf())
	{
		BVHNode& targetPosChild = *targetNode.getPos(mNodes);
		if(srcNode.mBV.isInside(targetPosChild.mBV))
		{
			return traverseRuntimeNode(targetPosChild, treeParams, targetNode.getPosIndex());
		}

		BVHNode& targetNegChild = *targetNode.getNeg(mNodes);
		if (srcNode.mBV.isInside(targetNegChild.mBV))
		{
			return traverseRuntimeNode(targetNegChild, treeParams, targetNode.getNegIndex());
		}
	}

	// we cannot traverse target anymore, lets add the srcTree to current target node
	if(targetNode.isLeaf())
		mergeRuntimeLeaf(targetNode, treeParams, nodeIndex);
	else
		mergeRuntimeNode(targetNode, treeParams, nodeIndex);
}
// Merge the input tree into current tree.
// Traverse the tree and find the smallest node, where the whole new tree fits. When we find the node
// we create one new node pointing to the original children and the to the input tree root.
void AABBTree::mergeTree(const AABBTreeMergeData& treeParams)
{
	// allocate new indices buffer
	PxU32* newIndices = PX_ALLOCATE(PxU32, (mNbIndices + treeParams.mNbIndices), "AABB tree indices");
	PxMemCopy(newIndices, mIndices, sizeof(PxU32)*mNbIndices);
	PX_FREE(mIndices);
	mIndices = newIndices;
	mTotalPrims += treeParams.mNbIndices;
	// copy the new indices, re-index using the provided indicesOffset. Note that indicesOffset
	// must be provided, as original mNbIndices can be different than indicesOffset dues to object releases.
	for (PxU32 i = 0; i < treeParams.mNbIndices; i++)
	{
		mIndices[mNbIndices + i] = treeParams.mIndicesOffset + treeParams.mIndices[i];
	}
	// check the mRefitBitmask if we fit all the new nodes
	mRefitBitmask.resize(mNbNodes + treeParams.mNbNodes + 1);
	// create the parent information so we can update it
	// (lazy-created; mergeRuntimeLeaf/Node assert on its presence)
	getParentIndices();
	// if new tree is inside the root AABB we will traverse the tree to find better node where to attach the tree subnodes
	// if the root is a leaf we merge with the root.
	if(treeParams.getRootNode().mBV.isInside(mNodes[0].mBV) && !mNodes[0].isLeaf())
	{
		traverseRuntimeNode(mNodes[0], treeParams, 0);
	}
	else
	{
		if(mNodes[0].isLeaf())
		{
			mergeRuntimeLeaf(mNodes[0], treeParams, 0);
		}
		else
		{
			mergeRuntimeNode(mNodes[0], treeParams, 0);
		}
		// increase the tree root AABB
		mNodes[0].mBV.include(treeParams.getRootNode().mBV);
	}
#ifdef _DEBUG
	//verify parent indices
	for (PxU32 i = 0; i < mNbNodes; i++)
	{
		if (i)
		{
			PX_ASSERT(mNodes[mParentIndices[i]].getPosIndex() == i || mNodes[mParentIndices[i]].getNegIndex() == i);
		}
		if (!mNodes[i].isLeaf())
		{
			PX_ASSERT(mParentIndices[mNodes[i].getPosIndex()] == i);
			PX_ASSERT(mParentIndices[mNodes[i].getNegIndex()] == i);
		}
	}
	// verify the tree nodes, leafs
	for (PxU32 i = 0; i < mNbNodes; i++)
	{
		if (mNodes[i].isLeaf())
		{
			const PxU32 index = mNodes[i].mData >> 5;
			const PxU32 nbPrim = mNodes[i].getNbPrimitives();
			PX_ASSERT(index + nbPrim <= mNbIndices + treeParams.mNbIndices);
		}
		else
		{
			const PxU32 nodeIndex = (mNodes[i].getPosIndex());
			PX_ASSERT(nodeIndex < mNbNodes);
		}
	}
#endif // _DEBUG
	// commit the enlarged index count only after the merge completed
	mNbIndices += treeParams.mNbIndices;
}
| 43,247 | C++ | 29.456338 | 182 | 0.703864 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuExtendedBucketPruner.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_EXTENDED_BUCKET_PRUNER_H
#define GU_EXTENDED_BUCKET_PRUNER_H
#include "GuPrunerTypedef.h"
#include "GuAABBTreeUpdateMap.h"
#include "foundation/PxHashMap.h"
#include "GuAABBTreeBounds.h"
#include "GuSecondaryPruner.h"
namespace physx
{
class PxRenderOutput;
namespace Gu
{
class AABBTreeMergeData;
// Extended bucket pruner data, if an object belongs to the tree of trees, we need to
// remember node for the sub tree, the tree it belongs to and the main tree node
struct ExtendedBucketPrunerData
{
	PxU32 mTimeStamp; // timestamp of the merge this object arrived with
	TreeNodeIndex mSubTreeNode; // sub tree node index
	PxU32 mMergeIndex; // index in bounds and merged trees array
};
// Merged tree structure, holds tree and its timeStamp, released when no objects is in the tree
// or timeStamped objects are released
struct MergedTree
{
	AABBTree* mTree; // AABB tree
	size_t mTimeStamp; // timestamp shared by the objects merged in with this tree
};
// hashing function for PrunerPayload key
// PT: TODO: move this to PrunerPayload?
struct ExtendedBucketPrunerHash
{
	PX_FORCE_INLINE uint32_t operator()(const PrunerPayload& payload) const
	{
#if PX_P64_FAMILY
//		const PxU32 h0 = PxHash((const void*)payload.data[0]);
//		const PxU32 h1 = PxHash((const void*)payload.data[1]);
		// Fold each 64-bit payload word down to its low 32 bits, then hash the
		// recombined 64-bit value.
		const PxU32 h0 = PxU32(PX_MAX_U32 & payload.data[0]);
		const PxU32 h1 = PxU32(PX_MAX_U32 & payload.data[1]);
		return physx::PxComputeHash(PxU64(h0) | (PxU64(h1) << 32));
#else
		// 32-bit platforms: both payload words already fit, combine directly.
		return physx::PxComputeHash(PxU64(payload.data[0]) | (PxU64(payload.data[1]) << 32));
#endif
	}
	PX_FORCE_INLINE bool equal(const PrunerPayload& k0, const PrunerPayload& k1) const
	{
		// Keys are equal iff both payload words match.
		return (k0.data[0] == k1.data[0]) && (k0.data[1] == k1.data[1]);
	}
};
// A.B. replace, this is useless, need to be able to traverse the map and release while traversing, also eraseAt failed
typedef PxHashMap<PrunerPayload, ExtendedBucketPrunerData, ExtendedBucketPrunerHash> ExtendedBucketPrunerMap;
// Extended bucket pruner holds single objects in a bucket pruner and AABBtrees in a tree of trees.
// Base usage of ExtendedBucketPruner is for dynamic AABBPruner new objects, that did not make it
// into new tree. Single objects go directly into a bucket pruner, while merged AABBtrees
// go into a tree of trees.
// PT: TODO: this is not a Pruner (doesn't use the Pruner API) so its name should be e.g. "ExtendedBucketPrunerCore".
// And it's also not always using a bucket pruner... so the whole "ExtendedBucketPruner" name everywhere is wrong.
class ExtendedBucketPruner
{
	public:
												ExtendedBucketPruner(PxU64 contextID, CompanionPrunerType type, const PruningPool* pool);
												~ExtendedBucketPruner();
					// release
					void						release();
					// add single object into a bucket pruner directly
					// (no-op success when no companion pruner was created)
	PX_FORCE_INLINE	bool						addObject(const PrunerPayload& object, PrunerHandle handle, const PxBounds3& worldAABB, const PxTransform& transform, PxU32 timeStamp, const PoolIndex poolIndex)
												{
													return mCompanion ? mCompanion->addObject(object, handle, worldAABB, transform, timeStamp, poolIndex) : true;
												}
					// add AABB tree from pruning structure - adds new primitive into main AABB tree
					void						addTree(const AABBTreeMergeData& mergeData, PxU32 timeStamp);
					// update object
					bool						updateObject(const PxBounds3& worldAABB, const PxTransform& transform, const PrunerPayload& object, PrunerHandle handle, const PoolIndex poolIndex);
					// remove object, removed object is replaced in pruning pool by swapped object, indices needs to be updated
					bool						removeObject(const PrunerPayload& object, PrunerHandle handle, PxU32 objectIndex, const PrunerPayload& swapObject, PxU32 swapObjectIndex);
					// swap object index, the object index can be in core pruner or tree of trees
					void						swapIndex(PxU32 objectIndex, const PrunerPayload& swapObject, PxU32 swapObjectIndex, bool corePrunerIncluded = true);
					// refit marked nodes in tree of trees
					void						refitMarkedNodes(const PxBounds3* boxes);
					// notify timestampChange - swap trees in incremental pruner
	PX_FORCE_INLINE	void						timeStampChange()
												{
													if(mCompanion)
														mCompanion->timeStampChange();
												}
					// look for objects marked with input timestamp everywhere in the structure, and remove them. This is the same
					// as calling 'removeObject' individually for all these objects, but much more efficient. Returns number of removed objects.
					PxU32						removeMarkedObjects(PxU32 timeStamp);
					// queries against the pruner
					bool						raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback&) const;
					bool						overlap(const ShapeData& queryVolume, PrunerOverlapCallback&) const;
					bool						sweep(const ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback&) const;
					// origin shift
					void						shiftOrigin(const PxVec3& shift);
					// debug visualize
					void						visualize(PxRenderOutput& out, PxU32 color) const;
	PX_FORCE_INLINE	void						build()
												{
													if(mCompanion)
														mCompanion->build();
												}
					// total object count: companion pruner objects + merged-tree objects
	PX_FORCE_INLINE	PxU32						getNbObjects()	const
												{
													const PxU32 nb = mCompanion ? mCompanion->getNbObjects() : 0;
													return nb + mExtendedBucketPrunerMap.size();
												}
					void						getGlobalBounds(PxBounds3&)	const;
	private:
					// separate call for indices invalidation, object can be either in AABBPruner or Bucket pruner, but the swapped object can be
					// in the tree of trees
					void						invalidateObject(const ExtendedBucketPrunerData& object, PxU32 objectIndex, const PrunerPayload& swapObject, PxU32 swapObjectIndex);
					void						resize(PxU32 size);
					void						buildMainAABBTree();
					void						cleanTrees();
#if PX_DEBUG
					// Extended bucket pruner validity check
					bool						checkValidity();
#endif
					CompanionPruner*			mCompanion;					// Companion pruner for single objects
					const PruningPool*			mPruningPool;				// Pruning pool from AABB pruner
					ExtendedBucketPrunerMap		mExtendedBucketPrunerMap;	// Map holding objects from tree merge - objects in tree of trees
					AABBTree*					mMainTree;					// Main tree holding merged trees
					AABBTreeUpdateMap			mMainTreeUpdateMap;			// Main tree updated map - merged trees index to nodes
					AABBTreeUpdateMap			mMergeTreeUpdateMap;		// Merged tree update map used while tree is merged
					AABBTreeBounds				mBounds;					// Merged trees bounds used for main tree building
					MergedTree*					mMergedTrees;				// Merged trees
					PxU32						mCurrentTreeIndex;			// Current trees index
					PxU32						mCurrentTreeCapacity;		// Current tress capacity
					bool						mTreesDirty;				// Dirty marker
	};
}
}
#endif
| 8,527 | C | 44.121693 | 188 | 0.718658 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuSqInternal.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuSqInternal.h"
#include "CmVisualization.h"
#include "GuAABBTree.h"
#include "GuAABBTreeNode.h"
#include "GuIncrementalAABBTree.h"
#include "GuBVH.h"
using namespace physx;
using namespace Cm;
using namespace Gu;
// Recursively renders the AABB of every node in a BVH subtree as a debug box.
// 'root' is the base of the node array, needed to resolve child offsets.
static void drawBVH(const BVHNode* root, const BVHNode* node, PxRenderOutput& out_)
{
	renderOutputDebugBox(out_, node->mBV);
	if(!node->isLeaf())
	{
		// Internal node: recurse into both children
		drawBVH(root, node->getPos(root), out_);
		drawBVH(root, node->getNeg(root), out_);
	}
}
// Debug-draws an entire BVH: sets an identity transform and the requested
// color on the render output, then renders every node's bounding box.
void visualizeTree(PxRenderOutput& out, PxU32 color, const BVH* tree)
{
	if(!tree || !tree->getNodes())
		return;

	out << PxTransform(PxIdentity);
	out << color;
	drawBVH(tree->getNodes(), tree->getNodes(), out);
}
// Debug-draws an entire AABB tree: sets an identity transform and the
// requested color on the render output, then renders every node's box.
void visualizeTree(PxRenderOutput& out, PxU32 color, const AABBTree* tree)
{
	if(!tree || !tree->getNodes())
		return;

	out << PxTransform(PxIdentity);
	out << color;
	drawBVH(tree->getNodes(), tree->getNodes(), out);
}
// Debug-draws an incremental AABB tree. Each node's SIMD-stored bounds are
// unpacked to a PxBounds3; an optional callback can inspect each node and
// veto the default box rendering for it.
void visualizeTree(PxRenderOutput& out, PxU32 color, const IncrementalAABBTree* tree, DebugVizCallback* cb)
{
if(tree && tree->getNodes())
{
struct Local
{
static void _draw(const IncrementalAABBTreeNode* root, const IncrementalAABBTreeNode* node, PxRenderOutput& out_, DebugVizCallback* cb_)
{
// Unpack the node's SIMD min/max vectors into a scalar PxBounds3
PxBounds3 bounds;
V4StoreU(node->mBVMin, &bounds.minimum.x);
PX_ALIGN(16, PxVec4) max4;
V4StoreA(node->mBVMax, &max4.x);
bounds.maximum = PxVec3(max4.x, max4.y, max4.z);
// Callback returning true suppresses the default box for this node
bool discard = false;
if(cb_)
discard = cb_->visualizeNode(*node, bounds);
if(!discard)
Cm::renderOutputDebugBox(out_, bounds);
if(node->isLeaf())
return;
// Internal node: recurse into both children
_draw(root, node->getPos(root), out_, cb_);
_draw(root, node->getNeg(root), out_, cb_);
}
};
out << PxTransform(PxIdentity);
out << color;
Local::_draw(tree->getNodes(), tree->getNodes(), out, cb);
}
}
| 3,507 | C++ | 33.392157 | 139 | 0.718848 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuBox.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxIntrinsics.h"
#include "GuBoxConversion.h"
#include "GuInternal.h"
using namespace physx;
// Builds a box that encloses the given capsule: centered between the
// capsule's endpoints, X axis aligned with its segment.
void Gu::Box::create(const Gu::Capsule& capsule)
{
	// Box center sits halfway between the capsule's two endpoints
	center = capsule.computeCenter();

	// Orient the box's first column along the capsule segment
	const PxVec3 segment = capsule.p1 - capsule.p0;
	const float segmentLen = segment.magnitude();
	if(segmentLen != 0.0f)
	{
		rot.column0 = segment / segmentLen;
		PxComputeBasisVectors(rot.column0, rot.column1, rot.column2);
	}
	else
	{
		// Degenerate (zero-length) capsule: fall back to identity orientation
		rot = PxMat33(PxIdentity);
	}

	// Half-extents: radius on all axes, plus half the segment length along X
	extents.x = capsule.radius + (segmentLen * 0.5f);
	extents.y = capsule.radius;
	extents.z = capsule.radius;
}
/**
Returns edges.
\return 24 indices (12 edges) indexing the list returned by ComputePoints()
*/
const PxU8* Gu::getBoxEdges()
{
	// Vertex numbering (matches ComputePoints):
	// 7+------+6 0 = ---
	// /| /| 1 = +--
	// / | / | 2 = ++-
	// / 4+---/--+5 3 = -+-
	// 3+------+2 / y z 4 = --+
	// | / | / | / 5 = +-+
	// |/ |/ |/ 6 = +++
	// 0+------+1 *---x 7 = -++
	static PxU8 edgeIndices[] = {
		0, 1,  1, 2,  2, 3,  3, 0,	// bottom face loop (z = -)
		7, 6,  6, 5,  5, 4,  4, 7,	// top face loop (z = +)
		1, 5,  6, 2,				// vertical edges
		3, 7,  4, 0
	};
	return edgeIndices;
}
// Computes the 8 corners of an oriented box into 'pts' (must hold 8 points).
// center  : box center in world space
// extents : half-extents along each local axis
// base0/1/2 : the box's (unit) local axes
// Vertex order follows the numbering in the diagram below.
void Gu::computeOBBPoints(PxVec3* PX_RESTRICT pts, const PxVec3& center, const PxVec3& extents, const PxVec3& base0, const PxVec3& base1, const PxVec3& base2)
{
PX_ASSERT(pts);
// "Rotated extents"
const PxVec3 axis0 = base0 * extents.x;
const PxVec3 axis1 = base1 * extents.y;
const PxVec3 axis2 = base2 * extents.z;
// 7+------+6 0 = ---
// /| /| 1 = +--
// / | / | 2 = ++-
// / 4+---/--+5 3 = -+-
// 3+------+2 / y z 4 = --+
// | / | / | / 5 = +-+
// |/ |/ |/ 6 = +++
// 0+------+1 *---x 7 = -++
// Original code: 24 vector ops
/* pts[0] = box.center - Axis0 - Axis1 - Axis2;
pts[1] = box.center + Axis0 - Axis1 - Axis2;
pts[2] = box.center + Axis0 + Axis1 - Axis2;
pts[3] = box.center - Axis0 + Axis1 - Axis2;
pts[4] = box.center - Axis0 - Axis1 + Axis2;
pts[5] = box.center + Axis0 - Axis1 + Axis2;
pts[6] = box.center + Axis0 + Axis1 + Axis2;
pts[7] = box.center - Axis0 + Axis1 + Axis2;*/
// Rewritten: 12 vector ops
// Seed all corners with center +/- axis0, then add/subtract the combined
// axis1/axis2 contributions shared between groups of four corners.
pts[0] = pts[3] = pts[4] = pts[7] = center - axis0;
pts[1] = pts[2] = pts[5] = pts[6] = center + axis0;
PxVec3 tmp = axis1 + axis2;
pts[0] -= tmp;
pts[1] -= tmp;
pts[6] += tmp;
pts[7] += tmp;
tmp = axis1 - axis2;
pts[2] += tmp;
pts[3] += tmp;
pts[4] -= tmp;
pts[5] -= tmp;
}
| 4,125 | C++ | 31.234375 | 158 | 0.620848 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuAABBTreeQuery.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_AABBTREEQUERY_H
#define GU_AABBTREEQUERY_H
#include "GuBVHTestsSIMD.h"
#include "GuAABBTreeBounds.h"
#include "foundation/PxInlineArray.h"
#include "GuAABBTreeNode.h"
namespace physx
{
namespace Gu
{
#define RAW_TRAVERSAL_STACK_SIZE 256
//////////////////////////////////////////////////////////////////////////
// Loads the bounds of the object at 'poolIndex' and outputs doubled values:
// center = min+max (2x the true center), extents = max-min (2x the half-extents).
// Callers either scale by 0.5 or use tests formulated for doubled values.
static PX_FORCE_INLINE void getBoundsTimesTwo(Vec4V& center, Vec4V& extents, const PxBounds3* bounds, PxU32 poolIndex)
{
const PxBounds3* objectBounds = bounds + poolIndex;
// PT: it's safe to V4LoadU because the pointer comes from the AABBTreeBounds class
const Vec4V minV = V4LoadU(&objectBounds->minimum.x);
const Vec4V maxV = V4LoadU(&objectBounds->maximum.x);
center = V4Add(maxV, minV);
extents = V4Sub(maxV, minV);
}
//////////////////////////////////////////////////////////////////////////
// Runs the overlap 'test' against every primitive stored in a leaf node and
// forwards surviving primitives to 'visitor'. When the leaf holds more than
// one primitive, each primitive's own bounds are tested first to cull it.
// Returns false if the visitor requested an early exit, true otherwise.
template<const bool tHasIndices, typename Test, typename Node, typename QueryCallback>
static PX_FORCE_INLINE bool doOverlapLeafTest(const Test& test, const Node* node, const PxBounds3* bounds, const PxU32* indices, QueryCallback& visitor)
{
PxU32 nbPrims = node->getNbPrimitives();
// With a single primitive the node bounds already equal the primitive bounds,
// so the per-primitive box test is redundant.
const bool doBoxTest = nbPrims > 1;
const PxU32* prims = tHasIndices ? node->getPrimitives(indices) : NULL;
while(nbPrims--)
{
const PxU32 primIndex = tHasIndices ? *prims++ : node->getPrimitiveIndex();
if(doBoxTest)
{
// getBoundsTimesTwo returns doubled center/extents; scale back by 0.5
Vec4V center2, extents2;
getBoundsTimesTwo(center2, extents2, bounds, primIndex);
const float half = 0.5f;
const FloatV halfV = FLoad(half);
const Vec4V extents_ = V4Scale(extents2, halfV);
const Vec4V center_ = V4Scale(center2, halfV);
if(!test(Vec3V_From_Vec4V(center_), Vec3V_From_Vec4V(extents_)))
continue;
}
if(!visitor.invoke(primIndex))
return false;
}
return true;
}
// Generic AABB-tree overlap query.
// Walks the tree depth-first with an explicit stack, testing each node's
// bounds with 'test'; primitives of leaves that pass are handed to 'visitor'
// via doOverlapLeafTest. operator() returns false if the visitor aborted the
// query, true otherwise.
template<const bool tHasIndices, typename Test, typename Tree, typename Node, typename QueryCallback>
class AABBTreeOverlap
{
public:
bool operator()(const AABBTreeBounds& treeBounds, const Tree& tree, const Test& test, QueryCallback& visitor)
{
const PxBounds3* bounds = treeBounds.getBounds();
PxInlineArray<const Node*, RAW_TRAVERSAL_STACK_SIZE> stack;
stack.forceSize_Unsafe(RAW_TRAVERSAL_STACK_SIZE);
const Node* const nodeBase = tree.getNodes();
stack[0] = nodeBase;	// start at the root
PxU32 stackIndex = 1;
while(stackIndex > 0)
{
const Node* node = stack[--stackIndex];
Vec3V center, extents;
node->getAABBCenterExtentsV(&center, &extents);
// Descend as long as the current node's bounds pass the test
while(test(center, extents))
{
if(node->isLeaf())
{
if(!doOverlapLeafTest<tHasIndices, Test, Node>(test, node, bounds, tree.getIndices(), visitor))
return false;
break;
}
// Internal node: continue with the first child, defer the sibling
const Node* children = node->getPos(nodeBase);
node = children;
stack[stackIndex++] = children + 1;
// Grow the stack if the traversal gets deeper than the preallocated size
if(stackIndex == stack.capacity())
stack.resizeUninitialized(stack.capacity() * 2);
node->getAABBCenterExtentsV(&center, &extents);
}
}
return true;
}
};
//////////////////////////////////////////////////////////////////////////
// Runs the ray/sweep 'test' against every primitive in a leaf node, invoking
// 'pcb' for surviving primitives and shrinking 'maxDist' (and the test's
// internal distance) whenever the callback reports a closer hit.
// Returns false if the callback aborted the query, true otherwise.
template <const bool tInflate, const bool tHasIndices, typename Node, typename QueryCallback> // use inflate=true for sweeps, inflate=false for raycasts
static PX_FORCE_INLINE bool doLeafTest( const Node* node, Gu::RayAABBTest& test, const PxBounds3* bounds, const PxU32* indices, PxReal& maxDist, QueryCallback& pcb)
{
PxU32 nbPrims = node->getNbPrimitives();
// With a single primitive the node bounds already equal the primitive bounds
const bool doBoxTest = nbPrims > 1;
const PxU32* prims = tHasIndices ? node->getPrimitives(indices) : NULL;
while(nbPrims--)
{
const PxU32 primIndex = tHasIndices ? *prims++ : node->getPrimitiveIndex();
if(doBoxTest)
{
// Doubled center/extents, matching RayAABBTest's doubled-value convention
Vec4V center_, extents_;
getBoundsTimesTwo(center_, extents_, bounds, primIndex);
if(!test.check<tInflate>(Vec3V_From_Vec4V(center_), Vec3V_From_Vec4V(extents_)))
continue;
}
// PT:
// - 'maxDist' is the current best distance. It can be seen as a "maximum allowed distance" (as passed to the
// template by users initially) but also as the "current minimum impact distance", so the name is misleading.
// Either way this is where we write & communicate the final/best impact distance to users.
//
// - the invoke function also takes a distance parameter, and this one is in/out. In input we must pass the
// current best distance to the leaf node, so that subsequent leaf-level queries can cull things away as
// much as possible. In output users return a shrunk distance value if they found a hit. We need to pass a
// copy of 'maxDist' ('md') since it would be too dangerous to rely on the arbitrary user code to always do
// the right thing. In particular if we'd pass 'maxDist' to invoke directly, and the called code would NOT
// respect the passed max value, it could potentially return a hit further than the best 'maxDist'. At which
// point the '(md < oldMaxDist)' test would fail but the damage would have already been done ('maxDist' would
// have already been overwritten with a larger value than before). Hence, we need 'md'.
//
// - now 'oldMaxDist' however is more subtle. In theory we wouldn't need it and we could just use '(md < maxDist)'
// in the test below. But that opens the door to subtle bugs: 'maxDist' is a reference to some value somewhere
// in the user's code, and we call the same user in invoke. It turns out that the invoke code can access and
// modify 'maxDist' on their side, even if we do not pass it to invoke. It's basically the same problem as
// before, but much more difficult to see. It does happen with the current PhysX implementations of the invoke
// functions: they modify the 'md' that we send them, but *also* 'maxDist' without the code below knowing
// about it. So the subsequent test fails again because md == maxDist. A potential solution would have been to
// work on a local copy of 'maxDist' in operator(), only writing out the final distance when returning from the
// function. Another solution used below is to introduce that local copy just here in the leaf code: that's
// where 'oldMaxDist' comes from.
PxReal oldMaxDist = maxDist;
PxReal md = maxDist;
if(!pcb.invoke(md, primIndex))
return false;
if(md < oldMaxDist)
{
// Closer hit found: shrink both the reported distance and the ray test
maxDist = md;
test.setDistance(md);
}
}
return true;
}
//////////////////////////////////////////////////////////////////////////
// Generic AABB-tree raycast/sweep query (tInflate=true for sweeps,
// tInflate=false for raycasts). Performs an ordered depth-first traversal:
// when both children intersect the ray, the child whose center is nearer
// along the ray direction is visited first and the other is deferred on the
// stack. operator() returns false if the callback aborted, true otherwise;
// 'maxDist' is shrunk to the best hit distance found.
template <const bool tInflate, const bool tHasIndices, typename Tree, typename Node, typename QueryCallback> // use inflate=true for sweeps, inflate=false for raycasts
class AABBTreeRaycast
{
public:
bool operator()(
const AABBTreeBounds& treeBounds, const Tree& tree,
const PxVec3& origin, const PxVec3& unitDir, PxReal& maxDist, const PxVec3& inflation,
QueryCallback& pcb)
{
const PxBounds3* bounds = treeBounds.getBounds();
// PT: we will pass center*2 and extents*2 to the ray-box code, to save some work per-box
// So we initialize the test with values multiplied by 2 as well, to get correct results
Gu::RayAABBTest test(origin*2.0f, unitDir*2.0f, maxDist, inflation*2.0f);
PxInlineArray<const Node*, RAW_TRAVERSAL_STACK_SIZE> stack;
stack.forceSize_Unsafe(RAW_TRAVERSAL_STACK_SIZE);
const Node* const nodeBase = tree.getNodes();
stack[0] = nodeBase;	// start at the root
PxU32 stackIndex = 1;
while(stackIndex--)
{
const Node* node = stack[stackIndex];
Vec3V center, extents;
node->getAABBCenterExtentsV2(&center, &extents);
if(test.check<tInflate>(center, extents)) // TODO: try timestamp ray shortening to skip this
{
// Descend to a leaf, always following the nearer intersected child
while(!node->isLeaf())
{
const Node* children = node->getPos(nodeBase);
Vec3V c0, e0;
children[0].getAABBCenterExtentsV2(&c0, &e0);
const PxU32 b0 = test.check<tInflate>(c0, e0);
Vec3V c1, e1;
children[1].getAABBCenterExtentsV2(&c1, &e1);
const PxU32 b1 = test.check<tInflate>(c1, e1);
if(b0 && b1) // if both intersect, push the one with the further center on the stack for later
{
// & 1 because FAllGrtr behavior differs across platforms
const PxU32 bit = FAllGrtr(V3Dot(V3Sub(c1, c0), test.mDir), FZero()) & 1;
stack[stackIndex++] = children + bit;
node = children + (1 - bit);
// Grow the stack if the traversal gets deeper than the preallocated size
if(stackIndex == stack.capacity())
stack.resizeUninitialized(stack.capacity() * 2);
}
else if(b0)
node = children;
else if(b1)
node = children + 1;
else
goto skip_leaf_code;	// neither child hit: abandon this subtree
}
if(!doLeafTest<tInflate, tHasIndices, Node>(node, test, bounds, tree.getIndices(), maxDist, pcb))
return false;
skip_leaf_code:;
}
}
return true;
}
};
// Return values for the per-node controller used by traverseBVH().
struct TraversalControl
{
enum Enum {
eDontGoDeeper,		// skip this node's subtree
eGoDeeper,			// descend, positive child first
eGoDeeperNegFirst,	// descend, negative child first
eAbort				// stop the entire traversal
};
};
// Generic depth-first BVH traversal driven by a user-supplied controller.
// traversalController.analyze(node, index) is called for every visited node
// and returns a TraversalControl value deciding whether to descend (and in
// which child order), skip the subtree, or abort the whole traversal.
template<typename T>
void traverseBVH(const Gu::BVHNode* nodes, T& traversalController, PxI32 rootNodeIndex = 0)
{
	PxInlineArray<PxI32, RAW_TRAVERSAL_STACK_SIZE> pending;
	PxI32 current = rootNodeIndex;
	for(;;)
	{
		const Gu::BVHNode& node = nodes[current];
		const TraversalControl::Enum decision = traversalController.analyze(node, current);
		if(decision == TraversalControl::eAbort)
			return;

		const bool descend = !node.isLeaf() && (decision == TraversalControl::eGoDeeper || decision == TraversalControl::eGoDeeperNegFirst);
		if(descend)
		{
			// Visit one child next; defer its sibling on the stack
			const bool negFirst = (decision == TraversalControl::eGoDeeperNegFirst);
			pending.pushBack(negFirst ? node.getPosIndex() : node.getNegIndex());
			current = negFirst ? node.getNegIndex() : node.getPosIndex();
		}
		else
		{
			// Leaf or skipped subtree: resume with the next deferred node
			if(pending.empty())
				return;
			current = pending.popBack();
		}
	}
}
}
}
#endif // SQ_AABBTREEQUERY_H
| 11,774 | C | 37.733553 | 169 | 0.667658 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuSweepTests.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "geometry/PxSphereGeometry.h"
#include "geometry/PxCustomGeometry.h"
#include "GuSweepTests.h"
#include "GuVecCapsule.h"
#include "GuVecBox.h"
#include "GuVecTriangle.h"
#include "GuSweepTriangleUtils.h"
#include "GuInternal.h"
#include "GuGJKRaycast.h"
using namespace physx;
using namespace Gu;
using namespace Cm;
using namespace physx::aos;
//#define USE_VIRTUAL_GJK
#ifdef USE_VIRTUAL_GJK
// Wrapper used when USE_VIRTUAL_GJK is defined: routes every sweep through a
// single gjkRaycastPenetration instantiation on the abstract GjkConvex
// interface instead of one instantiation per convex-type pair.
static bool virtualGjkRaycastPenetration(const GjkConvex& a, const GjkConvex& b, const aos::Vec3VArg initialDir, const aos::FloatVArg initialLambda, const aos::Vec3VArg s, const aos::Vec3VArg r, aos::FloatV& lambda,
aos::Vec3V& normal, aos::Vec3V& closestA, const PxReal _inflation, const bool initialOverlap)
{
return gjkRaycastPenetration<GjkConvex, GjkConvex >(a, b, initialDir, initialLambda, s, r, lambda, normal, closestA, _inflation, initialOverlap);
}
#endif
// Sweeps a capsule against a box using a GJK-based raycast.
// All work happens in the box's local frame; results (position/normal/distance)
// are written to 'sweepHit' in world space. Returns true if a hit (or initial
// overlap) was found.
bool sweepCapsule_BoxGeom(GU_CAPSULE_SWEEP_FUNC_PARAMS)
{
PX_UNUSED(hitFlags);
PX_UNUSED(threadContext);
using namespace aos;
PX_ASSERT(geom.getType() == PxGeometryType::eBOX);
const PxBoxGeometry& boxGeom = static_cast<const PxBoxGeometry&>(geom);
const FloatV zero = FZero();
const Vec3V zeroV = V3Zero();
const Vec3V boxExtents0 = V3LoadU(boxGeom.halfExtents);
const FloatV dist = FLoad(distance);
const Vec3V worldDir = V3LoadU(unitDir);
// Express the capsule in the box's local frame
const PxTransformV capPos = loadTransformU(capsulePose_);
const PxTransformV boxPos = loadTransformU(pose);
const PxMatTransformV aToB(boxPos.transformInv(capPos));
const FloatV capsuleHalfHeight = FLoad(capsuleGeom_.halfHeight);
const FloatV capsuleRadius = FLoad(lss.radius);
BoxV box(zeroV, boxExtents0);
CapsuleV capsule(aToB.p, aToB.rotate(V3Scale(V3UnitX(), capsuleHalfHeight)), capsuleRadius);
// Relative motion in box space; negated, so moving the box by 'dir' is
// equivalent to moving the capsule by +worldDir*dist
const Vec3V dir = boxPos.rotateInv(V3Neg(V3Scale(worldDir, dist)));
const bool isMtd = hitFlags & PxHitFlag::eMTD;	// request minimum-translation depenetration info on initial overlap
FloatV toi = FMax();
Vec3V closestA, normal;//closestA and normal is in the local space of box
const LocalConvex<CapsuleV> convexA(capsule);
const LocalConvex<BoxV> convexB(box);
const Vec3V initialSearchDir = V3Sub(capsule.getCenter(), box.getCenter());
#ifdef USE_VIRTUAL_GJK
if(!virtualGjkRaycastPenetration(convexA, convexB, initialSearchDir, zero, zeroV, dir, toi, normal, closestA, lss.radius + inflation, isMtd))
return false;
#else
if(!gjkRaycastPenetration<LocalConvex<CapsuleV>, LocalConvex<BoxV> >(convexA, convexB, initialSearchDir, zero, zeroV, dir, toi, normal, closestA, lss.radius + inflation, isMtd))
return false;
#endif
sweepHit.flags = PxHitFlag::eNORMAL;
// toi <= 0 means the shapes already overlap at the start of the sweep
if(FAllGrtrOrEq(zero, toi))
{
//initial overlap
if(isMtd)
{
// Report the MTD contact: project the contact point back along the normal
sweepHit.flags |= PxHitFlag::ePOSITION;
const Vec3V worldPointA = boxPos.transform(closestA);
const Vec3V destNormal = boxPos.rotate(normal);
const FloatV length = toi;
const Vec3V destWorldPointA = V3NegScaleSub(destNormal, length, worldPointA);
V3StoreU(destWorldPointA, sweepHit.position);
V3StoreU(destNormal, sweepHit.normal);
FStore(length, &sweepHit.distance);
}
else
{
// Overlap without MTD request: zero distance, normal opposes the sweep
sweepHit.distance = 0.0f;
sweepHit.normal = -unitDir;
}
}
else
{
// Regular hit: convert toi (fraction) to a world-space distance
sweepHit.flags |= PxHitFlag::ePOSITION;
const Vec3V worldPointA = boxPos.transform(closestA);
const Vec3V destNormal = boxPos.rotate(normal);
const FloatV length = FMul(dist, toi);
const Vec3V destWorldPointA = V3ScaleAdd(worldDir, length, worldPointA);
V3StoreU(destNormal, sweepHit.normal);
V3StoreU(destWorldPointA, sweepHit.position);
FStore(length, &sweepHit.distance);
}
return true;
}
// Sweeps a box against a sphere using a GJK-based raycast.
// The sphere is modeled as a zero-length CapsuleV; work happens in the box's
// local frame and results are written to 'sweepHit' in world space.
// Returns true if a hit (or initial overlap) was found.
bool sweepBox_SphereGeom(GU_BOX_SWEEP_FUNC_PARAMS)
{
PX_ASSERT(geom.getType() == PxGeometryType::eSPHERE);
PX_UNUSED(threadContext);
PX_UNUSED(hitFlags);
PX_UNUSED(boxGeom_);
const PxSphereGeometry& sphereGeom = static_cast<const PxSphereGeometry&>(geom);
const FloatV zero = FZero();
const Vec3V zeroV = V3Zero();
const Vec3V boxExtents = V3LoadU(box.extents);
const FloatV worldDist = FLoad(distance);
const Vec3V unitDirV = V3LoadU(unitDir);
const FloatV sphereRadius = FLoad(sphereGeom.radius);
// Express the sphere in the box's local frame
const PxTransformV spherePos = loadTransformU(pose);
const PxTransformV boxPos = loadTransformU(boxPose_);
const PxMatTransformV aToB(boxPos.transformInv(spherePos));
const BoxV boxV(zeroV, boxExtents);
const CapsuleV capsuleV(aToB.p, sphereRadius);	// sphere == point-capsule
//transform into b space
const Vec3V dir = boxPos.rotateInv(V3Scale(unitDirV, worldDist));
const bool isMtd = hitFlags & PxHitFlag::eMTD;	// request depenetration info on initial overlap
FloatV toi;
Vec3V closestA, normal;//closestA and normal is in the local space of box
const Vec3V initialSearchDir = V3Sub(capsuleV.getCenter(), boxV.getCenter());
const LocalConvex<CapsuleV> convexA(capsuleV);
const LocalConvex<BoxV> convexB(boxV);
#ifdef USE_VIRTUAL_GJK
if(!virtualGjkRaycastPenetration(convexA, convexB, initialSearchDir, zero, zeroV, dir, toi, normal, closestA, sphereGeom.radius+inflation, isMtd))
return false;
#else
if(!gjkRaycastPenetration<LocalConvex<CapsuleV>, LocalConvex<BoxV> >(convexA, convexB, initialSearchDir, zero, zeroV, dir, toi, normal, closestA, sphereGeom.radius+inflation, isMtd))
return false;
#endif
sweepHit.flags = PxHitFlag::eNORMAL;
//initial overlap
// toi <= 0 means the shapes already overlap at the start of the sweep
if(FAllGrtrOrEq(zero, toi))
{
if(isMtd)
{
sweepHit.flags |= PxHitFlag::ePOSITION;
const Vec3V destWorldPointA = boxPos.transform(closestA);
const Vec3V destNormal = V3Neg(boxPos.rotate(normal));	// flip: normal reported from the box's side
const FloatV length = toi;
V3StoreU(destNormal, sweepHit.normal);
V3StoreU(destWorldPointA, sweepHit.position);
FStore(length, &sweepHit.distance);
}
else
{
// Overlap without MTD request: zero distance, normal opposes the sweep
sweepHit.distance = 0.0f;
sweepHit.normal = -unitDir;
}
}
else
{
// Regular hit: convert toi (fraction) to a world-space distance
sweepHit.flags |= PxHitFlag::ePOSITION;
const Vec3V destWorldPointA = boxPos.transform(closestA);
const Vec3V destNormal = V3Neg(boxPos.rotate(normal));
const FloatV length = FMul(worldDist, toi);
V3StoreU(destNormal, sweepHit.normal);
V3StoreU(destWorldPointA, sweepHit.position);
FStore(length, &sweepHit.distance);
}
return true;
}
// Sweeps a box against a capsule using a GJK-based raycast.
// Work happens in the box's local frame; results are written to 'sweepHit'
// in world space. Returns true if a hit (or initial overlap) was found.
bool sweepBox_CapsuleGeom(GU_BOX_SWEEP_FUNC_PARAMS)
{
using namespace aos;
PX_ASSERT(geom.getType() == PxGeometryType::eCAPSULE);
PX_UNUSED(threadContext);
PX_UNUSED(hitFlags);
PX_UNUSED(boxGeom_);
const PxCapsuleGeometry& capsuleGeom = static_cast<const PxCapsuleGeometry&>(geom);
const FloatV capsuleHalfHeight = FLoad(capsuleGeom.halfHeight);
const FloatV capsuleRadius = FLoad(capsuleGeom.radius);
const FloatV zero = FZero();
const Vec3V zeroV = V3Zero();
const Vec3V boxExtents = V3LoadU(box.extents);
const FloatV worldDist = FLoad(distance);
const Vec3V unitDirV = V3LoadU(unitDir);
// Express the capsule in the box's local frame
const PxTransformV capPos = loadTransformU(pose);
const PxTransformV boxPos = loadTransformU(boxPose_);
const PxMatTransformV aToB(boxPos.transformInv(capPos));
const BoxV boxV(zeroV, boxExtents);
const CapsuleV capsuleV(aToB.p, aToB.rotate(V3Scale(V3UnitX(), capsuleHalfHeight)), capsuleRadius);
//transform into b space
const Vec3V dir = boxPos.rotateInv(V3Scale(unitDirV, worldDist));
const bool isMtd = hitFlags & PxHitFlag::eMTD;	// request depenetration info on initial overlap
FloatV toi;
Vec3V closestA, normal;//closestA and normal is in the local space of box
const Vec3V initialSearchDir = V3Sub(capsuleV.getCenter(), boxV.getCenter());
const LocalConvex<CapsuleV> convexA(capsuleV);
const LocalConvex<BoxV> convexB(boxV);
#ifdef USE_VIRTUAL_GJK
if(!virtualGjkRaycastPenetration(convexA, convexB, initialSearchDir, zero, zeroV, dir, toi, normal, closestA, capsuleGeom.radius+inflation, isMtd))
return false;
#else
if(!gjkRaycastPenetration<LocalConvex<CapsuleV>, LocalConvex<BoxV> >(convexA, convexB, initialSearchDir, zero, zeroV, dir, toi, normal, closestA, capsuleGeom.radius+inflation, isMtd))
return false;
#endif
sweepHit.flags = PxHitFlag::eNORMAL;
//initial overlap
// toi <= 0 means the shapes already overlap at the start of the sweep
if(FAllGrtrOrEq(zero, toi))
{
if(isMtd)
{
sweepHit.flags |= PxHitFlag::ePOSITION;
//initial overlap is toi < 0
const FloatV length = toi;
const Vec3V destWorldPointA = boxPos.transform(closestA);
const Vec3V destNormal = boxPos.rotate(normal);
V3StoreU(V3Neg(destNormal), sweepHit.normal);	// flip: normal reported from the box's side
V3StoreU(destWorldPointA, sweepHit.position);
FStore(length, &sweepHit.distance);
}
else
{
// Overlap without MTD request: zero distance, normal opposes the sweep
sweepHit.distance = 0.0f;
sweepHit.normal = -unitDir;
}
return true;
}
else
{
// Regular hit: convert toi (fraction) to a world-space distance
sweepHit.flags |= PxHitFlag::ePOSITION;
const Vec3V destWorldPointA = boxPos.transform(closestA);
const Vec3V destNormal = boxPos.rotate(normal);
const FloatV length = FMul(worldDist, toi);
V3StoreU(V3Neg(destNormal), sweepHit.normal);
V3StoreU(destWorldPointA, sweepHit.position);
FStore(length, &sweepHit.distance);
}
return true;
}
// Sweeps a box against another box using a GJK-based raycast.
// The swept box (geom) is expressed relative to the static box's frame;
// results are written to 'sweepHit' in world space. Returns true if a hit
// (or initial overlap) was found.
bool sweepBox_BoxGeom(GU_BOX_SWEEP_FUNC_PARAMS)
{
PX_ASSERT(geom.getType() == PxGeometryType::eBOX);
PX_UNUSED(threadContext);
PX_UNUSED(boxGeom_);
const PxBoxGeometry& boxGeom = static_cast<const PxBoxGeometry&>(geom);
const FloatV zero = FZero();
const Vec3V zeroV = V3Zero();
const Vec3V boxExtents0 = V3LoadU(boxGeom.halfExtents);
const Vec3V boxExtents1 = V3LoadU(box.extents);
const FloatV worldDist = FLoad(distance);
const Vec3V unitDirV = V3LoadU(unitDir);
const PxTransformV boxTrans0 = loadTransformU(pose);
const PxTransformV boxTrans1 = loadTransformU(boxPose_);
// Relative transform from box0 to box1's local frame
const PxMatTransformV aToB(boxTrans1.transformInv(boxTrans0));
const BoxV box0(zeroV, boxExtents0);
const BoxV box1(zeroV, boxExtents1);
//transform into b space
const Vec3V dir = boxTrans1.rotateInv(V3Scale(unitDirV, worldDist));
const bool isMtd = hitFlags & PxHitFlag::eMTD;	// request depenetration info on initial overlap
FloatV toi;
Vec3V closestA, normal;//closestA and normal is in the local space of box
const RelativeConvex<BoxV> convexA(box0, aToB);
const LocalConvex<BoxV> convexB(box1);
#ifdef USE_VIRTUAL_GJK
if(!virtualGjkRaycastPenetration(convexA, convexB, aToB.p, zero, zeroV, dir, toi, normal, closestA, inflation, isMtd))
return false;
#else
if(!gjkRaycastPenetration<RelativeConvex<BoxV>, LocalConvex<BoxV> >(convexA, convexB, aToB.p, zero, zeroV, dir, toi, normal, closestA, inflation, isMtd))
return false;
#endif
sweepHit.flags = PxHitFlag::eNORMAL;
// toi <= 0 means the boxes already overlap at the start of the sweep
if(FAllGrtrOrEq(zero, toi))
{
if(isMtd)
{
sweepHit.flags |= PxHitFlag::ePOSITION;
const FloatV length = toi;
const Vec3V destWorldPointA = boxTrans1.transform(closestA);
const Vec3V destNormal = V3Normalize(boxTrans1.rotate(normal));
V3StoreU(V3Neg(destNormal), sweepHit.normal);	// flip: normal reported from the swept box's side
V3StoreU(destWorldPointA, sweepHit.position);
FStore(length, &sweepHit.distance);
}
else
{
// Overlap without MTD request: zero distance, normal opposes the sweep
sweepHit.distance = 0.0f;
sweepHit.normal = -unitDir;
}
}
else
{
// Regular hit: convert toi (fraction) to a world-space distance
sweepHit.flags |= PxHitFlag::ePOSITION;
const Vec3V destWorldPointA = boxTrans1.transform(closestA);
const Vec3V destNormal = V3Normalize(boxTrans1.rotate(normal));
const FloatV length = FMul(worldDist, toi);
V3StoreU(V3Neg(destNormal), sweepHit.normal);
V3StoreU(destWorldPointA, sweepHit.position);
FStore(length, &sweepHit.distance);
}
return true;
}
// Sweeps a box against a list of triangles, keeping the closest hit.
// Triangles are transformed into the box's local (AABB) space; each triangle
// is first culled (backface test, projection interval test) before running the
// GJK raycast. The sweep distance shrinks as closer hits are found, so later
// triangles are culled against the best hit so far. Results are written to
// 'hit' in world space. Returns true if any hit (or initial overlap) was found.
//
// Fix: removed the unused local 'PxGeomSweepHit sweepHit' — all results are
// written directly into the 'hit' output parameter.
bool Gu::sweepBoxTriangles(GU_SWEEP_TRIANGLES_FUNC_PARAMS(PxBoxGeometry))
{
PX_UNUSED(hitFlags);
if(!nbTris)
return false;
const bool meshBothSides = hitFlags & PxHitFlag::eMESH_BOTH_SIDES;
const bool doBackfaceCulling = !doubleSided && !meshBothSides;
Box box;
buildFrom(box, pose.p, geom.halfExtents, pose.q);
// Move to AABB space
PxMat34 worldToBox;
computeWorldToBoxMatrix(worldToBox, box);
const PxVec3 localDir = worldToBox.rotate(unitDir);
const PxVec3 localMotion = localDir * distance;
const Vec3V base0 = V3LoadU(worldToBox.m.column0);
const Vec3V base1 = V3LoadU(worldToBox.m.column1);
const Vec3V base2 = V3LoadU(worldToBox.m.column2);
const Mat33V matV(base0, base1, base2);
const Vec3V p = V3LoadU(worldToBox.p);
const PxMatTransformV worldToBoxV(p, matV);
const FloatV zero = FZero();
const Vec3V zeroV = V3Zero();
const Vec3V boxExtents = V3LoadU(box.extents);
const Vec3V boxDir = V3LoadU(localDir);
const FloatV inflationV = FLoad(inflation);
const Vec3V absBoxDir = V3Abs(boxDir);
// Radius of the box projected onto the sweep direction (plus inflation),
// used for the quick interval rejection test below
const FloatV boxRadiusV = FAdd(V3Dot(absBoxDir, boxExtents), inflationV);
BoxV boxV(zeroV, boxExtents);
#if PX_DEBUG
PxU32 totalTestsExpected = nbTris;
PxU32 totalTestsReal = 0;
PX_UNUSED(totalTestsExpected);
PX_UNUSED(totalTestsReal);
#endif
Vec3V boxLocalMotion = V3LoadU(localMotion);
Vec3V minClosestA = zeroV, minNormal = zeroV;
PxU32 minTriangleIndex = 0;
PxVec3 bestTriNormal(0.0f);
FloatV dist = FLoad(distance);
const PxTransformV boxPos = loadTransformU(pose);
bool status = false;
// Optional cached starting triangle: test it first for early shrinking
const PxU32 idx = cachedIndex ? *cachedIndex : 0;
for(PxU32 ii=0;ii<nbTris;ii++)
{
const PxU32 triangleIndex = getTriangleIndex(ii, idx);
// Transform the triangle into box-local space
const Vec3V localV0 = V3LoadU(triangles[triangleIndex].verts[0]);
const Vec3V localV1 = V3LoadU(triangles[triangleIndex].verts[1]);
const Vec3V localV2 = V3LoadU(triangles[triangleIndex].verts[2]);
const Vec3V triV0 = worldToBoxV.transform(localV0);
const Vec3V triV1 = worldToBoxV.transform(localV1);
const Vec3V triV2 = worldToBoxV.transform(localV2);
const Vec3V triNormal = V3Cross(V3Sub(triV2, triV1),V3Sub(triV0, triV1));
if(doBackfaceCulling && FAllGrtrOrEq(V3Dot(triNormal, boxLocalMotion), zero)) // backface culling
continue;
// Interval rejection: skip triangles entirely behind the box or beyond
// the current best distance along the sweep direction
const FloatV dp0 = V3Dot(triV0, boxDir);
const FloatV dp1 = V3Dot(triV1, boxDir);
const FloatV dp2 = V3Dot(triV2, boxDir);
const FloatV dp = FMin(dp0, FMin(dp1, dp2));
const Vec3V dpV = V3Merge(dp0, dp1, dp2);
const FloatV temp1 = FAdd(boxRadiusV, dist);
const BoolV con0 = FIsGrtr(dp, temp1);
const BoolV con1 = V3IsGrtr(zeroV, dpV);
if(BAllEqTTTT(BOr(con0, con1)))
continue;
#if PX_DEBUG
totalTestsReal++;
#endif
TriangleV triangleV(triV0, triV1, triV2);
FloatV lambda;
Vec3V closestA, normal;//closestA and normal is in the local space of convex hull
const LocalConvex<TriangleV> convexA(triangleV);
const LocalConvex<BoxV> convexB(boxV);
const Vec3V initialSearchDir = V3Sub(triangleV.getCenter(), boxV.getCenter());
#ifdef USE_VIRTUAL_GJK
if(virtualGjkRaycastPenetration(convexA, convexB, initialSearchDir, zero, zeroV, boxLocalMotion, lambda, normal, closestA, inflation, false))
#else
if(gjkRaycastPenetration<LocalConvex<TriangleV>, LocalConvex<BoxV> >(convexA, convexB, initialSearchDir, zero, zeroV, boxLocalMotion, lambda, normal, closestA, inflation, false))
#endif
{
//hitCount++;
// lambda <= 0 => initial overlap with this triangle: report immediately
if(FAllGrtrOrEq(zero, lambda))
{
hit.distance = 0.0f;
hit.faceIndex = triangleIndex;
hit.normal = -unitDir;
hit.flags = PxHitFlag::eNORMAL;
return true;
}
// Closer hit: shrink the sweep so later triangles are culled against it
dist = FMul(dist, lambda);
boxLocalMotion = V3Scale(boxDir, dist);
minClosestA = closestA;
minNormal = normal;
minTriangleIndex = triangleIndex;
V3StoreU(triNormal, bestTriNormal);
status = true;
// eMESH_ANY: any hit suffices, stop at the first one
if(hitFlags & PxHitFlag::eMESH_ANY)
break;
}
}
if(!status)
return false;
// Convert the best (box-local) hit back to world space
hit.faceIndex = minTriangleIndex;
const Vec3V destNormal = V3Neg(V3Normalize(boxPos.rotate(minNormal)));
const Vec3V destWorldPointA = boxPos.transform(minClosestA);
V3StoreU(destNormal, hit.normal);
V3StoreU(destWorldPointA, hit.position);
FStore(dist, &hit.distance);
// PT: by design, returned normal is opposed to the sweep direction.
if(shouldFlipNormal(hit.normal, meshBothSides, doubleSided, bestTriNormal, unitDir))
hit.normal = -hit.normal;
hit.flags = PxHitFlag::ePOSITION|PxHitFlag::eNORMAL;
return true;
}
// Forward declarations of the concrete sweep backends referenced by the
// dispatch tables below; the implementations live in the other GuSweep*
// translation units. The *_Precise variants trade speed for accuracy.
// capsule vs *
bool sweepCapsule_SphereGeom		(GU_CAPSULE_SWEEP_FUNC_PARAMS);
bool sweepCapsule_PlaneGeom			(GU_CAPSULE_SWEEP_FUNC_PARAMS);
bool sweepCapsule_CapsuleGeom		(GU_CAPSULE_SWEEP_FUNC_PARAMS);
bool sweepCapsule_BoxGeom			(GU_CAPSULE_SWEEP_FUNC_PARAMS);
bool sweepCapsule_BoxGeom_Precise	(GU_CAPSULE_SWEEP_FUNC_PARAMS);
bool sweepCapsule_ConvexGeom		(GU_CAPSULE_SWEEP_FUNC_PARAMS);
bool sweepCapsule_MeshGeom			(GU_CAPSULE_SWEEP_FUNC_PARAMS);
bool sweepCapsule_HeightFieldGeom	(GU_CAPSULE_SWEEP_FUNC_PARAMS);
// box vs *
bool sweepBox_SphereGeom			(GU_BOX_SWEEP_FUNC_PARAMS);
bool sweepBox_SphereGeom_Precise	(GU_BOX_SWEEP_FUNC_PARAMS);
bool sweepBox_PlaneGeom				(GU_BOX_SWEEP_FUNC_PARAMS);
bool sweepBox_CapsuleGeom			(GU_BOX_SWEEP_FUNC_PARAMS);
bool sweepBox_CapsuleGeom_Precise	(GU_BOX_SWEEP_FUNC_PARAMS);
bool sweepBox_BoxGeom				(GU_BOX_SWEEP_FUNC_PARAMS);
bool sweepBox_BoxGeom_Precise		(GU_BOX_SWEEP_FUNC_PARAMS);
bool sweepBox_ConvexGeom			(GU_BOX_SWEEP_FUNC_PARAMS);
bool sweepBox_MeshGeom				(GU_BOX_SWEEP_FUNC_PARAMS);
bool sweepBox_HeightFieldGeom		(GU_BOX_SWEEP_FUNC_PARAMS);
bool sweepBox_HeightFieldGeom_Precise(GU_BOX_SWEEP_FUNC_PARAMS);
// convex vs *
bool sweepConvex_SphereGeom			(GU_CONVEX_SWEEP_FUNC_PARAMS);
bool sweepConvex_PlaneGeom			(GU_CONVEX_SWEEP_FUNC_PARAMS);
bool sweepConvex_CapsuleGeom		(GU_CONVEX_SWEEP_FUNC_PARAMS);
bool sweepConvex_BoxGeom			(GU_CONVEX_SWEEP_FUNC_PARAMS);
bool sweepConvex_ConvexGeom			(GU_CONVEX_SWEEP_FUNC_PARAMS);
bool sweepConvex_MeshGeom			(GU_CONVEX_SWEEP_FUNC_PARAMS);
bool sweepConvex_HeightFieldGeom	(GU_CONVEX_SWEEP_FUNC_PARAMS);
// Stub for capsule sweeps against unsupported geometry types: ignores every
// argument and reports no hit.
static bool sweepCapsule_InvalidGeom(GU_CAPSULE_SWEEP_FUNC_PARAMS)
{
	// silence unused-parameter warnings - the inputs are irrelevant here
	PX_UNUSED(geom);
	PX_UNUSED(pose);
	PX_UNUSED(capsuleGeom_);
	PX_UNUSED(capsulePose_);
	PX_UNUSED(lss);
	PX_UNUSED(unitDir);
	PX_UNUSED(distance);
	PX_UNUSED(sweepHit);
	PX_UNUSED(hitFlags);
	PX_UNUSED(inflation);
	PX_UNUSED(threadContext);
	return false;
}
// Stub for box sweeps against unsupported geometry types: ignores every
// argument and reports no hit.
static bool sweepBox_InvalidGeom(GU_BOX_SWEEP_FUNC_PARAMS)
{
	// silence unused-parameter warnings - the inputs are irrelevant here
	PX_UNUSED(geom);
	PX_UNUSED(pose);
	PX_UNUSED(boxGeom_);
	PX_UNUSED(boxPose_);
	PX_UNUSED(box);
	PX_UNUSED(unitDir);
	PX_UNUSED(distance);
	PX_UNUSED(sweepHit);
	PX_UNUSED(hitFlags);
	PX_UNUSED(inflation);
	PX_UNUSED(threadContext);
	return false;
}
// Stub for convex sweeps against unsupported geometry types: ignores every
// argument and reports no hit.
static bool sweepConvex_InvalidGeom(GU_CONVEX_SWEEP_FUNC_PARAMS)
{
	// silence unused-parameter warnings - the inputs are irrelevant here
	PX_UNUSED(geom);
	PX_UNUSED(pose);
	PX_UNUSED(convexGeom);
	PX_UNUSED(convexPose);
	PX_UNUSED(unitDir);
	PX_UNUSED(distance);
	PX_UNUSED(sweepHit);
	PX_UNUSED(hitFlags);
	PX_UNUSED(inflation);
	PX_UNUSED(threadContext);
	return false;
}
// Routes a capsule sweep to the user-supplied custom-geometry callbacks.
// Reports no hit if the target geometry is not actually a custom geometry.
static bool sweepCapsule_CustomGeom(GU_CAPSULE_SWEEP_FUNC_PARAMS)
{
	PX_UNUSED(lss);
	if(geom.getType() != PxGeometryType::eCUSTOM)
		return false;
	const PxCustomGeometry& custom = static_cast<const PxCustomGeometry&>(geom);
	return custom.callbacks->sweep(unitDir, distance, geom, pose, capsuleGeom_, capsulePose_, sweepHit, hitFlags, inflation, threadContext);
}
// Routes a box sweep to the user-supplied custom-geometry callbacks.
// Reports no hit if the target geometry is not actually a custom geometry.
static bool sweepBox_CustomGeom(GU_BOX_SWEEP_FUNC_PARAMS)
{
	PX_UNUSED(box);
	if(geom.getType() != PxGeometryType::eCUSTOM)
		return false;
	const PxCustomGeometry& custom = static_cast<const PxCustomGeometry&>(geom);
	return custom.callbacks->sweep(unitDir, distance, geom, pose, boxGeom_, boxPose_, sweepHit, hitFlags, inflation, threadContext);
}
// Routes a convex sweep to the user-supplied custom-geometry callbacks.
// Reports no hit if the target geometry is not actually a custom geometry.
static bool sweepConvex_CustomGeom(GU_CONVEX_SWEEP_FUNC_PARAMS)
{
	if(geom.getType() != PxGeometryType::eCUSTOM)
		return false;
	const PxCustomGeometry& custom = static_cast<const PxCustomGeometry&>(geom);
	return custom.callbacks->sweep(unitDir, distance, geom, pose, convexGeom, convexPose, sweepHit, hitFlags, inflation, threadContext);
}
// Sweep dispatch tables, one function pointer per geometry type of the shape
// being swept against (slot layout shown by the numeric comments; slots that
// cannot be swept against resolve to the *_InvalidGeom stubs). The second
// capsule table and second box table hold the precise variants.
Gu::GeomSweepFuncs gGeomSweepFuncs =
{
	// capsule sweeps
	{
		sweepCapsule_SphereGeom,		// 0
		sweepCapsule_PlaneGeom,			// 1
		sweepCapsule_CapsuleGeom,		// 2
		sweepCapsule_BoxGeom,			// 3
		sweepCapsule_ConvexGeom,		// 4
		sweepCapsule_InvalidGeom,		// 5
		sweepCapsule_InvalidGeom,		// 6
		sweepCapsule_MeshGeom,			// 7
		sweepCapsule_HeightFieldGeom,	// 8
		sweepCapsule_InvalidGeom,		// 9
		sweepCapsule_CustomGeom			// 10
	},
	// capsule sweeps, precise versions
	{
		sweepCapsule_SphereGeom,		// 0
		sweepCapsule_PlaneGeom,			// 1
		sweepCapsule_CapsuleGeom,		// 2
		sweepCapsule_BoxGeom_Precise,	// 3
		sweepCapsule_ConvexGeom,		// 4
		sweepCapsule_InvalidGeom,		// 5
		sweepCapsule_InvalidGeom,		// 6
		sweepCapsule_MeshGeom,			// 7
		sweepCapsule_HeightFieldGeom,	// 8
		sweepCapsule_InvalidGeom,		// 9
		sweepCapsule_CustomGeom			// 10
	},
	// box sweeps
	{
		sweepBox_SphereGeom,			// 0
		sweepBox_PlaneGeom,				// 1
		sweepBox_CapsuleGeom,			// 2
		sweepBox_BoxGeom,				// 3
		sweepBox_ConvexGeom,			// 4
		sweepBox_InvalidGeom,			// 5
		sweepBox_InvalidGeom,			// 6
		sweepBox_MeshGeom,				// 7
		sweepBox_HeightFieldGeom,		// 8
		sweepBox_InvalidGeom,			// 9
		sweepBox_CustomGeom				// 10
	},
	// box sweeps, precise versions
	{
		sweepBox_SphereGeom_Precise,	// 0
		sweepBox_PlaneGeom,				// 1
		sweepBox_CapsuleGeom_Precise,	// 2
		sweepBox_BoxGeom_Precise,		// 3
		sweepBox_ConvexGeom,			// 4
		sweepBox_InvalidGeom,			// 5
		sweepBox_InvalidGeom,			// 6
		sweepBox_MeshGeom,				// 7
		sweepBox_HeightFieldGeom_Precise,	// 8
		sweepBox_InvalidGeom,			// 9
		sweepBox_CustomGeom				// 10
	},
	// convex sweeps
	{
		sweepConvex_SphereGeom,		// 0
		sweepConvex_PlaneGeom,		// 1
		sweepConvex_CapsuleGeom,	// 2
		sweepConvex_BoxGeom,		// 3
		sweepConvex_ConvexGeom,		// 4
		sweepConvex_InvalidGeom,	// 5
		sweepConvex_InvalidGeom,	// 6
		sweepConvex_MeshGeom,		// 7
		sweepConvex_HeightFieldGeom,// 8
		sweepConvex_InvalidGeom,	// 9
		sweepConvex_CustomGeom		// 10
	}
};
// Accessor for the shared per-geometry-type sweep dispatch tables above.
PX_PHYSX_COMMON_API const GeomSweepFuncs& Gu::getSweepFuncTable()
{
	return gGeomSweepFuncs;
}
| 22,163 | C++ | 31.982143 | 216 | 0.753328 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuIncrementalAABBTree.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxMemory.h"
#include "foundation/PxVecMath.h"
#include "foundation/PxFPU.h"
#include "foundation/PxMathUtils.h"
#include "GuIncrementalAABBTree.h"
#include "GuAABBTreeBuildStats.h"
#include "GuAABBTreeNode.h"
#include "GuBVH.h"
using namespace physx;
using namespace aos;
using namespace Gu;
#define SUPPORT_TREE_ROTATION 1
#define DEALLOCATE_RESET 0
// Creates an empty tree. Both pools preallocate in chunks of 256 entries;
// the tree starts with no root until the first insert.
IncrementalAABBTree::IncrementalAABBTree():
	mIndicesPool("AABBTreeIndicesPool", 256),
	mNodesPool("AABBTreeNodesPool", 256 ),
	mRoot(NULL)
{
}
// Destroys the tree, returning all nodes and index lists to their pools.
IncrementalAABBTree::~IncrementalAABBTree()
{
	release();
}
// Frees the whole tree (nodes and index lists) and leaves it empty.
void IncrementalAABBTree::release()
{
	if(!mRoot)
		return;
	releaseNode(mRoot);
	mRoot = NULL;
}
// Recursively destroys a subtree. Nodes are allocated in contiguous pairs
// (both children of a parent come from one IncrementalAABBTreeNodePair), so a
// pair is returned to the pool exactly once: when the parent's second child
// (mChilds[1]) is visited, the pair is freed through its base address, which
// is the first child (mChilds[0]). The root has no parent and is its own
// pair, freed directly.
void IncrementalAABBTree::releaseNode(IncrementalAABBTreeNode* node)
{
	PX_ASSERT(node);
	if(node->isLeaf())
	{
		// a leaf owns an index list
		mIndicesPool.deallocate(node->mIndices);
	}
	else
	{
		releaseNode(node->mChilds[0]);
		releaseNode(node->mChilds[1]);
	}
	if(!node->mParent)
	{
		// root node - standalone pair, free it directly
		mNodesPool.deallocate(reinterpret_cast<IncrementalAABBTreeNodePair*>(node));
		return;
	}
	if(node->mParent->mChilds[1] == node)
	{
		// second child of the pair - free the pair via its base (the first child)
		mNodesPool.deallocate(reinterpret_cast<IncrementalAABBTreeNodePair*>(node->mParent->mChilds[0]));
	}
}
// check if node is inside the given bounds
// True when [nodeMin, nodeMax] is fully contained in [parentMin, parentMax]
// (x/y/z lanes only).
PX_FORCE_INLINE static bool nodeInsideBounds(const Vec4V& nodeMin, const Vec4V& nodeMax, const Vec4V& parentMin, const Vec4V& parentMax)
{
	const PxIntBool minOutside = PxIntBool(V4AnyGrtr3(parentMin, nodeMin));
	const PxIntBool maxOutside = PxIntBool(V4AnyGrtr3(nodeMax, parentMax));
	return !minOutside && !maxOutside;
}
// update the node parent hierarchy, when insert happen, we can early exit when the node is inside its parent
// no further update is needed
// Propagates an enlarged child volume up the ancestor chain after an insert.
// Stops as soon as an ancestor already encloses the updated child: nothing
// above that ancestor can change either.
PX_FORCE_INLINE static void updateHierarchyAfterInsert(IncrementalAABBTreeNode* node)
{
	IncrementalAABBTreeNode* current = node;
	for(IncrementalAABBTreeNode* parent = node->mParent; parent; parent = parent->mParent)
	{
		if(nodeInsideBounds(current->mBVMin, current->mBVMax, parent->mBVMin, parent->mBVMax))
			break;	// early out - the parent already contains the child
		// refit the parent from both children
		parent->mBVMin = V4Min(parent->mChilds[0]->mBVMin, parent->mChilds[1]->mBVMin);
		parent->mBVMax = V4Max(parent->mChilds[0]->mBVMax, parent->mChilds[1]->mBVMax);
		current = parent;
	}
}
// add an index into the leaf indices list and update the node bounds
// Appends a primitive index to a leaf that still has spare capacity, grows
// the leaf bounds to cover it, and refits the ancestor chain.
PX_FORCE_INLINE static void addPrimitiveIntoNode(IncrementalAABBTreeNode* node, const PoolIndex index, const Vec4V& minV, const Vec4V& maxV)
{
	PX_ASSERT(node->isLeaf());
	AABBTreeIndices& leafIndices = *node->mIndices;
	PX_ASSERT(leafIndices.nbIndices < INCR_NB_OBJECTS_PER_NODE);
	// append the new handle
	leafIndices.indices[leafIndices.nbIndices] = index;
	leafIndices.nbIndices++;
	// grow the leaf volume to include the new primitive
	node->mBVMin = V4Min(node->mBVMin, minV);
	node->mBVMax = V4Max(node->mBVMax, maxV);
	updateHierarchyAfterInsert(node);
}
// check if node does intersect with given bounds
// True when the node's AABB overlaps [minV, maxV] - fails if the boxes are
// separated on any of the x/y/z axes.
PX_FORCE_INLINE static bool nodeIntersection(IncrementalAABBTreeNode& node, const Vec4V& minV, const Vec4V& maxV)
{
	const PxIntBool separatedLow = PxIntBool(V4AnyGrtr3(node.mBVMin, maxV));
	const PxIntBool separatedHigh = PxIntBool(V4AnyGrtr3(minV, node.mBVMax));
	return !separatedLow && !separatedHigh;
}
// traversal strategy
// Chooses which child to descend into: returns 1 when the test point is
// closer to child1's center, else 0. Centers are computed as (max + min),
// i.e. twice the true center - consistent with how callers build
// testCenterV, so the halving cancels out of the distance comparison.
// When testRotation is set, it additionally flags the pair as a rotation
// candidate (rotateNode) if one child's volume exceeds 3x the other's,
// reporting the larger child's slot index in largesRotateNode.
PX_FORCE_INLINE static PxU32 traversalDirection(const IncrementalAABBTreeNode& child0, const IncrementalAABBTreeNode& child1, const Vec4V& testCenterV,
	bool testRotation, bool& rotateNode, PxU32& largesRotateNode)
{
	// traverse in the direction of a node which is closer
	// we compare the node and object centers
	const Vec4V centerCh0V = V4Add(child0.mBVMax, child0.mBVMin);
	const Vec4V centerCh1V = V4Add(child1.mBVMax, child1.mBVMin);
	const Vec4V ch0D = V4Sub(testCenterV, centerCh0V);
	const Vec4V ch1D = V4Sub(testCenterV, centerCh1V);
	if(testRotation)
	{
		// if some volume is 3x larger than we do a rotation
		const float volumeCompare = 3.0f;
		PX_ALIGN(16, PxVec4) sizeCh0;
		PX_ALIGN(16, PxVec4) sizeCh1;
		const Vec4V sizeCh0V = V4Sub(child0.mBVMax, child0.mBVMin);
		const Vec4V sizeCh1V = V4Sub(child1.mBVMax, child1.mBVMin);
		V4StoreA(sizeCh0V, &sizeCh0.x);
		V4StoreA(sizeCh1V, &sizeCh1.x);
		const float volumeCh0 = sizeCh0.x*sizeCh0.y*sizeCh0.z;
		const float volumeCh1 = sizeCh1.x*sizeCh1.y*sizeCh1.z;
		if((volumeCh0*volumeCompare < volumeCh1) || (volumeCh1*volumeCompare < volumeCh0))
		{
			largesRotateNode = (volumeCh0 > volumeCh1) ? 0u : 1u;
			rotateNode = true;
		}
	}
	// compare squared distances to each child's (doubled) center
	const BoolV con = FIsGrtr(V4Dot3(ch0D, ch0D), V4Dot3(ch1D, ch1D));
	return (BAllEqTTTT(con) == 1) ? PxU32(1) : PxU32(0);
}
// remove an index from the leaf
// Removes one occurrence of the given index from a leaf by swapping the last
// entry into its slot. The leaf must keep at least one primitive afterwards
// (the single-primitive case is handled by the caller, which collapses the
// leaf instead).
PX_FORCE_INLINE static void removePrimitiveFromNode(IncrementalAABBTreeNode* node, const PoolIndex index)
{
	AABBTreeIndices& leafIndices = *node->mIndices;
	PX_ASSERT(leafIndices.nbIndices > 1);
	PxU32 slot = leafIndices.nbIndices;
	while(slot--)
	{
		if(leafIndices.indices[slot] != index)
			continue;
		// overwrite the found slot with the last entry and shrink the list
		leafIndices.nbIndices--;
		leafIndices.indices[slot] = leafIndices.indices[leafIndices.nbIndices];
		return;
	}
	// if handle was not found something is wrong here
	PX_ASSERT(0);
}
// check if bounds are equal with given node min/max
// True when both AABBs have identical min and max vectors.
PX_FORCE_INLINE static bool boundsEqual(const Vec4V& testMin, const Vec4V& testMax, const Vec4V& nodeMin, const Vec4V& nodeMax)
{
	const PxIntBool minsMatch = PxIntBool(V4AllEq(nodeMin, testMin));
	const PxIntBool maxsMatch = PxIntBool(V4AllEq(testMax, nodeMax));
	return minsMatch && maxsMatch;
}
// update the node hierarchy bounds when remove happen, we can early exit if the bounds are equal and no bounds update
// did happen
// Refits bounds after a removal, starting at the given node and walking up.
// A leaf is rebuilt from its remaining primitives' bounds, an internal node
// from its children. The upward walk stops early once a parent's recomputed
// bounds equal its stored bounds - nothing above can change then.
PX_FORCE_INLINE static void updateHierarchyAfterRemove(IncrementalAABBTreeNode* node, const PxBounds3* bounds)
{
	if(node->isLeaf())
	{
		// recompute the leaf volume from scratch over its primitives
		const AABBTreeIndices& indices = *node->mIndices;
		PX_ASSERT(indices.nbIndices > 0);
		Vec4V bvMin = V4LoadU(&bounds[indices.indices[0]].minimum.x);
		Vec4V bvMax = V4LoadU(&bounds[indices.indices[0]].maximum.x);
		for(PxU32 i = 1; i < indices.nbIndices; i++)
		{
			const Vec4V minV = V4LoadU(&bounds[indices.indices[i]].minimum.x);
			const Vec4V maxV = V4LoadU(&bounds[indices.indices[i]].maximum.x);
			bvMin = V4Min(bvMin, minV);
			bvMax = V4Max(bvMax, maxV);
		}
		// W is cleared so lane 3 never affects the comparisons
		node->mBVMin = V4ClearW(bvMin);
		node->mBVMax = V4ClearW(bvMax);
	}
	else
	{
		// internal node - refit from the two children
		node->mBVMin = V4Min(node->mChilds[0]->mBVMin, node->mChilds[1]->mBVMin);
		node->mBVMax = V4Max(node->mChilds[0]->mBVMax, node->mChilds[1]->mBVMax);
	}
	IncrementalAABBTreeNode* parent = node->mParent;
	while(parent)
	{
		const Vec4V newMinV = V4Min(parent->mChilds[0]->mBVMin, parent->mChilds[1]->mBVMin);
		const Vec4V newMaxV = V4Max(parent->mChilds[0]->mBVMax, parent->mChilds[1]->mBVMax);
		// early out when the refit did not change this ancestor
		const bool earlyExit = boundsEqual(newMinV, newMaxV, parent->mBVMin, parent->mBVMax);
		if(earlyExit)
			break;
		parent->mBVMin = newMinV;
		parent->mBVMax = newMaxV;
		parent = parent->mParent;
	}
}
// split the leaf node along the most significant axis
// Splits a full leaf while inserting primitive 'index' (whose bounds are
// [minV, maxV]). The leaf turns into an internal node with two fresh child
// leaves; the old primitives are partitioned by their center against the
// merged volume's center along its largest axis. The new primitive goes to
// the matching side, or to the empty side when the partition is degenerate.
// Returns the child leaf that received the new primitive.
IncrementalAABBTreeNode* IncrementalAABBTree::splitLeafNode(IncrementalAABBTreeNode* node, const PoolIndex index, const Vec4V& minV, const Vec4V& maxV, const PxBounds3* bounds)
{
	PX_ASSERT(node->isLeaf());
	IncrementalAABBTreeNode* returnNode = NULL;
	// create new pairs of nodes, parent will remain the node (the one we split)
	IncrementalAABBTreeNode* child0 = reinterpret_cast<IncrementalAABBTreeNode*>(mNodesPool.allocate());
	IncrementalAABBTreeNode* child1 = child0 + 1;	// nodes live in contiguous pairs
	AABBTreeIndices* newIndices = mIndicesPool.allocate();
	// get the split axis
	PX_ALIGN(16, PxVec4) vars;
	PX_ALIGN(16, PxVec4) center;
	const float half = 0.5f;
	const FloatV halfV = FLoad(half);
	const Vec4V newMinV = V4Min(node->mBVMin, minV);	// leaf bounds grown to cover the new primitive
	const Vec4V newMaxV = V4Max(node->mBVMax, maxV);
	const Vec4V centerV = V4Scale(V4Add(newMaxV, newMinV), halfV);
	const Vec4V varsV = V4Sub(newMaxV, newMinV);
	V4StoreA(varsV, &vars.x);
	V4StoreA(centerV, &center.x);
	const PxU32 axis = PxLargestAxis(PxVec3(vars.x, vars.y, vars.z));
	// setup parent
	child0->mParent = node;
	child1->mParent = node;
	child0->mIndices = node->mIndices;	// child0 inherits the original index list
	child0->mChilds[1] = NULL;
	child1->mIndices = newIndices;
	child1->mChilds[1] = NULL;
	AABBTreeIndices& child0Indices = *child0->mIndices;	// the original node indices
	AABBTreeIndices& child1Indices = *child1->mIndices;	// new empty indices
	child1Indices.nbIndices = 0;
	// split the node - primitives centered at or below the split plane move to child1
	for(PxU32 i = child0Indices.nbIndices; i--;)
	{
		const PxBounds3& primitiveBounds = bounds[child0Indices.indices[i]];
		const float pCenter = primitiveBounds.getCenter(axis);
		if(center[axis] >= pCenter)
		{
			// move to new node (swap-with-last removal from child0)
			child1Indices.indices[child1Indices.nbIndices++] = child0Indices.indices[i];
			child0Indices.nbIndices--;
			child0Indices.indices[i] = child0Indices.indices[child0Indices.nbIndices];
		}
	}
	// check where to put the new node, if there is still a free space
	if(child0Indices.nbIndices == 0 || child1Indices.nbIndices == INCR_NB_OBJECTS_PER_NODE)
	{
		// degenerate split (everything moved) - new primitive becomes child0's only entry
		child0Indices.nbIndices = 1;
		child0Indices.indices[0] = index;
		returnNode = child0;
	}
	else
	{
		if(child0Indices.nbIndices == INCR_NB_OBJECTS_PER_NODE)
		{
			// degenerate split (nothing moved) - new primitive becomes child1's only entry
			child1Indices.nbIndices = 1;
			child1Indices.indices[0] = index;
			returnNode = child1;
		}
		else
		{
			// both sides have room - place the new primitive by the same center test
			const PxBounds3& primitiveBounds = bounds[index];
			const float pCenter = primitiveBounds.getCenter(axis);
			if(center[axis] >= pCenter)
			{
				// move to new node
				child1Indices.indices[child1Indices.nbIndices++] = index;
				returnNode = child1;
			}
			else
			{
				// move to old node
				child0Indices.indices[child0Indices.nbIndices++] = index;
				returnNode = child0;
			}
		}
	}
	// update bounds for the new nodes
	Vec4V bvMin = V4LoadU(&bounds[child0Indices.indices[0]].minimum.x);
	Vec4V bvMax = V4LoadU(&bounds[child0Indices.indices[0]].maximum.x);
	for(PxU32 i = 1; i < child0Indices.nbIndices; i++)
	{
		const Vec4V nodeMinV = V4LoadU(&bounds[child0Indices.indices[i]].minimum.x);
		const Vec4V nodeMaxV = V4LoadU(&bounds[child0Indices.indices[i]].maximum.x);
		bvMin = V4Min(bvMin, nodeMinV);
		bvMax = V4Max(bvMax, nodeMaxV);
	}
	child0->mBVMin = V4ClearW(bvMin);
	child0->mBVMax = V4ClearW(bvMax);
	bvMin = V4LoadU(&bounds[child1Indices.indices[0]].minimum.x);
	bvMax = V4LoadU(&bounds[child1Indices.indices[0]].maximum.x);
	for(PxU32 i = 1; i < child1Indices.nbIndices; i++)
	{
		const Vec4V nodeMinV = V4LoadU(&bounds[child1Indices.indices[i]].minimum.x);
		const Vec4V nodeMaxV = V4LoadU(&bounds[child1Indices.indices[i]].maximum.x);
		bvMin = V4Min(bvMin, nodeMinV);
		bvMax = V4Max(bvMax, nodeMaxV);
	}
	child1->mBVMin = V4ClearW(bvMin);
	child1->mBVMax = V4ClearW(bvMax);
	// node parent is the same, setup the new childs
	node->mChilds[0] = child0;
	node->mChilds[1] = child1;
	node->mBVMin = newMinV;
	node->mBVMax = newMaxV;
	updateHierarchyAfterInsert(node);
	PX_ASSERT(returnNode);
	return returnNode;
}
// Rebalances an unbalanced pair: takes the leaf of the larger child closest
// to the smaller child's center, detaches it (collapsing its parent), and
// re-inserts it into the smaller child - either merged into an existing leaf
// or as a new leaf pair. changedLeaf is kept consistent with every leaf
// moved, merged or created. May recurse once more (rotateAgain) if the
// descent into the smaller child reveals another unbalanced pair.
void IncrementalAABBTree::rotateTree(IncrementalAABBTreeNode* node, NodeList& changedLeaf, PxU32 largesRotateNodeIn, const PxBounds3* bounds, bool rotateAgain)
{
	PX_ASSERT(!node->isLeaf());
	IncrementalAABBTreeNode* smallerNode = node->mChilds[(largesRotateNodeIn == 0) ? 1 : 0];
	IncrementalAABBTreeNode* largerNode = node->mChilds[largesRotateNodeIn];
	PX_ASSERT(!largerNode->isLeaf());
	// take a leaf from larger node and add it to the smaller node
	const Vec4V testCenterV = V4Add(smallerNode->mBVMax, smallerNode->mBVMin);	// 2x center, see traversalDirection
	IncrementalAABBTreeNode* rotationNode = NULL;	// store a node that seems not balanced
	PxU32 largesRotateNode = 0;
	bool rotateNode = false;
	// descend through the larger child toward the smaller child's center
	PxU32 traversalIndex = traversalDirection(*largerNode->mChilds[0], *largerNode->mChilds[1], testCenterV, false, rotateNode, largesRotateNode);
	IncrementalAABBTreeNode* closestNode = largerNode->mChilds[traversalIndex];
	while(!closestNode->isLeaf())
	{
		PxPrefetchLine(closestNode->mChilds[0]->mChilds[0]);
		PxPrefetchLine(closestNode->mChilds[1]->mChilds[0]);
		traversalIndex = traversalDirection(*closestNode->mChilds[0], *closestNode->mChilds[1], testCenterV, false, rotateNode, largesRotateNode);
		closestNode = closestNode->mChilds[traversalIndex];
	}
	// we have the leaf that we want to rotate
	// create new parent and remove the current leaf
	changedLeaf.findAndReplaceWithLast(closestNode);
	IncrementalAABBTreeNode* parent = closestNode->mParent;
	IncrementalAABBTreeNodePair* removedPair = reinterpret_cast<IncrementalAABBTreeNodePair*>(parent->mChilds[0]);	// pair base address
	PX_ASSERT(!parent->isLeaf());
	// copy the remaining child into parent
	IncrementalAABBTreeNode* remainingChild = (parent->mChilds[0] == closestNode) ? parent->mChilds[1] : parent->mChilds[0];
	parent->mBVMax = remainingChild->mBVMax;
	parent->mBVMin = remainingChild->mBVMin;
	if(remainingChild->isLeaf())
	{
		// parent becomes a leaf holding the sibling's primitives
		parent->mIndices = remainingChild->mIndices;
		parent->mChilds[1] = NULL;
		changedLeaf.findAndReplaceWithLast(remainingChild);
		changedLeaf.pushBack(parent);
	}
	else
	{
		// parent adopts the sibling's children
		parent->mChilds[0] = remainingChild->mChilds[0];
		parent->mChilds[0]->mParent = parent;
		parent->mChilds[1] = remainingChild->mChilds[1];
		parent->mChilds[1]->mParent = parent;
	}
	// update the hieararchy after the node removal
	if(parent->mParent)
	{
		updateHierarchyAfterRemove(parent->mParent, bounds);
	}
	// find new spot for the node
	// take a leaf from larger node and add it to the smaller node
	IncrementalAABBTreeNode* newSpotNode = NULL;
	if(smallerNode->isLeaf())
	{
		newSpotNode = smallerNode;
	}
	else
	{
		// descend through the smaller child toward the detached leaf's center,
		// watching for a further unbalanced pair to rotate afterwards
		const Vec4V testClosestNodeCenterV = V4Add(closestNode->mBVMax, closestNode->mBVMin);
		rotationNode = NULL;	// store a node that seems not balanced
		largesRotateNode = 0;
		rotateNode = false;
		bool testRotation = rotateAgain;
		traversalIndex = traversalDirection(*smallerNode->mChilds[0], *smallerNode->mChilds[1], testClosestNodeCenterV, testRotation, rotateNode, largesRotateNode);
		if(rotateNode && !smallerNode->mChilds[largesRotateNode]->isLeaf())
		{
			rotationNode = smallerNode;
			testRotation = false;	// only track the first candidate
		}
		newSpotNode = smallerNode->mChilds[traversalIndex];
		while(!newSpotNode->isLeaf())
		{
			PxPrefetchLine(newSpotNode->mChilds[0]->mChilds[0]);
			PxPrefetchLine(newSpotNode->mChilds[1]->mChilds[0]);
			traversalIndex = traversalDirection(*newSpotNode->mChilds[0], *newSpotNode->mChilds[1], testClosestNodeCenterV, testRotation, rotateNode, largesRotateNode);
			if(!rotationNode && rotateNode && !newSpotNode->mChilds[largesRotateNode]->isLeaf())
			{
				rotationNode = newSpotNode;
				testRotation = false;
			}
			newSpotNode = newSpotNode->mChilds[traversalIndex];
		}
	}
	// we have the closest leaf in the smaller child, lets merge it with the closestNode
	if(newSpotNode->getNbPrimitives() + closestNode->getNbPrimitives() <= INCR_NB_OBJECTS_PER_NODE)
	{
		// all primitives fit into new spot, we merge here simply
		AABBTreeIndices* targetIndices = newSpotNode->mIndices;
		const AABBTreeIndices* sourceIndices = closestNode->mIndices;
		for(PxU32 i = 0; i < sourceIndices->nbIndices; i++)
		{
			targetIndices->indices[targetIndices->nbIndices++] = sourceIndices->indices[i];
		}
		PX_ASSERT(targetIndices->nbIndices <= INCR_NB_OBJECTS_PER_NODE);
		if(changedLeaf.find(newSpotNode) == changedLeaf.end())
			changedLeaf.pushBack(newSpotNode);
		mIndicesPool.deallocate(closestNode->mIndices);
		newSpotNode->mBVMin = V4Min(newSpotNode->mBVMin, closestNode->mBVMin);
		newSpotNode->mBVMax = V4Max(newSpotNode->mBVMax, closestNode->mBVMax);
		updateHierarchyAfterInsert(newSpotNode);
	}
	else
	{
		// we need to make new parent with newSpotNode and closestNode as childs
		// create new pairs of nodes, parent will remain the node (the one we split)
		IncrementalAABBTreeNode* child0 = reinterpret_cast<IncrementalAABBTreeNode*>(mNodesPool.allocate());
		IncrementalAABBTreeNode* child1 = child0 + 1;
		// setup parent
		child0->mParent = newSpotNode;
		child1->mParent = newSpotNode;
		child0->mIndices = newSpotNode->mIndices;
		child0->mChilds[1] = NULL;
		child0->mBVMin = newSpotNode->mBVMin;
		child0->mBVMax = newSpotNode->mBVMax;
		child1->mIndices = closestNode->mIndices;
		child1->mChilds[1] = NULL;
		child1->mBVMin = closestNode->mBVMin;
		child1->mBVMax = closestNode->mBVMax;
		// node parent is the same, setup the new childs
		newSpotNode->mChilds[0] = child0;
		newSpotNode->mChilds[1] = child1;
		newSpotNode->mBVMin = V4Min(child0->mBVMin, child1->mBVMin);
		newSpotNode->mBVMax = V4Max(child0->mBVMax, child1->mBVMax);
		updateHierarchyAfterInsert(newSpotNode);
		changedLeaf.findAndReplaceWithLast(newSpotNode);
		changedLeaf.pushBack(child0);
		changedLeaf.pushBack(child1);
	}
	// deallocate the closestNode, it has been moved
#if DEALLOCATE_RESET
	removedPair->mNode0.mChilds[0] = NULL;
	removedPair->mNode0.mChilds[1] = NULL;
	removedPair->mNode1.mChilds[0] = NULL;
	removedPair->mNode1.mChilds[1] = NULL;
#endif
	mNodesPool.deallocate(removedPair);
	// try to do one more rotation for the newly added node part of tree
	if(rotationNode)
	{
		rotateTree(rotationNode, changedLeaf, largesRotateNode, bounds, false);
	}
}
// insert new bounds into tree
// Inserts a primitive and returns the leaf it landed in, or NULL when the
// insert triggered a tree rotation (the affected leaves are then reported
// through changedLeaf instead). changedLeaf may arrive holding one leaf the
// caller already marked as changed; it is kept consistent with any split
// performed here.
IncrementalAABBTreeNode* IncrementalAABBTree::insert(const PoolIndex index, const PxBounds3* bounds, NodeList& changedLeaf)
{
	PX_SIMD_GUARD;
	// get the bounds, reset the W value
	const Vec4V minV = V4ClearW(V4LoadU(&bounds[index].minimum.x));
	const Vec4V maxV = V4ClearW(V4LoadU(&bounds[index].maximum.x));
	// check if tree is empty
	if(!mRoot)
	{
		// make it a leaf
		AABBTreeIndices* indices = mIndicesPool.construct(index);
		mRoot = reinterpret_cast<IncrementalAABBTreeNode*> (mNodesPool.allocate());
		mRoot->mBVMin = minV;
		mRoot->mBVMax = maxV;
		mRoot->mIndices = indices;
		mRoot->mChilds[1] = NULL;
		mRoot->mParent = NULL;
		return mRoot;
	}
	else
	{
		// check if root is a leaf
		if(mRoot->isLeaf())
		{
			// if we still can insert the primitive into the leaf, or we need to split
			if(mRoot->getNbPrimitives() < INCR_NB_OBJECTS_PER_NODE)
			{
				// simply add the primitive into the current leaf
				addPrimitiveIntoNode(mRoot, index, minV, maxV);
				return mRoot;
			}
			else
			{
				// need to split the node
				// check if the leaf is not marked as changed, we need to remove it
				if(!changedLeaf.empty())
				{
					PX_ASSERT(changedLeaf.size() == 1);
					if(changedLeaf[0] == mRoot)
						changedLeaf.popBack();
				}
				IncrementalAABBTreeNode* retNode = splitLeafNode(mRoot, index, minV, maxV, bounds);
				// the split turned the old root into an internal node - both new leaves changed
				mRoot = retNode->mParent;
				IncrementalAABBTreeNode* sibling = (mRoot->mChilds[0] == retNode) ? mRoot->mChilds[1] : mRoot->mChilds[0];
				if(sibling->isLeaf())
					changedLeaf.pushBack(sibling);
				changedLeaf.pushBack(retNode);
				return retNode;
			}
		}
		else
		{
			const Vec4V testCenterV = V4Add(maxV, minV);	// 2x the center, see traversalDirection
			IncrementalAABBTreeNode* returnNode = NULL;
			IncrementalAABBTreeNode* rotationNode = NULL;	// store a node that seems not balanced
			PxU32 largesRotateNode = 0;
			bool rotateNode = false;
#if SUPPORT_TREE_ROTATION
			bool testRotation = true;
#else
			bool testRotation = false;
#endif
			// we dont need to modify root, lets traverse the tree to find the right spot
			PxU32 traversalIndex = traversalDirection(*mRoot->mChilds[0], *mRoot->mChilds[1], testCenterV, testRotation, rotateNode, largesRotateNode);
			if(rotateNode && !mRoot->mChilds[largesRotateNode]->isLeaf())
			{
				rotationNode = mRoot;
				testRotation = false;	// only the first unbalanced pair is remembered
			}
			IncrementalAABBTreeNode* baseNode = mRoot->mChilds[traversalIndex];
			while(!baseNode->isLeaf())
			{
				PxPrefetchLine(baseNode->mChilds[0]->mChilds[0]);
				PxPrefetchLine(baseNode->mChilds[1]->mChilds[0]);
				traversalIndex = traversalDirection(*baseNode->mChilds[0], *baseNode->mChilds[1], testCenterV, testRotation, rotateNode, largesRotateNode);
				if(!rotationNode && rotateNode && !baseNode->mChilds[largesRotateNode]->isLeaf())
				{
					rotationNode = baseNode;
					testRotation = false;
				}
				baseNode = baseNode->mChilds[traversalIndex];
			}
			// if we still can insert the primitive into the leaf, or we need to split
			if(baseNode->getNbPrimitives() < INCR_NB_OBJECTS_PER_NODE)
			{
				// simply add the primitive into the current leaf
				addPrimitiveIntoNode(baseNode, index, minV, maxV);
				returnNode = baseNode;
				// mark the leaf as changed, avoiding duplicates
				if(!changedLeaf.empty())
				{
					PX_ASSERT(changedLeaf.size() == 1);
					if(changedLeaf[0] != baseNode)
						changedLeaf.pushBack(baseNode);
				}
				else
					changedLeaf.pushBack(baseNode);
			}
			else
			{
				// split
				// check if the leaf is not marked as changed, we need to remove it
				if(!changedLeaf.empty())
				{
					PX_ASSERT(changedLeaf.size() == 1);
					if(changedLeaf[0] == baseNode)
						changedLeaf.popBack();
				}
				IncrementalAABBTreeNode* retNode = splitLeafNode(baseNode, index, minV, maxV, bounds);
				const IncrementalAABBTreeNode* splitParent = retNode->mParent;
				changedLeaf.pushBack(splitParent->mChilds[0]);
				changedLeaf.pushBack(splitParent->mChilds[1]);
				returnNode = retNode;
			}
			if(rotationNode)
			{
				// rebalance the unbalanced pair found during traversal; the
				// rotation can move leaves, so no single leaf is returned
				rotateTree(rotationNode, changedLeaf, largesRotateNode, bounds, true);
				returnNode = NULL;
			}
			return returnNode;
		}
	}
}
// update the index, do a full remove/insert update
// Full update of a primitive's bounds: remove it from its current leaf, then
// re-insert it. Returns the leaf the primitive ends up in, or NULL when the
// re-insert triggered a tree rotation.
IncrementalAABBTreeNode* IncrementalAABBTree::update(IncrementalAABBTreeNode* node, const PoolIndex index, const PxBounds3* bounds, NodeList& changedLeaf)
{
	PX_SIMD_GUARD;
	// the removal may collapse a parent into a leaf - report that leaf as changed
	IncrementalAABBTreeNode* collapsedNode = remove(node, index, bounds);
	if(collapsedNode && collapsedNode->isLeaf())
		changedLeaf.pushBack(collapsedNode);
	return insert(index, bounds, changedLeaf);
}
// update the index, faster version with a lazy update of objects that moved just a bit
// Faster, lazy update: while the new bounds still overlap the current leaf's
// volume the leaf is just refitted in place; otherwise fall back to a full
// remove + insert. The tree may slowly lose quality this way, which is
// acceptable because a replacement tree is being built in the meantime.
IncrementalAABBTreeNode* IncrementalAABBTree::updateFast(IncrementalAABBTreeNode* node, const PoolIndex index, const PxBounds3* bounds, NodeList& changedLeaf)
{
	PX_SIMD_GUARD;
	const Vec4V minV = V4ClearW(V4LoadU(&bounds[index].minimum.x));
	const Vec4V maxV = V4ClearW(V4LoadU(&bounds[index].maximum.x));
	// cheap path - the primitive still touches its leaf, refit and keep it there
	if(nodeIntersection(*node, minV, maxV))
	{
		updateHierarchyAfterRemove(node, bounds);
		return node;
	}
	// slow path - relocate the primitive
	IncrementalAABBTreeNode* collapsedNode = remove(node, index, bounds);
	if(collapsedNode && collapsedNode->isLeaf())
		changedLeaf.pushBack(collapsedNode);
	return insert(index, bounds, changedLeaf);
}
// remove primitive from the tree, return a node if it moved to its parent
// Removes a primitive from its leaf. If other primitives remain, the leaf is
// refitted and NULL is returned. Otherwise the empty leaf is destroyed: its
// sibling's content is folded into their common parent (which may itself
// become a leaf), the node pair is returned to the pool, and the parent is
// returned. Removing the last primitive of the whole tree frees the root.
IncrementalAABBTreeNode* IncrementalAABBTree::remove(IncrementalAABBTreeNode* node, const PoolIndex index, const PxBounds3* bounds)
{
	PX_SIMD_GUARD;
	PX_ASSERT(node->isLeaf());
	// if we just remove the primitive from the list
	if(node->getNbPrimitives() > 1)
	{
		removePrimitiveFromNode(node, index);
		// update the hierarchy
		updateHierarchyAfterRemove(node, bounds);
		return NULL;
	}
	else
	{
		// if root node and the last primitive remove root
		if(node == mRoot)
		{
#if DEALLOCATE_RESET
			IncrementalAABBTreeNodePair* removedPair = reinterpret_cast<IncrementalAABBTreeNodePair*>(node);
			removedPair->mNode0.mChilds[0] = NULL;
			removedPair->mNode0.mChilds[1] = NULL;
			removedPair->mNode1.mChilds[0] = NULL;
			removedPair->mNode1.mChilds[1] = NULL;
#endif
			mNodesPool.deallocate(reinterpret_cast<IncrementalAABBTreeNodePair*>(node));
			mRoot = NULL;
			return NULL;
		}
		else
		{
			// create new parent and remove the current leaf
			IncrementalAABBTreeNode* parent = node->mParent;
			// nodes are pooled in pairs - mChilds[0] is the pair's base address
			IncrementalAABBTreeNodePair* removedPair = reinterpret_cast<IncrementalAABBTreeNodePair*>(parent->mChilds[0]);
			PX_ASSERT(!parent->isLeaf());
			// copy the remaining child into parent
			IncrementalAABBTreeNode* remainingChild = (parent->mChilds[0] == node) ? parent->mChilds[1] : parent->mChilds[0];
			parent->mBVMax = remainingChild->mBVMax;
			parent->mBVMin = remainingChild->mBVMin;
			if(remainingChild->isLeaf())
			{
				// the parent becomes a leaf holding the sibling's primitives
				parent->mIndices = remainingChild->mIndices;
				parent->mChilds[1] = NULL;
			}
			else
			{
				// the parent adopts the sibling's children
				parent->mChilds[0] = remainingChild->mChilds[0];
				parent->mChilds[0]->mParent = parent;
				parent->mChilds[1] = remainingChild->mChilds[1];
				parent->mChilds[1]->mParent = parent;
			}
			if(parent->mParent)
			{
				updateHierarchyAfterRemove(parent->mParent, bounds);
			}
			mIndicesPool.deallocate(node->mIndices);
#if DEALLOCATE_RESET
			removedPair->mNode0.mChilds[0] = NULL;
			removedPair->mNode0.mChilds[1] = NULL;
			removedPair->mNode1.mChilds[0] = NULL;
			removedPair->mNode1.mChilds[1] = NULL;
#endif
			mNodesPool.deallocate(removedPair);
			return parent;
		}
	}
}
// fixup the indices
// Replaces one stored pool index with a new value inside a leaf's index list.
// The old index must be present.
void IncrementalAABBTree::fixupTreeIndices(IncrementalAABBTreeNode* node, const PoolIndex index, const PoolIndex newIndex)
{
	PX_ASSERT(node->isLeaf());
	AABBTreeIndices& leafIndices = *node->mIndices;
	const PxU32 count = leafIndices.nbIndices;
	for(PxU32 slot = 0; slot < count; slot++)
	{
		if(leafIndices.indices[slot] != index)
			continue;
		leafIndices.indices[slot] = newIndex;
		return;
	}
	PX_ASSERT(0);	// the old index was not found in this leaf
}
// shift node
// Recursively translates the bounds of an entire subtree by -shiftV.
static void shiftNode(IncrementalAABBTreeNode* node, const Vec4V& shiftV)
{
	node->mBVMin = V4Sub(node->mBVMin, shiftV);
	node->mBVMax = V4Sub(node->mBVMax, shiftV);
	if(node->isLeaf())
		return;
	shiftNode(node->mChilds[0], shiftV);
	shiftNode(node->mChilds[1], shiftV);
}
// shift origin
// Shifts the tree's origin: subtracts 'shift' from every node's bounds.
void IncrementalAABBTree::shiftOrigin(const PxVec3& shift)
{
	if(!mRoot)
		return;
	const Vec4V shiftV = V4ClearW(V4LoadU(&shift.x));
	shiftNode(mRoot, shiftV);
}
// Debug-only recursive validation of tree topology and bounds:
// - the node's parent pointer must match the actual parent, and the node must be one of the parent's children
// - an interior node's children must be contained in the node's bounds
// - a leaf's stored indices must be in range, and the merged bounds of all its primitives must equal the leaf bounds
// numIndices/numNodes accumulate totals for the caller to cross-check against the expected counts.
static void checkNode(IncrementalAABBTreeNode* node, IncrementalAABBTreeNode* parent, const PxBounds3* bounds, PoolIndex maxIndex, PxU32& numIndices, PxU32& numNodes)
{
	PX_ASSERT(node->mParent == parent);
	PX_ASSERT(!parent->isLeaf());
	PX_ASSERT(parent->mChilds[0] == node || parent->mChilds[1] == node);
	numNodes++;

	if(!node->isLeaf())
	{
		// both children must fit inside this node's bounds
		PX_ASSERT(nodeInsideBounds(node->mChilds[0]->mBVMin, node->mChilds[0]->mBVMax, node->mBVMin, node->mBVMax));
		PX_ASSERT(nodeInsideBounds(node->mChilds[1]->mBVMin, node->mChilds[1]->mBVMax, node->mBVMin, node->mBVMax));

		// NOTE(review): testMinV/testMaxV merge the *parent's* children (node is one of them), which makes
		// the containment assert below quite weak — presumably node->mChilds was intended here; verify.
		const Vec4V testMinV = V4Min(parent->mChilds[0]->mBVMin, parent->mChilds[1]->mBVMin);
		const Vec4V testMaxV = V4Max(parent->mChilds[0]->mBVMax, parent->mChilds[1]->mBVMax);
		PX_UNUSED(testMinV);
		PX_UNUSED(testMaxV);
		PX_ASSERT(nodeInsideBounds(node->mBVMin, node->mBVMax, testMinV, testMaxV));

		checkNode(node->mChilds[0], node, bounds, maxIndex, numIndices, numNodes);
		checkNode(node->mChilds[1], node, bounds, maxIndex, numIndices, numNodes);
	}
	else
	{
		const AABBTreeIndices& indices = *node->mIndices;
		PX_ASSERT(indices.nbIndices);

		// merge the bounds of every primitive stored in this leaf...
		Vec4V testMinV = V4ClearW(V4LoadU(&bounds[indices.indices[0]].minimum.x));
		Vec4V testMaxV = V4ClearW(V4LoadU(&bounds[indices.indices[0]].maximum.x));
		for(PxU32 i = 0; i < indices.nbIndices; i++)
		{
			PX_ASSERT(indices.indices[i] < maxIndex);
			numIndices++;

			const Vec4V minV = V4ClearW(V4LoadU(&bounds[indices.indices[i]].minimum.x));
			const Vec4V maxV = V4ClearW(V4LoadU(&bounds[indices.indices[i]].maximum.x));
			testMinV = V4Min(testMinV, minV);
			testMaxV = V4Max(testMaxV, maxV);

			// every primitive must lie inside the leaf bounds
			PX_ASSERT(nodeInsideBounds(minV, maxV, node->mBVMin, node->mBVMax));
		}
		// ...and that merged AABB must exactly match the stored leaf bounds
		PX_ASSERT(boundsEqual(testMinV, testMaxV, node->mBVMin, node->mBVMax));
	}
}
// Debug-only full-tree validation; also verifies that the total number of stored handles equals maxIndex.
void IncrementalAABBTree::hierarchyCheck(PoolIndex maxIndex, const PxBounds3* bounds)
{
	PxU32 handleCount = 0;
	PxU32 nodeCountPos = 0;
	PxU32 nodeCountNeg = 0;

	if(mRoot != NULL && !mRoot->isLeaf())
	{
		checkNode(mRoot->mChilds[0], mRoot, bounds, maxIndex, handleCount, nodeCountPos);
		checkNode(mRoot->mChilds[1], mRoot, bounds, maxIndex, handleCount, nodeCountNeg);

		PX_ASSERT(handleCount == maxIndex);
	}
}
void IncrementalAABBTree::hierarchyCheck(const PxBounds3* bounds)
{
PxU32 numHandles = 0;
PxU32 numPosNodes = 0;
PxU32 numNegNodes = 0;
if(mRoot && !mRoot->isLeaf())
{
checkNode(mRoot->mChilds[0], mRoot, bounds, 0xFFFFFFFF, numHandles, numPosNodes);
checkNode(mRoot->mChilds[1], mRoot, bounds, 0xFFFFFFFF, numHandles, numNegNodes);
}
}
// Debug-only check that handle 'h' is actually stored in the given leaf.
void IncrementalAABBTree::checkTreeLeaf(IncrementalAABBTreeNode* leaf, PoolIndex h)
{
	PX_ASSERT(leaf->isLeaf());

	const AABBTreeIndices& leafIndices = *leaf->mIndices;
	bool containsHandle = false;
	for(PxU32 j = 0; j < leafIndices.nbIndices; j++)
	{
		if(leafIndices.indices[j] == h)
		{
			containsHandle = true;
			break;
		}
	}

	PX_UNUSED(containsHandle);	// only read by the assert below
	PX_ASSERT(containsHandle);
}
// Returns the depth of the given leaf (1 for the root itself, +1 per ancestor).
PxU32 IncrementalAABBTree::getTreeLeafDepth(IncrementalAABBTreeNode* leaf)
{
	PxU32 depth = 1;
	for(const IncrementalAABBTreeNode* current = leaf->mParent; current; current = current->mParent)
		depth++;
	return depth;
}
// Build the tree from the given bounds.
// Runs the batched AABB tree builder first (nodes live in mNodeAllocator), then clones the result into
// pool-allocated incremental nodes; 'mapping' is filled with primitive-index -> leaf-node pointers.
// Returns false when there is nothing to build.
bool IncrementalAABBTree::build(const AABBTreeBuildParams& params, PxArray<IncrementalAABBTreeNode*>& mapping)
{
	// Init stats
	BuildStats stats;
	const PxU32 nbPrimitives = params.mNbPrimitives;
	if (!nbPrimitives)
		return false;
	// indices is a permutation of primitive indices, owned by us after the call
	PxU32* indices = buildAABBTree(params, mNodeAllocator, stats);
	PX_ASSERT(indices);
	// the cached build bounds are no longer needed once the tree is built
	PX_FREE(params.mCache);
	// temp array mapping build-node index -> incremental node, filled lazily during clone()
	IncrementalAABBTreeNode** treeNodes = PX_ALLOCATE(IncrementalAABBTreeNode*, stats.getCount(), "temp node helper array");
	PxMemSet(treeNodes, 0, sizeof(IncrementalAABBTreeNode*)*(stats.getCount()));
	clone(mapping, indices, treeNodes);
	// slot 0 always holds the root of the cloned tree
	mRoot = treeNodes[0];
	mRoot->mParent = NULL;
	PX_FREE(indices);
	PX_FREE(treeNodes);
	// the builder's nodes are no longer referenced — everything was copied into mNodesPool
	mNodeAllocator.release();
	return true;
}
// Clone the tree computed in the NodeAllocator into pool-allocated incremental nodes, similar to AABBTree flatten.
// Build nodes are addressed by a linear index obtained by walking the allocator slabs in order; treeNodes maps
// that linear index to the (lazily created) incremental node. Children of one build node are allocated together
// as an IncrementalAABBTreeNodePair, so they always occupy two consecutive slots.
// \param mapping    out: primitive index -> leaf node that stores it
// \param _indices   primitive index permutation produced by the builder (leaf ranges point into it)
// \param treeNodes  scratch array of stats.getCount() slots, pre-zeroed by the caller
void IncrementalAABBTree::clone(PxArray<IncrementalAABBTreeNode*>& mapping, const PxU32* _indices, IncrementalAABBTreeNode** treeNodes)
{
	PxU32 offset = 0;
	const PxU32 nbSlabs = mNodeAllocator.mSlabs.size();
	for (PxU32 s = 0; s<nbSlabs; s++)
	{
		const NodeAllocator::Slab& currentSlab = mNodeAllocator.mSlabs[s];
		AABBTreeBuildNode* pool = currentSlab.mPool;
		for (PxU32 i = 0; i < currentSlab.mNbUsedNodes; i++)
		{
			// the destination node may already exist: it was created when its parent was processed
			IncrementalAABBTreeNode* destNode = treeNodes[offset];
			if(!destNode)
			{
				destNode = reinterpret_cast<IncrementalAABBTreeNode*>(mNodesPool.allocate());
				treeNodes[offset] = destNode;
			}
			destNode->mBVMin = V4ClearW(V4LoadU(&pool[i].mBV.minimum.x));
			destNode->mBVMax = V4ClearW(V4LoadU(&pool[i].mBV.maximum.x));
			if (pool[i].isLeaf())
			{
				// leaf: copy the primitive indices and remember this leaf for each primitive
				AABBTreeIndices* indices = mIndicesPool.allocate();
				destNode->mIndices = indices;
				destNode->mChilds[1] = NULL;
				indices->nbIndices = pool[i].getNbPrimitives();
				PX_ASSERT(indices->nbIndices <= 16);
				const PxU32* sourceIndices = _indices + pool[i].mNodeIndex;
				for (PxU32 iIndices = 0; iIndices < indices->nbIndices; iIndices++)
				{
					const PxU32 sourceIndex = sourceIndices[iIndices];
					indices->indices[iIndices] = sourceIndex;
					PX_ASSERT(sourceIndex < mapping.size());
					mapping[sourceIndex] = destNode;
				}
			}
			else
			{
				// interior node: translate the child pointer (into some slab) to a linear node index
				PX_ASSERT(pool[i].mPos);
				PxU32 localNodeIndex = 0xffffffff;
				PxU32 nodeBase = 0;
				for (PxU32 j = 0; j<nbSlabs; j++)
				{
					if (pool[i].mPos >= mNodeAllocator.mSlabs[j].mPool && pool[i].mPos < mNodeAllocator.mSlabs[j].mPool + mNodeAllocator.mSlabs[j].mNbUsedNodes)
					{
						localNodeIndex = PxU32(pool[i].mPos - mNodeAllocator.mSlabs[j].mPool);
						break;
					}
					nodeBase += mNodeAllocator.mSlabs[j].mNbUsedNodes;
				}
				const PxU32 nodeIndex = nodeBase + localNodeIndex;
				// children are siblings in the builder and are stored in consecutive slots here;
				// allocate them together as one node pair if they don't exist yet
				IncrementalAABBTreeNode* child0 = treeNodes[nodeIndex];
				IncrementalAABBTreeNode* child1 = treeNodes[nodeIndex + 1];
				if(!child0)
				{
					PX_ASSERT(!child1);
					child0 = reinterpret_cast<IncrementalAABBTreeNode*>(mNodesPool.allocate());
					child1 = child0 + 1;
					treeNodes[nodeIndex] = child0;
					treeNodes[nodeIndex + 1] = child1;
				}
				destNode->mChilds[0] = child0;
				destNode->mChilds[1] = child1;
				child0->mParent = destNode;
				child1->mParent = destNode;
			}
			offset++;
		}
	}
}
// Recursively copy one BVH node (and its subtree) into an incremental tree node.
// \param destNode        node being filled in
// \param sourceNode      BVH node to copy from
// \param nodeBase        base pointer of the BVH node array (used to resolve child offsets)
// \param parent          parent of destNode in the incremental tree (NULL for the root)
// \param primitivesBase  base pointer of the BVH primitive index array
// \param mapping         out: primitive index -> leaf node that stores it
void IncrementalAABBTree::copyNode(IncrementalAABBTreeNode& destNode, const BVHNode& sourceNode,
	const BVHNode* nodeBase, IncrementalAABBTreeNode* parent, const PxU32* primitivesBase,
	PxArray<IncrementalAABBTreeNode*>& mapping)
{
	destNode.mParent = parent;
	destNode.mBVMin = V4ClearW(V4LoadU(&sourceNode.mBV.minimum.x));
	destNode.mBVMax = V4ClearW(V4LoadU(&sourceNode.mBV.maximum.x));
	if(sourceNode.isLeaf())
	{
		// leaf: copy the primitive indices and remember this leaf for each primitive
		AABBTreeIndices* indices = mIndicesPool.allocate();
		destNode.mIndices = indices;
		indices->nbIndices = sourceNode.getNbPrimitives();
		const PxU32* sourceIndices = sourceNode.getPrimitives(primitivesBase);
		for(PxU32 i = 0; i < indices->nbIndices; i++)
		{
			const PxU32 sourceIndex = sourceIndices[i];
			indices->indices[i] = sourceIndex;
			mapping[sourceIndex] = &destNode;
		}
	}
	else
	{
		// interior node: both children come from one pooled pair allocation, then recurse
		IncrementalAABBTreeNodePair* nodePair = mNodesPool.construct();
		IncrementalAABBTreeNode* child0 = &nodePair->mNode0;
		IncrementalAABBTreeNode* child1 = &nodePair->mNode1;
		destNode.mChilds[0] = child0;
		destNode.mChilds[1] = child1;
		copyNode(*destNode.mChilds[0], *sourceNode.getPos(nodeBase), nodeBase, &destNode, primitivesBase, mapping);
		copyNode(*destNode.mChilds[1], *sourceNode.getNeg(nodeBase), nodeBase, &destNode, primitivesBase, mapping);
	}
}
// build the tree from the prebuild AABB tree
void IncrementalAABBTree::copy(const BVH& bvh, PxArray<IncrementalAABBTreeNode*>& mapping)
{
if(bvh.getNbBounds() == 0)
return;
IncrementalAABBTreeNodePair* nodePair = mNodesPool.construct();
mRoot = &nodePair->mNode0;
const BVHNode* nodes = bvh.getNodes();
copyNode(*mRoot, *nodes, nodes, NULL, bvh.getIndices(), mapping);
}
| 34,872 | C++ | 31.622077 | 178 | 0.722385 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuMTD.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_MTD_H
#define GU_MTD_H
#include "foundation/PxVec3.h"
#include "foundation/PxTransform.h"
#include "geometry/PxGeometry.h"
namespace physx
{
namespace Gu
{
// PT: we use a define to be able to quickly change the signature of all MTD functions.
// (this also ensures they all use consistent names for passed parameters).
// \param[out] mtd computed depenetration dir
// \param[out] depth computed depenetration depth
// \param[in] geom0 first geometry object
// \param[in] pose0 pose of first geometry object
// \param[in] geom1 second geometry object
// \param[in] pose1 pose of second geometry object
// \param[in] cache optional cached data for triggers
#define GU_MTD_FUNC_PARAMS PxVec3& mtd, PxF32& depth, \
const PxGeometry& geom0, const PxTransform32& pose0, \
const PxGeometry& geom1, const PxTransform32& pose1
// PT: function pointer for Geom-indexed MTD functions
// See GU_MTD_FUNC_PARAMS for function parameters details.
// \return true if an overlap was found, false otherwise
// \note depenetration vector D is equal to mtd * depth. It should be applied to the 1st object, to get out of the 2nd object.
typedef bool (*GeomMTDFunc) (GU_MTD_FUNC_PARAMS);
// Normalizes 'normal' into 'mtd' using a precomputed squared length, guarding against
// degenerate (near-zero) normals.
// \param[out] mtd    normalized direction; an arbitrary unit axis if the input is degenerate
// \param[in]  normal vector to normalize
// \param[in]  lenSq  squared length of 'normal'
// \return the length of 'normal' (PxSqrt(lenSq)), even in the degenerate case
PX_FORCE_INLINE PxF32 manualNormalize(PxVec3& mtd, const PxVec3& normal, PxReal lenSq)
{
	const PxF32 len = PxSqrt(lenSq);

	// We do a *manual* normalization to check for singularity condition
	if(lenSq < 1e-6f)
		mtd = PxVec3(1.0f, 0.0f, 0.0f);	// PT: zero normal => pick up random one
	else
		mtd = normal / len;	// PT: was "normal * 1.0f / len" — the *1.0f is exact and redundant, so this is bit-identical

	return len;
}
}
}
#endif
| 3,284 | C | 42.799999 | 128 | 0.740865 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuSDF.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_SDF_H
#define GU_SDF_H
/** \addtogroup geomutils
@{
*/
#include "common/PxPhysXCommonConfig.h"
#include "foundation/PxVec3.h"
#include "foundation/PxUserAllocated.h"
#include "foundation/PxArray.h"
#include "foundation/PxMathUtils.h"
namespace physx
{
class PxSDFBuilder;
class PxSerializationContext;
class PxDeserializationContext;
namespace Gu
{
/**
\brief Represents dimensions of signed distance field
*/
class Dim3
{
public:
	/**
	\brief Constructor. Leaves x/y/z uninitialized (POD-style default).
	*/
	Dim3()
	{
	}

	/**
	\brief Zero-initializing constructor (use as Dim3(PxZero))
	*/
	Dim3(PxZERO d) : x(0), y(0), z(0)
	{
		PX_UNUSED(d);
	}

	/**
	\brief Constructor from explicit dimensions
	*/
	Dim3(PxU32 _x, PxU32 _y, PxU32 _z) : x(_x), y(_y), z(_z)
	{
	}

	/**
	\brief Copy constructor
	*/
	Dim3(const Dim3& d) : x(d.x), y(d.y), z(d.z)
	{
	}

	PxU32 x; //!< Size of X dimension
	PxU32 y; //!< Size of Y dimension
	PxU32 z; //!< Size of Z dimension
};
/**
\brief Represents a signed distance field. Supports a dense background grid plus optional sparse
subgrid blocks at higher resolution near the surface.
*/
class SDF : public PxUserAllocated
{
public:
// PX_SERIALIZATION
	// binary-deserialization constructor: pointers are patched afterwards, so the memory is not owned
	SDF(const PxEMPTY) : mOwnsMemory(false) {}
	void exportExtraData(PxSerializationContext& context);
	void importExtraData(PxDeserializationContext& context);
	static void getBinaryMetaData(PxOutputStream& stream);
//~PX_SERIALIZATION

	/**
	\brief Constructor. Only nulls the pointer members; scalar members stay uninitialized.
	*/
	SDF() : mSdf(NULL), mSubgridStartSlots(NULL), mSubgridSdf(NULL), mOwnsMemory(true)
	{
	}

	/**
	\brief Zero-initializing constructor (use as SDF(PxZero))
	*/
	SDF(PxZERO s)
		: mMeshLower(PxZero), mSpacing(0.0f), mDims(PxZero), mNumSdfs(0), mSdf(NULL),
		mSubgridSize(PxZero), mNumStartSlots(0), mSubgridStartSlots(NULL), mNumSubgridSdfs(0), mSubgridSdf(NULL), mSdfSubgrids3DTexBlockDim(PxZero),
		mSubgridsMinSdfValue(0.0f), mSubgridsMaxSdfValue(0.0f), mBytesPerSparsePixel(0), mOwnsMemory(true)
	{
		PX_UNUSED(s);
	}

	/**
	\brief Copy constructor
	\note NOTE(review): this shares the raw pointers (mSdf, mSubgridStartSlots, mSubgridSdf) with the
	source yet sets mOwnsMemory to true on the copy — destroying both instances looks like a double
	free; verify the intended ownership semantics against ~SDF().
	*/
	SDF(const SDF& sdf)
		: mMeshLower(sdf.mMeshLower), mSpacing(sdf.mSpacing), mDims(sdf.mDims), mNumSdfs(sdf.mNumSdfs), mSdf(sdf.mSdf),
		mSubgridSize(sdf.mSubgridSize), mNumStartSlots(sdf.mNumStartSlots), mSubgridStartSlots(sdf.mSubgridStartSlots), mNumSubgridSdfs(sdf.mNumSubgridSdfs), mSubgridSdf(sdf.mSubgridSdf), mSdfSubgrids3DTexBlockDim(sdf.mSdfSubgrids3DTexBlockDim),
		mSubgridsMinSdfValue(sdf.mSubgridsMinSdfValue), mSubgridsMaxSdfValue(sdf.mSubgridsMaxSdfValue), mBytesPerSparsePixel(sdf.mBytesPerSparsePixel),
		mOwnsMemory(true)
	{
	}

	// Decodes a 10|10|10-bit packed triple into x/y/z (inverse of encodeTriple)
	static PX_FORCE_INLINE void decodeTriple(PxU32 id, PxU32& x, PxU32& y, PxU32& z)
	{
		x = id & 0x000003FF;
		id = id >> 10;
		y = id & 0x000003FF;
		id = id >> 10;
		z = id & 0x000003FF;
	}

	// Reads one sparse-subgrid sample and converts it to a real distance value.
	// 8/16-bit formats store values normalized to [subgridsMinSdfValue, subgridsMaxSdfValue];
	// the 32-bit format stores raw floats.
	static PX_FORCE_INLINE PxReal decodeSample(PxU8* data, PxU32 index, PxU32 bytesPerSparsePixel, PxReal subgridsMinSdfValue, PxReal subgridsMaxSdfValue)
	{
		switch (bytesPerSparsePixel)
		{
		case 1:
			// 8-bit normalized: de-normalize into [min, max]
			return PxReal(data[index]) * (1.0f / 255.0f) * (subgridsMaxSdfValue - subgridsMinSdfValue) + subgridsMinSdfValue;
		case 2:
		{
			// 16-bit normalized: de-normalize into [min, max]
			PxU16* ptr = reinterpret_cast<PxU16*>(data);
			return PxReal(ptr[index]) * (1.0f / 65535.0f) * (subgridsMaxSdfValue - subgridsMinSdfValue) + subgridsMinSdfValue;
		}
		case 4:
		{
			//If 4 bytes per subgrid pixel are available, then normal floats are used. No need to
			//de-normalize integer values since the floats already contain real distance values
			PxReal* ptr = reinterpret_cast<PxReal*>(data);
			return ptr[index];
		}
		default:
			PX_ASSERT(0);	// unsupported pixel format; falls through to return 0 in release builds
		}
		return 0;
	}

	PX_PHYSX_COMMON_API PxReal decodeSparse(PxI32 xx, PxI32 yy, PxI32 zz) const;
	PX_PHYSX_COMMON_API PxReal decodeDense(PxI32 x, PxI32 y, PxI32 z) const;

	// Number of sparse subgrid blocks along each axis (mDims must be a multiple of mSubgridSize)
	PX_FORCE_INLINE PxU32 nbSubgridsX() const
	{
		return mDims.x / mSubgridSize;
	}
	PX_FORCE_INLINE PxU32 nbSubgridsY() const
	{
		return mDims.y / mSubgridSize;
	}
	PX_FORCE_INLINE PxU32 nbSubgridsZ() const
	{
		return mDims.z / mSubgridSize;
	}

	// Uniform voxel size (same spacing on all three axes)
	PX_FORCE_INLINE PxVec3 getCellSize() const
	{
		return PxVec3(mSpacing);
	}

	// Returns true if the subgrid block at (sgX, sgY, sgZ) has data (empty blocks store the sentinel 0xFFFFFFFF)
	PX_FORCE_INLINE bool subgridExists(PxU32 sgX, PxU32 sgY, PxU32 sgZ) const
	{
		const PxU32 nbX = mDims.x / mSubgridSize;
		const PxU32 nbY = mDims.y / mSubgridSize;
		//const PxU32 nbZ = mDims.z / mSubgridSize;
		PxU32 startId = mSubgridStartSlots[sgZ * (nbX) * (nbY) + sgY * (nbX) + sgX];
		return startId != 0xFFFFFFFFu;
	}

	/**
	\brief Destructor
	*/
	~SDF();

	// Allocates and takes ownership of all sample buffers; returns the dense sdf array (mSdf)
	PxReal* allocateSdfs(const PxVec3& meshLower, const PxReal& spacing, const PxU32 dimX, const PxU32 dimY, const PxU32 dimZ,
		const PxU32 subgridSize, const PxU32 sdfSubgrids3DTexBlockDimX, const PxU32 sdfSubgrids3DTexBlockDimY, const PxU32 sdfSubgrids3DTexBlockDimZ,
		PxReal subgridsMinSdfValue, PxReal subgridsMaxSdfValue, PxU32 bytesPerSparsePixel);

	PxVec3 mMeshLower; //!< Lower bound of the original mesh
	PxReal mSpacing; //!< Spacing of each sdf voxel
	Dim3 mDims; //!< Dimension of the sdf
	PxU32 mNumSdfs; //!< Number of sdf values
	PxReal* mSdf; //!< Array of sdf

	// Additional data to support sparse grid SDFs
	PxU32 mSubgridSize; //!< The number of cells in a sparse subgrid block (full block has mSubgridSize^3 cells and (mSubgridSize+1)^3 samples). If set to zero, this indicates that only a dense background grid SDF is used without sparse blocks
	PxU32 mNumStartSlots; //!< Array length of mSubgridStartSlots. Only used for serialization
	PxU32* mSubgridStartSlots; //!< Array with start indices into the subgrid texture for every subgrid block. 10bits for z coordinate, 10bits for y and 10bits for x
	PxU32 mNumSubgridSdfs; //!< Array length of mSubgridSdf. Only used for serialization
	PxU8* mSubgridSdf; //!< The data to create the 3d texture containing the packed subgrid blocks. Stored as PxU8 to support multiple formats (8, 16 and 32 bits per pixel)
	Dim3 mSdfSubgrids3DTexBlockDim; //!< Subgrid sdf is laid out as a 3d texture including packed blocks of size (mSubgridSize+1)^3
	PxReal mSubgridsMinSdfValue; //!< The minimum value over all subgrid blocks. Used if normalized textures are used which is the case for 8 and 16bit formats
	PxReal mSubgridsMaxSdfValue; //!< The maximum value over all subgrid blocks. Used if normalized textures are used which is the case for 8 and 16bit formats
	PxU32 mBytesPerSparsePixel; //!< The number of bytes per subgrid pixel
	bool mOwnsMemory; //!< Only false for binary deserialized data
};
/**
\brief Returns the number of times a point is enclosed by a triangle mesh. Therefore points with a winding number of 0 lie outside of the mesh, others lie inside. The sign of the winding number
is dependent on the triangle orientation. For closed meshes, a robust inside/outside check should not test for a value of 0 exactly, inside = PxAbs(windingNumber) > 0.5f should be preferred.
\param[in] vertices The triangle mesh's vertices
\param[in] indices The triangle mesh's indices
\param[in] numTriangleIndices The number of indices
\param[in] width The number of grid points along the x direction
\param[in] height The number of grid points along the y direction
\param[in] depth The number of grid points along the z direction
\param[out] windingNumbers The winding number for the center of every grid cell, index rule is: index = z * width * height + y * width + x
\param[in] minExtents The grid's lower corner
\param[in] maxExtents The grid's upper corner
\param[out] sampleLocations Optional buffer to output the grid sample locations, index rule is: index = z * width * height + y * width + x
*/
PX_PHYSX_COMMON_API void windingNumbers(const PxVec3* vertices, const PxU32* indices, PxU32 numTriangleIndices, PxU32 width, PxU32 height, PxU32 depth,
PxReal* windingNumbers, PxVec3 minExtents, PxVec3 maxExtents, PxVec3* sampleLocations = NULL);
/**
\brief Returns if a point is enclosed by a triangle mesh.
\param[in] vertices The triangle mesh's vertices
\param[in] indices The triangle mesh's indices
\param[in] numTriangleIndices The number of indices
\param[in] width The number of grid points along the x direction
\param[in] height The number of grid points along the y direction
\param[in] depth The number of grid points along the z direction
\param[out] insideResult Booleans that indicate if the center of a grid cell is inside or outside, index rule is: index = z * width * height + y * width + x
\param[in] minExtents The grid's lower corner, the box formed by minExtent and maxExtent must include all vertices
\param[in] maxExtents The grid's upper corner, the box formed by minExtent and maxExtent must include all vertices
\param[out] sampleLocations Optional buffer to output the grid sample locations, index rule is: index = z * width * height + y * width + x
*/
PX_PHYSX_COMMON_API void windingNumbersInsideCheck(const PxVec3* vertices, const PxU32* indices, PxU32 numTriangleIndices, PxU32 width, PxU32 height, PxU32 depth,
bool* insideResult, PxVec3 minExtents, PxVec3 maxExtents, PxVec3* sampleLocations = NULL);
/**
\brief Returns the distance to the mesh's surface for all samples in a grid. The sign is dependent on the triangle orientation. Negative distances indicate that a sample is inside the mesh, positive
distances mean the sample is outside of the mesh.
\param[in] vertices The triangle mesh's vertices
\param[in] indices The triangle mesh's indices
\param[in] numTriangleIndices The number of indices
\param[in] width The number of grid points along the x direction
\param[in] height The number of grid points along the y direction
\param[in] depth The number of grid points along the z direction
\param[out] sdf The signed distance field (negative values indicate that a point is inside of the mesh), index rule is: index = z * width * height + y * width + x
\param[in] minExtents The grid's lower corner, the box formed by minExtent and maxExtent must include all vertices
\param[in] maxExtents The grid's upper corner, the box formed by minExtent and maxExtent must include all vertices
\param[out] sampleLocations Optional buffer to output the grid sample locations, index rule is: index = z * width * height + y * width + x
\param[in] cellCenteredSamples Determines if the sample points are chosen at cell centers or at cell origins
\param[in] numThreads The number of cpu threads to use during the computation
\param[in] sdfBuilder Optional pointer to a sdf builder to accelerate the sdf construction. The pointer is owned by the caller and must remain valid until the function terminates.
*/
PX_PHYSX_COMMON_API void SDFUsingWindingNumbers(const PxVec3* vertices, const PxU32* indices, PxU32 numTriangleIndices, PxU32 width, PxU32 height, PxU32 depth,
PxReal* sdf, PxVec3 minExtents, PxVec3 maxExtents, PxVec3* sampleLocations = NULL, bool cellCenteredSamples = true,
PxU32 numThreads = 1, PxSDFBuilder* sdfBuilder = NULL);
/**
\brief Returns the distance to the mesh's surface for all samples in a grid. The sign is dependent on the triangle orientation. Negative distances indicate that a sample is inside the mesh, positive
distances mean the sample is outside of the mesh. Near mesh surfaces, a higher resolution is available than further away from the surface (sparse sdf format) to save memory.
The samples are not cell centered but located at the cell origin. This is a requirement of the sparse grid format.
\param[in] vertices The triangle mesh's vertices
\param[in] indices The triangle mesh's indices
\param[in] numTriangleIndices The number of indices
\param[in] width The number of grid points along the x direction
\param[in] height The number of grid points along the y direction
\param[in] depth The number of grid points along the z direction
\param[in] minExtents The grid's lower corner, the box formed by minExtent and maxExtent must include all vertices
\param[in] maxExtents The grid's upper corner, the box formed by minExtent and maxExtent must include all vertices
\param[in] narrowBandThicknessRelativeToExtentDiagonal The thickness of the narrow band as a fraction of the sdf box diagonal length. Can be as small as 0 but a value of at least 0.01 is recommended.
\param[in] cellsPerSubgrid The number of cells in a sparse subgrid block (full block has mSubgridSize^3 cells and (mSubgridSize+1)^3 samples)
\param[out] sdfCoarse The coarse sdf as a dense 3d array of lower resolution (resolution is (width/cellsPerSubgrid+1, height/cellsPerSubgrid+1, depth/cellsPerSubgrid+1))
\param[out] sdfFineStartSlots The start slot indices of the subgrid blocks. If a subgrid block is empty, the start slot will be 0xFFFFFFFF
\param[out] subgridData The array containing subgrid data blocks
\param[out] denseSdf Provides access to the dense sdf that is used for computation internally
\param[out] subgridsMinSdfValue The minimum value over all subgrid blocks. Used if normalized textures are used which is the case for 8 and 16bit formats
\param[out] subgridsMaxSdfValue The maximum value over all subgrid blocks. Used if normalized textures are used which is the case for 8 and 16bit formats
\param[in] numThreads The number of cpu threads to use during the computation
\param[in] sdfBuilder Optional pointer to a sdf builder to accelerate the sdf construction. The pointer is owned by the caller and must remain valid until the function terminates.
*/
PX_PHYSX_COMMON_API void SDFUsingWindingNumbersSparse(const PxVec3* vertices, const PxU32* indices, PxU32 numTriangleIndices, PxU32 width, PxU32 height, PxU32 depth,
const PxVec3& minExtents, const PxVec3& maxExtents, PxReal narrowBandThicknessRelativeToExtentDiagonal, PxU32 cellsPerSubgrid,
PxArray<PxReal>& sdfCoarse, PxArray<PxU32>& sdfFineStartSlots, PxArray<PxReal>& subgridData, PxArray<PxReal>& denseSdf,
PxReal& subgridsMinSdfValue, PxReal& subgridsMaxSdfValue, PxU32 numThreads = 1, PxSDFBuilder* sdfBuilder = NULL);
PX_PHYSX_COMMON_API void analyzeAndFixMesh(const PxVec3* vertices, const PxU32* indicesOrig, PxU32 numTriangleIndices, PxArray<PxU32>& repairedIndices);
/**
\brief Converts a sparse grid sdf to a format that can be used to create a 3d texture. 3d textures support very efficient
trilinear interpolation on the GPU which is very important during sdf evaluation.
\param[in] width The number of grid points along the x direction
\param[in] height The number of grid points along the y direction
\param[in] depth The number of grid points along the z direction
\param[in] cellsPerSubgrid The number of cells in a sparse subgrid block (full block has mSubgridSize^3 cells and (mSubgridSize+1)^3 samples)
\param[in,out] sdfFineStartSlots Array with linear start indices into the subgrid data array. This array gets converted by this method to start indices for every subgrid block in the 3d texture. The result uses 10bits for z coordinate, 10bits for y and 10bits for x
\param[in] sdfFineSubgridsIn Subgrid data array
\param[in] sdfFineSubgridsSize Number of elements in sdfFineSubgridsIn
\param[out] subgrids3DTexFormat The subgrid data organized in a 3d texture compatible order
\param[out] numSubgridsX Number of subgrid blocks in the 3d texture along x. The full texture dimension along x will be numSubgridsX*(cellsPerSubgrid+1).
\param[out] numSubgridsY Number of subgrid blocks in the 3d texture along y. The full texture dimension along y will be numSubgridsY*(cellsPerSubgrid+1).
\param[out] numSubgridsZ Number of subgrid blocks in the 3d texture along z. The full texture dimension along z will be numSubgridsZ*(cellsPerSubgrid+1).
*/
PX_PHYSX_COMMON_API void convertSparseSDFTo3DTextureLayout(PxU32 width, PxU32 height, PxU32 depth, PxU32 cellsPerSubgrid,
PxU32* sdfFineStartSlots, const PxReal* sdfFineSubgridsIn, PxU32 sdfFineSubgridsSize, PxArray<PxReal>& subgrids3DTexFormat,
PxU32& numSubgridsX, PxU32& numSubgridsY, PxU32& numSubgridsZ);
/**
\brief Extracts an isosurface as a triangular mesh from a signed distance function
\param[in] sdf The signed distance function
\param[out] isosurfaceVertices The vertices of the extracted isosurface
\param[out] isosurfaceTriangleIndices The triangles of the extracted isosurface
\param[in] numThreads The number of threads to use
*/
PX_PHYSX_COMMON_API void extractIsosurfaceFromSDF(const Gu::SDF& sdf, PxArray<PxVec3>& isosurfaceVertices, PxArray<PxU32>& isosurfaceTriangleIndices, PxU32 numThreads = 1);
/**
\brief A class that allows to efficiently project points onto the surface of a triangle mesh.
Abstract interface; create instances via PxCreatePointOntoTriangleMeshProjector.
*/
class PxPointOntoTriangleMeshProjector
{
public:
	/**
	\brief Projects a point onto the surface of a triangle mesh.
	\param[in] point The point to project
	\return the projected point
	*/
	virtual PxVec3 projectPoint(const PxVec3& point) = 0;

	/**
	\brief Projects a point onto the surface of a triangle mesh.
	\param[in] point The point to project
	\param[out] closestTriangleIndex The index of the triangle on which the projected point is located
	\return the projected point
	*/
	virtual PxVec3 projectPoint(const PxVec3& point, PxU32& closestTriangleIndex) = 0;

	/**
	\brief Releases the instance and its data. The object must not be used afterwards.
	*/
	virtual void release() = 0;
};
/**
\brief Creates a helper class that allows to efficiently project points onto the surface of a triangle mesh.
\param[in] vertices The triangle mesh's vertices
\param[in] triangleIndices The triangle mesh's indices
\param[in] numTriangles The number of triangles
\return A point onto triangle mesh projector instance. The caller needs to delete the instance once it is not used anymore by calling its release method
*/
PX_PHYSX_COMMON_API PxPointOntoTriangleMeshProjector* PxCreatePointOntoTriangleMeshProjector(const PxVec3* vertices, const PxU32* triangleIndices, PxU32 numTriangles);
/**
\brief Utility to convert from a linear index to x/y/z indices given the grid size (only sizeX and sizeY required)
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE void idToXYZ(PxU32 id, PxU32 sizeX, PxU32 sizeY, PxU32& xi, PxU32& yi, PxU32& zi)
{
	// inverse of idx3D: id == (zi * sizeY + yi) * sizeX + xi
	const PxU32 slice = id / sizeX;
	xi = id - slice * sizeX;
	yi = slice % sizeY;
	zi = slice / sizeY;
}
/**
\brief Utility to convert from x/y/z indices to a linear index given the grid size (only width and height required)
*/
PX_FORCE_INLINE PX_CUDA_CALLABLE PxU32 idx3D(PxU32 x, PxU32 y, PxU32 z, PxU32 width, PxU32 height)
{
	// x-major layout: consecutive x values are adjacent in memory
	return x + width * (y + height * z);
}
/**
\brief Utility to encode 3 indices into a single integer. Each index is allowed to use up to 10 bits.
Inverse of SDF::decodeTriple.
*/
PX_CUDA_CALLABLE PX_FORCE_INLINE PxU32 encodeTriple(PxU32 x, PxU32 y, PxU32 z)
{
	// PxU32 is unsigned, so "x >= 0" is always true (and triggers tautological-compare
	// warnings) — only the 10-bit upper bound needs checking.
	PX_ASSERT(x < 1024);
	PX_ASSERT(y < 1024);
	PX_ASSERT(z < 1024);
	return (z << 20) | (y << 10) | x;
}
/**
\brief Computes sample point locations from x/y/z indices
*/
PX_ALIGN_PREFIX(16)
struct GridQueryPointSampler
{
private:
	PxVec3 mOrigin;						// world-space position of sample (0,0,0); shifted by half a cell for cell-centered sampling
	PxVec3 mCellSize;					// size of one grid cell
	PxI32 mOffsetX, mOffsetY, mOffsetZ;	// index offsets applied before scaling (allows sampling a sub-window of the grid)
	PxI32 mStepX, mStepY, mStepZ;		// index strides (allows sampling every n-th cell)

public:
	// Default constructor: leaves members uninitialized
	PX_CUDA_CALLABLE GridQueryPointSampler() {}

	// \param cellCenteredSamples if true, samples lie at cell centers (origin shifted by half a cell), otherwise at cell origins
	PX_CUDA_CALLABLE GridQueryPointSampler(const PxVec3& origin, const PxVec3& cellSize, bool cellCenteredSamples,
		PxI32 offsetX = 0, PxI32 offsetY = 0, PxI32 offsetZ = 0, PxI32 stepX = 1, PxI32 stepY = 1, PxI32 stepZ = 1)
		: mCellSize(cellSize), mOffsetX(offsetX), mOffsetY(offsetY), mOffsetZ(offsetZ), mStepX(stepX), mStepY(stepY), mStepZ(stepZ)
	{
		if (cellCenteredSamples)
			mOrigin = origin + 0.5f * cellSize;
		else
			mOrigin = origin;
	}

	PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 getOrigin() const
	{
		return mOrigin;
	}

	// Cell size scaled by the strides, i.e. the spacing between consecutive sample points
	PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 getActiveCellSize() const
	{
		return PxVec3(mCellSize.x * mStepX, mCellSize.y * mStepY, mCellSize.z * mStepZ);
	}

	// World-space position of sample (x, y, z), applying offset and stride per axis
	PX_CUDA_CALLABLE PX_FORCE_INLINE PxVec3 getPoint(PxI32 x, PxI32 y, PxI32 z) const
	{
		return PxVec3(mOrigin.x + (x * mStepX + mOffsetX) * mCellSize.x,
			mOrigin.y + (y * mStepY + mOffsetY) * mCellSize.y,
			mOrigin.z + (z * mStepZ + mOffsetZ) * mCellSize.z);
	}
}
PX_ALIGN_SUFFIX(16);
/**
\brief Represents a dense SDF and allows to evaluate it. Uses trilinear interpolation between samples.
*/
class DenseSDF
{
public:
// Number of samples along each axis of the dense grid.
PxU32 mWidth, mHeight, mDepth;
private:
// Non-owning pointer to width*height*depth samples, laid out x-fastest
// (see idx3D). The class never frees this buffer.
PxReal* mSdf;
public:
PX_INLINE PX_CUDA_CALLABLE DenseSDF(PxU32 width, PxU32 height, PxU32 depth, PxReal* sdf)
{
initialize(width, height, depth, sdf);
}
// Default constructor leaves members uninitialized; call initialize() before use.
DenseSDF() {}
PX_FORCE_INLINE PX_CUDA_CALLABLE void initialize(PxU32 width, PxU32 height, PxU32 depth, PxReal* sdf)
{
this->mWidth = width;
this->mHeight = height;
this->mDepth = depth;
this->mSdf = sdf;
}
// Size in bytes of the referenced sample array.
// NOTE(review): the product is computed in 32 bits and could overflow for
// very large grids - presumably grids are small enough in practice.
PX_FORCE_INLINE PxU32 memoryConsumption()
{
return mWidth * mHeight * mDepth * sizeof(PxReal);
}
// Trilinearly interpolates the SDF at samplePoint, where samplePoint is
// expressed in grid-index space (units of cells, not world units): the
// integer part selects the base cell, the fractional part the lerp weights.
// The base indices are clamped to [0, size-2] so the +1 neighbors below
// always stay in range.
// NOTE(review): samplePoint is converted to PxU32 *before* clamping, so
// negative coordinates are not handled - callers presumably pass
// non-negative points; confirm at call sites.
PX_INLINE PX_CUDA_CALLABLE PxReal sampleSDFDirect(const PxVec3& samplePoint)
{
const PxU32 xBase = PxClamp(PxU32(samplePoint.x), 0u, mWidth - 2);
const PxU32 yBase = PxClamp(PxU32(samplePoint.y), 0u, mHeight - 2);
const PxU32 zBase = PxClamp(PxU32(samplePoint.z), 0u, mDepth - 2);
// Fetch the 8 corner samples of the cell and lerp with the fractional offsets.
return Interpolation::PxTriLerp(
mSdf[idx3D(xBase, yBase, zBase, mWidth, mHeight)],
mSdf[idx3D(xBase + 1, yBase, zBase, mWidth, mHeight)],
mSdf[idx3D(xBase, yBase + 1, zBase, mWidth, mHeight)],
mSdf[idx3D(xBase + 1, yBase + 1, zBase, mWidth, mHeight)],
mSdf[idx3D(xBase, yBase, zBase + 1, mWidth, mHeight)],
mSdf[idx3D(xBase + 1, yBase, zBase + 1, mWidth, mHeight)],
mSdf[idx3D(xBase, yBase + 1, zBase + 1, mWidth, mHeight)],
mSdf[idx3D(xBase + 1, yBase + 1, zBase + 1, mWidth, mHeight)], samplePoint.x - xBase, samplePoint.y - yBase, samplePoint.z - zBase);
}
};
}
}
/** @} */
#endif
| 23,659 | C | 45.120858 | 267 | 0.728602 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuMaverickNode.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_MAVERICK_NODE_H
#define GU_MAVERICK_NODE_H
#include "foundation/PxBounds3.h"
#include "foundation/PxTransform.h"
#include "common/PxPhysXCommonConfig.h"
#include "GuPrunerPayload.h"
#include "GuPrunerTypedef.h"
#define FREE_PRUNER_SIZE 16
#ifdef FREE_PRUNER_SIZE
namespace physx
{
namespace Gu
{
// A "maverick" node: a tiny fixed-capacity container holding up to
// FREE_PRUNER_SIZE objects in parallel arrays (payload / handle / bounds /
// transform / timestamp), managed with simple linear add/update/remove.
// Presumably used to buffer a small number of free-standing objects outside
// a full pruning structure - TODO confirm against callers.
class MaverickNode
{
public:
MaverickNode() : mNbFree(0) {}
~MaverickNode() {}
// Drops all stored objects (no per-object cleanup is performed).
PX_FORCE_INLINE void release() { mNbFree = 0; }
// Mimics a tree-node interface: ignores its argument and returns the
// static identity table mIndices.
PX_FORCE_INLINE const PxU32* getPrimitives(const PxU32*) const { return mIndices; }
PX_FORCE_INLINE PxU32 getPrimitiveIndex() const { return 0; }
PX_FORCE_INLINE PxU32 getNbPrimitives() const { return mNbFree; }
// Returns false when the node is full (FREE_PRUNER_SIZE objects).
bool addObject(const PrunerPayload& object, PrunerHandle handle, const PxBounds3& worldAABB, const PxTransform& transform, PxU32 timeStamp);
bool updateObject(const PrunerPayload& object, const PxBounds3& worldAABB, const PxTransform& transform);
bool updateObject(PrunerHandle handle, const PxBounds3& worldAABB, const PxTransform& transform);
bool removeObject(const PrunerPayload& object, PxU32& timeStamp);
bool removeObject(PrunerHandle handle, PxU32& timeStamp);
PxU32 removeMarkedObjects(PxU32 timeStamp);
void shiftOrigin(const PxVec3& shift);
void remove(PxU32 index);
PxU32 mNbFree; // Current number of objects in the "free array" (mFreeObjects/mFreeBounds)
PrunerPayload mFreeObjects[FREE_PRUNER_SIZE]; // mNbFree objects are stored here
PrunerHandle mFreeHandles[FREE_PRUNER_SIZE]; // mNbFree handles are stored here
PxBounds3 mFreeBounds[FREE_PRUNER_SIZE]; // mNbFree object bounds are stored here
PxTransform mFreeTransforms[FREE_PRUNER_SIZE]; // mNbFree transforms are stored here
PxU32 mFreeStamps[FREE_PRUNER_SIZE];
static const PxU32 mIndices[FREE_PRUNER_SIZE];
};
}
}
#endif
#endif
| 3,637 | C | 42.831325 | 148 | 0.739071 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuPruningPool.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuPruningPool.h"
#include "foundation/PxMemory.h"
#include "common/PxProfileZone.h"
using namespace physx;
using namespace Gu;
// Constructs an empty pool; all arrays are allocated lazily in resize().
// 'mode' controls whether (and in which space) transforms are cached
// alongside the bounds.
PruningPool::PruningPool(PxU64 contextID, TransformCacheMode mode) :
mNbObjects (0),
mMaxNbObjects (0),
mObjects (NULL),
mTransforms (NULL),
mTransformCacheMode (mode),
mHandleToIndex (NULL),
mIndexToHandle (NULL),
mFirstRecycledHandle(INVALID_PRUNERHANDLE),
mContextID (contextID)
{
}
// Frees all parallel arrays owned by the pool. The bounds array is owned by
// mWorldBoxes and is released through it.
PruningPool::~PruningPool()
{
mWorldBoxes.release();
PX_FREE(mIndexToHandle);
PX_FREE(mHandleToIndex);
PX_FREE(mTransforms);
PX_FREE(mObjects);
}
// Reallocates the pool's parallel arrays to hold newCapacity objects,
// preserving existing content. Returns false (leaving the pool untouched)
// if any allocation fails.
bool PruningPool::resize(PxU32 newCapacity)
{
PX_PROFILE_ZONE("PruningPool::resize", mContextID);
// Transforms are only cached when a cache mode was selected at construction.
const bool useTransforms = mTransformCacheMode!=TRANSFORM_CACHE_UNUSED;
PxTransform* newTransforms = useTransforms ? PX_ALLOCATE(PxTransform, newCapacity, "Pruner transforms") : NULL;
if(useTransforms && !newTransforms)
return false;
PrunerPayload* newData = PX_ALLOCATE(PrunerPayload, newCapacity, "PrunerPayload*");
PrunerHandle* newIndexToHandle = PX_ALLOCATE(PrunerHandle, newCapacity, "Pruner Index Mapping");
PoolIndex* newHandleToIndex = PX_ALLOCATE(PoolIndex, newCapacity, "Pruner Index Mapping");
if( (!newData) || (!newIndexToHandle) || (!newHandleToIndex))
{
// Roll back any allocations that did succeed.
PX_FREE(newHandleToIndex);
PX_FREE(newIndexToHandle);
PX_FREE(newTransforms);
PX_FREE(newData);
return false;
}
mWorldBoxes.resize(newCapacity, mNbObjects);
// Only mNbObjects entries are live in the index-ordered arrays...
if(mObjects) PxMemCopy(newData, mObjects, mNbObjects*sizeof(PrunerPayload));
if(mTransforms) PxMemCopy(newTransforms, mTransforms, mNbObjects*sizeof(PxTransform));
if(mIndexToHandle) PxMemCopy(newIndexToHandle, mIndexToHandle, mNbObjects*sizeof(PrunerHandle));
// ...but mHandleToIndex also stores the freelist of recycled handles in the
// slots of removed handles (see removeObject), and those slots can lie
// anywhere in [0, mMaxNbObjects), so the full range must be copied.
if(mHandleToIndex) PxMemCopy(newHandleToIndex, mHandleToIndex, mMaxNbObjects*sizeof(PoolIndex)); // PT: why mMaxNbObjects here? on purpose?
mMaxNbObjects = newCapacity;
PX_FREE(mIndexToHandle);
PX_FREE(mHandleToIndex);
PX_FREE(mTransforms);
PX_FREE(mObjects);
mObjects = newData;
mTransforms = newTransforms;
mHandleToIndex = newHandleToIndex;
mIndexToHandle = newIndexToHandle;
return true;
}
// Grows the pool's capacity up-front so subsequent addObjects() calls do not
// need to reallocate. Does nothing when the pool is already large enough.
void PruningPool::preallocate(PxU32 newCapacity)
{
	if(newCapacity<=mMaxNbObjects)
		return;
	resize(newCapacity);
}
// Adds 'count' objects to the pool, writing one PrunerHandle per object into
// 'results'. Returns the number of objects actually added: on allocation
// failure the failing slot receives INVALID_PRUNERHANDLE and the function
// returns early. 'transforms' must be non-NULL iff the pool caches transforms.
PxU32 PruningPool::addObjects(PrunerHandle* results, const PxBounds3* bounds, const PrunerPayload* data, const PxTransform* transforms, PxU32 count)
{
PX_PROFILE_ZONE("PruningPool::addObjects", mContextID);
PX_ASSERT((!transforms && mTransformCacheMode==TRANSFORM_CACHE_UNUSED) || (transforms && mTransformCacheMode!=TRANSFORM_CACHE_UNUSED));
for(PxU32 i=0;i<count;i++)
{
if(mNbObjects==mMaxNbObjects) // increase the capacity on overflow
{
// Grow by 1.5x, with a minimum capacity of 64.
const PxU32 newCapacity = PxU32(float(mMaxNbObjects)*1.5f);
if(!resize(PxMax<PxU32>(newCapacity, 64)))
//if(!resize(PxMax<PxU32>(mMaxNbObjects*2, 64)))
{
// pool can return an invalid handle if memory alloc fails
// should probably have an error here or not handle this
results[i] = INVALID_PRUNERHANDLE; // PT: we need to write the potentially invalid handle to let users know which object failed first
return i;
}
}
PX_ASSERT(mNbObjects!=mMaxNbObjects);
const PoolIndex index = mNbObjects++;
// update mHandleToIndex and mIndexToHandle mappings
PrunerHandle handle;
if(mFirstRecycledHandle != INVALID_PRUNERHANDLE)
{
// mFirstRecycledHandle is an entry into a freelist for removed slots
// this path is only taken if we have any removed slots
handle = mFirstRecycledHandle;
// The next freelist entry is stored in the recycled handle's slot.
mFirstRecycledHandle = mHandleToIndex[handle];
}
else
{
// No holes: handles and indices grow in lockstep.
handle = index;
}
// PT: TODO: investigate why we added mIndexToHandle/mHandleToIndex. The initial design with 'Prunable' objects didn't need these arrays.
// PT: these arrays are "parallel"
mWorldBoxes.getBounds() [index] = bounds[i]; // store the payload/userData and AABB in parallel arrays
mObjects [index] = data[i];
mIndexToHandle [index] = handle;
if(transforms && mTransforms)
mTransforms [index] = transforms[i];
mHandleToIndex[handle] = index;
results[i] = handle;
}
return count;
}
// Removes the object identified by handle 'h' using swap-with-last, so the
// dense arrays stay hole-free. Returns the pool index that the (previously)
// last object occupied, so callers can update external acceleration
// structures that reference objects by pool index. The removed handle is
// pushed onto the recycled-handle freelist embedded in mHandleToIndex.
PoolIndex PruningPool::removeObject(PrunerHandle h, PrunerPayloadRemovalCallback* removalCallback)
{
PX_PROFILE_ZONE("PruningPool::removeObject", mContextID);
PX_ASSERT(mNbObjects);
// remove the object and its AABB by provided PrunerHandle and update mHandleToIndex and mIndexToHandle mappings
const PoolIndex indexOfRemovedObject = mHandleToIndex[h]; // retrieve object's index from handle
// Notify the caller before the payload slot is overwritten below.
if(removalCallback)
removalCallback->invoke(1, &mObjects[indexOfRemovedObject]);
const PoolIndex indexOfLastObject = --mNbObjects; // swap the object at last index with index
if(indexOfLastObject!=indexOfRemovedObject)
{
// PT: move last object's data to recycled spot (from removed object)
// PT: the last object has moved so we need to handle the mappings for this object
// PT: TODO: investigate where this double-mapping comes from. It was not needed in the original design.
// PT: these arrays are "parallel"
PxBounds3* bounds = mWorldBoxes.getBounds();
const PrunerHandle handleOfLastObject = mIndexToHandle[indexOfLastObject];
bounds [indexOfRemovedObject] = bounds [indexOfLastObject];
mObjects [indexOfRemovedObject] = mObjects [indexOfLastObject];
if(mTransforms)
mTransforms [indexOfRemovedObject] = mTransforms [indexOfLastObject];
mIndexToHandle [indexOfRemovedObject] = handleOfLastObject;
mHandleToIndex[handleOfLastObject] = indexOfRemovedObject;
}
// mHandleToIndex also stores the freelist for removed handles (in place of holes formed by removed handles)
mHandleToIndex[h] = mFirstRecycledHandle; // update linked list of available recycled handles
mFirstRecycledHandle = h; // update the list head
return indexOfLastObject;
}
void PruningPool::shiftOrigin(const PxVec3& shift)
{
PX_PROFILE_ZONE("PruningPool::shiftOrigin", mContextID);
const PxU32 nb = mNbObjects;
PxBounds3* bounds = mWorldBoxes.getBounds();
for(PxU32 i=0; i<nb; i++)
{
bounds[i].minimum -= shift;
bounds[i].maximum -= shift;
}
if(mTransforms && mTransformCacheMode==TRANSFORM_CACHE_GLOBAL)
{
for(PxU32 i=0; i<nb; i++)
mTransforms[i].p -= shift;
}
}
// Writes new (epsilon-inflated) bounds - and optionally new transforms - for
// 'count' objects identified by 'handles'. When 'boundsIndices' is provided,
// it remaps each handle to its slot in newBounds/newTransforms; otherwise the
// input arrays are consumed sequentially. The hasTransforms template flag
// removes the per-object transform branch at compile time.
template<const bool hasTransforms>
static void updateAndInflateBounds(PruningPool& pool, const PrunerHandle* PX_RESTRICT handles, const PxU32* PX_RESTRICT boundsIndices, const PxBounds3* PX_RESTRICT newBounds,
const PxTransform32* PX_RESTRICT newTransforms, PxU32 count, float epsilon)
{
PxBounds3* PX_RESTRICT bounds = pool.mWorldBoxes.getBounds();
PxTransform* PX_RESTRICT transforms = hasTransforms ? pool.mTransforms : NULL;
if(boundsIndices)
{
// Remapped path: each handle pairs with boundsIndices[i] in the inputs.
while(count--)
{
const PoolIndex poolIndex = pool.getIndex(*handles++);
PX_ASSERT(poolIndex!=INVALID_PRUNERHANDLE);
const PxU32 remappedIndex = *boundsIndices++;
if(hasTransforms)
transforms[poolIndex] = newTransforms[remappedIndex];
inflateBounds<true>(bounds[poolIndex], newBounds[remappedIndex], epsilon);
}
}
else
{
// Sequential path: inputs are consumed in handle order.
while(count--)
{
const PoolIndex poolIndex = pool.getIndex(*handles++);
PX_ASSERT(poolIndex!=INVALID_PRUNERHANDLE);
if(hasTransforms)
{
transforms[poolIndex] = *newTransforms;
newTransforms++;
}
inflateBounds<true>(bounds[poolIndex], *newBounds++, epsilon);
}
}
}
// Public entry point: forwards to the templated helper above so that the
// "pool caches transforms" test is resolved once, at compile time, instead
// of per object.
void PruningPool::updateAndInflateBounds(const PrunerHandle* handles, const PxU32* boundsIndices, const PxBounds3* newBounds,
										const PxTransform32* newTransforms, PxU32 count, float epsilon)
{
	PX_PROFILE_ZONE("PruningPool::updateAndInflateBounds", mContextID);

	if(mTransforms)
		::updateAndInflateBounds<true>(*this, handles, boundsIndices, newBounds, newTransforms, count, epsilon);
	else
		::updateAndInflateBounds<false>(*this, handles, boundsIndices, newBounds, NULL, count, epsilon);
}
| 9,531 | C++ | 34.834586 | 174 | 0.754066 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuActorShapeMap.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuActorShapeMap.h"
#include "foundation/PxMemory.h"
using namespace physx;
using namespace Gu;
namespace physx
{
namespace Gu
{
/*PX_FORCE_INLINE*/ uint32_t PxComputeHash(const ActorShapeMap::ActorShape& owner)
{
	// Both pointers are at least 4-byte aligned, so their two lowest bits
	// carry no information and are discarded.
	PX_ASSERT(!(size_t(owner.mActor)&3));
	PX_ASSERT(!(size_t(owner.mShape)&3));
	// Combine 32 bits of each pointer into a single 64-bit key, then hash it.
	const uint32_t actorBits = uint32_t(size_t(owner.mActor)>>2);
	const uint32_t shapeBits = uint32_t(size_t(owner.mShape)>>2);
	const uint64_t key = (uint64_t(actorBits)<<32)|uint64_t(shapeBits);
	return ::PxComputeHash(key);
}
}
}
// Starts with an empty direct-indexed cache; it is grown on demand by
// resizeCache() when add() sees a larger actor index.
ActorShapeMap::ActorShapeMap() : mCacheSize(0), mCache(NULL)
{
}
// Frees the cache array; mDatabase releases its own storage.
ActorShapeMap::~ActorShapeMap()
{
PX_FREE(mCache);
}
// Grows the direct-indexed cache so that 'index' becomes addressable.
// Existing entries are preserved and the newly added tail is zeroed
// (a NULL mShape marks a free slot).
void ActorShapeMap::resizeCache(PxU32 index)
{
	// Double the current size (starting at 64), but make sure the requested
	// index fits; if it does not, size to twice the required minimum.
	const PxU32 minSize = index+1;
	PxU32 newSize = mCacheSize ? mCacheSize*2 : 64;
	if(newSize<minSize)
		newSize = minSize*2;

	Cache* newItems = PX_ALLOCATE(Cache, newSize, "Cache");
	if(mCache)
		PxMemCopy(newItems, mCache, mCacheSize*sizeof(Cache));
	PxMemZero(newItems+mCacheSize, (newSize-mCacheSize)*sizeof(Cache));
	PX_FREE(mCache);
	mCache = newItems;
	mCacheSize = newSize;
}
// Records actorShapeData for an (actor, shape) pair. Uses the O(1) cache
// slot when actorIndex is valid and the slot is free or already holds this
// shape; otherwise falls back to the hash map. Returns true when a new entry
// was created, false when an existing entry was overwritten (hash-map path:
// returns the insert() result).
bool ActorShapeMap::add(PxU32 actorIndex, const void* actor, const void* shape, ActorShapeData actorShapeData)
{
if(actorIndex!=PX_INVALID_INDEX)
{
if(actorIndex>=mCacheSize)
resizeCache(actorIndex);
//if(!mCache[actorIndex].mActor)
// A NULL mShape marks a free cache slot.
if(!mCache[actorIndex].mShape)
{
//mCache[actorIndex].mActor = actor;
mCache[actorIndex].mShape = shape;
mCache[actorIndex].mData = actorShapeData;
return true;
}
//PX_ASSERT(mCache[actorIndex].mActor==actor);
PX_ASSERT(mCache[actorIndex].mShape);
// Same shape already cached for this actor index: overwrite in place.
if(mCache[actorIndex].mShape==shape)
{
mCache[actorIndex].mData = actorShapeData;
return false;
}
}
// Cache miss (different shape for this actor index, or no index): use the
// slower hash-map storage.
return mDatabase.insert(ActorShape(actor, shape), actorShapeData);
}
// Removes the entry for an (actor, shape) pair, optionally returning the
// stored data through 'removed'. Returns true if an entry was found.
// NOTE(review): when actorIndex is valid, mCache[actorIndex] is read without
// a bounds check - this assumes add() was called with this index first (which
// grows the cache); confirm at call sites.
bool ActorShapeMap::remove(PxU32 actorIndex, const void* actor, const void* shape, ActorShapeData* removed)
{
if(actorIndex!=PX_INVALID_INDEX)
{
//if(mCache[actorIndex].mActor==actor && mCache[actorIndex].mShape==shape)
if(mCache[actorIndex].mShape==shape)
{
//mCache[actorIndex].mActor = NULL;
mCache[actorIndex].mShape = NULL;
// Sanity check: a cached pair must not also live in the hash map. The
// erase() call only executes in builds where PX_ASSERT is enabled; in
// release builds no database access happens on this path.
PX_ASSERT(!mDatabase.erase(ActorShape(actor, shape)));
if(removed)
*removed = mCache[actorIndex].mData;
return true;
}
}
// Cache miss: remove from the hash map instead.
PxHashMap<ActorShape, ActorShapeData>::Entry removedEntry;
const bool found = mDatabase.erase(ActorShape(actor, shape), removedEntry);
if(found && removed)
*removed = removedEntry.second;
return found;
}
// Looks up the data stored for an (actor, shape) pair: fast cache slot first,
// then the hash map. The pair must exist - the hash-map result is asserted
// and dereferenced unconditionally.
// NOTE(review): as in remove(), a valid actorIndex is assumed to be within
// the cache bounds established by a prior add().
ActorShapeData ActorShapeMap::find(PxU32 actorIndex, const void* actor, const void* shape) const
{
if(actorIndex!=PX_INVALID_INDEX)
{
if(mCache[actorIndex].mShape==shape)
//if(mCache[actorIndex].mActor==actor && mCache[actorIndex].mShape==shape)
{
return mCache[actorIndex].mData;
}
}
const PxHashMap<ActorShape, ActorShapeData>::Entry* e = mDatabase.find(ActorShape(actor, shape));
PX_ASSERT(e);
return e->second;
}
| 4,580 | C++ | 31.260563 | 110 | 0.731878 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuGeometryChecks.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_GEOMETRY_CHECKS_H
#define GU_GEOMETRY_CHECKS_H
#include "geometry/PxBoxGeometry.h"
#include "geometry/PxSphereGeometry.h"
#include "geometry/PxCapsuleGeometry.h"
#include "geometry/PxPlaneGeometry.h"
#include "geometry/PxConvexMeshGeometry.h"
#include "geometry/PxParticleSystemGeometry.h"
#include "geometry/PxTetrahedronMeshGeometry.h"
#include "geometry/PxTriangleMeshGeometry.h"
#include "geometry/PxHeightFieldGeometry.h"
#include "geometry/PxHairSystemGeometry.h"
#include "geometry/PxCustomGeometry.h"
namespace physx
{
// We sometimes overload capsule code for spheres, so every sphere should have
// valid capsule data (height = 0). This is preferable to a typedef so that we
// can maintain traits separately for a sphere, but some care is required to deal
// with the fact that when a reference to a capsule is extracted, it may have its
// type field set to eSPHERE
// Compile-time mapping from a concrete geometry class to its PxGeometryType
// enum value. Unspecialized types map to eINVALID; const-qualified types
// inherit the mapping of their unqualified form.
template <typename T>
struct PxcGeometryTraits
{
enum {TypeID = PxGeometryType::eINVALID };
};
template <typename T> struct PxcGeometryTraits<const T> { enum { TypeID = PxcGeometryTraits<T>::TypeID }; };
template <> struct PxcGeometryTraits<PxBoxGeometry> { enum { TypeID = PxGeometryType::eBOX }; };
template <> struct PxcGeometryTraits<PxSphereGeometry> { enum { TypeID = PxGeometryType::eSPHERE }; };
template <> struct PxcGeometryTraits<PxCapsuleGeometry> { enum { TypeID = PxGeometryType::eCAPSULE }; };
template <> struct PxcGeometryTraits<PxPlaneGeometry> { enum { TypeID = PxGeometryType::ePLANE }; };
template <> struct PxcGeometryTraits<PxParticleSystemGeometry> { enum { TypeID = PxGeometryType::ePARTICLESYSTEM}; };
template <> struct PxcGeometryTraits<PxConvexMeshGeometry> { enum { TypeID = PxGeometryType::eCONVEXMESH }; };
template <> struct PxcGeometryTraits<PxTriangleMeshGeometry> { enum { TypeID = PxGeometryType::eTRIANGLEMESH }; };
template <> struct PxcGeometryTraits<PxTetrahedronMeshGeometry> { enum { TypeID = PxGeometryType::eTETRAHEDRONMESH }; };
template <> struct PxcGeometryTraits<PxHeightFieldGeometry> { enum { TypeID = PxGeometryType::eHEIGHTFIELD }; };
template <> struct PxcGeometryTraits<PxHairSystemGeometry> { enum { TypeID = PxGeometryType::eHAIRSYSTEM }; };
template <> struct PxcGeometryTraits<PxCustomGeometry> { enum { TypeID = PxGeometryType::eCUSTOM }; };
// Debug-only check that a PxGeometry reference really is of type T (typically
// before a downcast). Compiles to nothing when PX_ASSERT is disabled.
template<class T> PX_CUDA_CALLABLE PX_FORCE_INLINE void checkType(const PxGeometry& geometry)
{
PX_ASSERT(PxU32(geometry.getType()) == PxU32(PxcGeometryTraits<T>::TypeID));
PX_UNUSED(geometry);
}
// Capsule specialization: spheres are also accepted, because capsule code is
// reused for spheres (a sphere is a capsule of height 0 - see the comment
// above PxcGeometryTraits).
template<> PX_CUDA_CALLABLE PX_FORCE_INLINE void checkType<PxCapsuleGeometry>(const PxGeometry& geometry)
{
PX_ASSERT(geometry.getType() == PxGeometryType::eCAPSULE || geometry.getType() == PxGeometryType::eSPHERE);
PX_UNUSED(geometry);
}
// Same as above for const-qualified capsule geometry.
template<> PX_CUDA_CALLABLE PX_FORCE_INLINE void checkType<const PxCapsuleGeometry>(const PxGeometry& geometry)
{
PX_ASSERT(geometry.getType()== PxGeometryType::eCAPSULE || geometry.getType() == PxGeometryType::eSPHERE);
PX_UNUSED(geometry);
}
}
#if !defined(__CUDACC__)
// the shape structure relies on punning capsules and spheres
PX_COMPILE_TIME_ASSERT(PX_OFFSET_OF(physx::PxCapsuleGeometry, radius) == PX_OFFSET_OF(physx::PxSphereGeometry, radius));
#endif
#endif
| 4,973 | C | 50.27835 | 122 | 0.760708 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuSecondaryPruner.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_SECONDARY_PRUNER_H
#define GU_SECONDARY_PRUNER_H
#include "common/PxPhysXCommonConfig.h"
#include "GuPruner.h"
namespace physx
{
class PxRenderOutput;
namespace Gu
{
class PruningPool;
// Abstract interface for a "companion" (secondary) pruner that mirrors a
// subset of the objects held in a PruningPool. Objects are identified both by
// PrunerHandle and by their PoolIndex; the pool's swap-with-last removal is
// forwarded through removeObject()/swapIndex() so the companion structure can
// keep its index references in sync.
class CompanionPruner : public PxUserAllocated
{
public:
CompanionPruner() {}
virtual ~CompanionPruner() {}
// Object lifecycle, driven by the owning pruner.
virtual bool addObject(const PrunerPayload& object, PrunerHandle handle, const PxBounds3& worldAABB, const PxTransform& transform, PxU32 timeStamp, PoolIndex poolIndex) = 0;
virtual bool updateObject(const PrunerPayload& object, PrunerHandle handle, const PxBounds3& worldAABB, const PxTransform& transform, PoolIndex poolIndex) = 0;
virtual bool removeObject(const PrunerPayload& object, PrunerHandle handle, PxU32 objectIndex, PxU32 swapObjectIndex) = 0;
// Called when the pool swaps two indices without removing an object.
virtual void swapIndex(PxU32 objectIndex, PxU32 swapObjectIndex) = 0;
virtual PxU32 removeMarkedObjects(PxU32 timeStamp) = 0;
virtual void shiftOrigin(const PxVec3& shift) = 0;
virtual void timeStampChange() = 0;
virtual void build() = 0;
virtual PxU32 getNbObjects() const = 0;
virtual void release() = 0;
virtual void visualize(PxRenderOutput& out, PxU32 color) const = 0;
// Scene queries against the companion structure.
virtual bool raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& prunerCallback) const = 0;
virtual bool overlap(const ShapeData& queryVolume, PrunerOverlapCallback& prunerCallback) const = 0;
virtual bool sweep(const ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& prunerCallback) const = 0;
virtual void getGlobalBounds(PxBounds3&) const = 0;
};
CompanionPruner* createCompanionPruner(PxU64 contextID, CompanionPrunerType type, const PruningPool* pool);
}
}
#endif
| 3,775 | C | 52.183098 | 175 | 0.705166 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuAABBTreeNode.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_AABBTREE_NODE_H
#define GU_AABBTREE_NODE_H
#include "common/PxPhysXCommonConfig.h"
#include "foundation/PxBounds3.h"
#include "foundation/PxVecMath.h"
#include "foundation/PxUserAllocated.h"
namespace physx
{
using namespace aos;
namespace Gu
{
// Node of a binary AABB tree. The two children of an internal node are stored
// adjacently in the node array (positive child at mData>>1, negative child
// immediately after), so a single index identifies both.
//
// mData bit layout (see also the comment on the member below):
// bit 0 : 1 = leaf, 0 = internal node
// leaf: bits 1-4 = number of primitives (max 15), bits 5-31 = start
// offset into the primitive-index array
// internal: bits 1-31 = index of the positive child node
struct BVHNode : public PxUserAllocated
{
public:
PX_FORCE_INLINE BVHNode() {}
PX_FORCE_INLINE ~BVHNode() {}
PX_FORCE_INLINE PxU32 isLeaf() const { return mData&1; }
// Leaf accessors: primitive start offset and count.
PX_FORCE_INLINE const PxU32* getPrimitives(const PxU32* base) const { return base + (mData>>5); }
PX_FORCE_INLINE PxU32* getPrimitives(PxU32* base) { return base + (mData>>5); }
PX_FORCE_INLINE PxU32 getPrimitiveIndex() const { return mData>>5; }
PX_FORCE_INLINE PxU32 getNbPrimitives() const { return (mData>>1)&15; }
// Internal-node accessors: children are adjacent, so neg index = pos index + 1.
PX_FORCE_INLINE PxU32 getPosIndex() const { return mData>>1; }
PX_FORCE_INLINE PxU32 getNegIndex() const { return (mData>>1) + 1; }
PX_FORCE_INLINE const BVHNode* getPos(const BVHNode* base) const { return base + (mData>>1); }
PX_FORCE_INLINE const BVHNode* getNeg(const BVHNode* base) const { const BVHNode* P = getPos(base); return P ? P+1 : NULL; }
PX_FORCE_INLINE BVHNode* getPos(BVHNode* base) { return base + (mData >> 1); }
PX_FORCE_INLINE BVHNode* getNeg(BVHNode* base) { BVHNode* P = getPos(base); return P ? P + 1 : NULL; }
PX_FORCE_INLINE PxU32 getNbRuntimePrimitives() const { return (mData>>1)&15; }
// Overwrites the 4-bit primitive-count field, leaving all other bits intact.
PX_FORCE_INLINE void setNbRunTimePrimitives(PxU32 val)
{
PX_ASSERT(val<16);
PxU32 data = mData & ~(15<<1);
data |= val<<1;
mData = data;
}
// Center and half-extents of the node's AABB, as SIMD vectors.
PX_FORCE_INLINE void getAABBCenterExtentsV(Vec3V* center, Vec3V* extents) const
{
const Vec4V minV = V4LoadU(&mBV.minimum.x);
const Vec4V maxV = V4LoadU(&mBV.maximum.x);
const float half = 0.5f;
const FloatV halfV = FLoad(half);
*extents = Vec3V_From_Vec4V(V4Scale(V4Sub(maxV, minV), halfV));
*center = Vec3V_From_Vec4V(V4Scale(V4Add(maxV, minV), halfV));
}
// Variant without the 0.5 scale: returns (max+min) and (max-min), i.e.
// twice the center and twice the extents - callers account for the factor 2.
PX_FORCE_INLINE void getAABBCenterExtentsV2(Vec3V* center, Vec3V* extents) const
{
const Vec4V minV = V4LoadU(&mBV.minimum.x);
const Vec4V maxV = V4LoadU(&mBV.maximum.x);
*extents = Vec3V_From_Vec4V(V4Sub(maxV, minV));
*center = Vec3V_From_Vec4V(V4Add(maxV, minV));
}
PxBounds3 mBV; // Global bounding-volume enclosing all the node-related primitives
PxU32 mData; // 27 bits node or prim index|4 bits #prims|1 bit leaf
};
} // namespace Gu
}
#endif // GU_AABBTREE_NODE_H
| 4,429 | C | 43.747474 | 129 | 0.67487 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuCCTSweepTests.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "geometry/PxSphereGeometry.h"
#include "GuSweepTests.h"
#include "GuHeightFieldUtil.h"
#include "GuEntityReport.h"
#include "GuDistanceSegmentBox.h"
#include "GuDistancePointBox.h"
#include "GuSweepBoxSphere.h"
#include "GuSweepCapsuleBox.h"
#include "GuSweepBoxBox.h"
#include "GuSweepBoxTriangle_SAT.h"
#include "GuSweepTriangleUtils.h"
#include "GuInternal.h"
#include "foundation/PxVecMath.h"
using namespace physx;
using namespace Gu;
using namespace Cm;
using namespace aos;
// Debug switch: when true, sweepBoxVsTriangles cross-checks the analytically projected box
// radius against a brute-force projection of all 8 box vertices (see gValidateBoxRadiusComputation use below).
static const bool gValidateBoxRadiusComputation = false;
///////////////////////////////////////////
// Precise capsule-vs-box sweep. A zero-length capsule segment is handled as a sphere sweep.
// On hit, fills sweepHit.distance/normal and (on request, for non-initial-overlap hits) the
// impact position, which is recomputed via a closest-point query on the swept configuration.
bool sweepCapsule_BoxGeom_Precise(GU_CAPSULE_SWEEP_FUNC_PARAMS)
{
	PX_ASSERT(geom.getType() == PxGeometryType::eBOX);
	PX_UNUSED(threadContext);
	PX_UNUSED(inflation);
	PX_UNUSED(capsulePose_);
	PX_UNUSED(capsuleGeom_);

	const PxBoxGeometry& boxGeom = static_cast<const PxBoxGeometry&>(geom);

	if(lss.p0 == lss.p1)
	{
		// Degenerate segment => the capsule is actually a sphere.
		//TODO: Check if this is really faster than using a "sphere-aware" version of sweepCapsuleBox
		Box obb;
		buildFrom(obb, pose.p, boxGeom.halfExtents, pose.q);

		if(!sweepBoxSphere(obb, lss.radius, lss.p0, unitDir, distance, sweepHit.distance, sweepHit.normal, hitFlags))
			return false;

		sweepHit.normal = -sweepHit.normal;
		sweepHit.flags = PxHitFlag::eNORMAL;

		if((hitFlags & PxHitFlag::ePOSITION) && sweepHit.distance!=0.0f)
		{
			// The sweep test doesn't compute the impact point automatically, so we have to do it here:
			// take the closest point on the box to the swept sphere center.
			const PxVec3 sweptCenter = lss.p0 + unitDir * sweepHit.distance;
			PxVec3 boxPoint;
			distancePointBoxSquared(sweptCenter, obb.center, obb.extents, obb.rot, &boxPoint);
			sweepHit.position = obb.rotate(boxPoint) + obb.center;
			sweepHit.flags |= PxHitFlag::ePOSITION;
		}
		return true;
	}

	if(!sweepCapsuleBox(lss, pose, boxGeom.halfExtents, unitDir, distance, sweepHit.position, sweepHit.distance, sweepHit.normal, hitFlags))
		return false;

	sweepHit.flags = PxHitFlag::eNORMAL;

	if((hitFlags & PxHitFlag::ePOSITION) && sweepHit.distance!=0.0f)
	{
		// The sweep test doesn't compute the impact point automatically, so we have to do it here:
		// advance the capsule to the time of impact and take the closest point on the box.
		Capsule sweptCapsule = lss;
		sweptCapsule.p0 += unitDir * sweepHit.distance;
		sweptCapsule.p1 += unitDir * sweepHit.distance;

		Box obb;
		buildFrom(obb, pose.p, boxGeom.halfExtents, pose.q);

		PxVec3 boxPoint;
		distanceSegmentBoxSquared(sweptCapsule, obb, NULL, &boxPoint);
		sweepHit.position = pose.q.rotate(boxPoint) + pose.p;
		sweepHit.flags |= PxHitFlag::ePOSITION;
	}
	return true;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Precise box-vs-sphere sweep. Works in a space relative to the sphere's position and sweeps
// the (inflated) sphere against the box with the reversed direction, which is equivalent to
// sweeping the box against the sphere.
bool sweepBox_SphereGeom_Precise(GU_BOX_SWEEP_FUNC_PARAMS)
{
	PX_UNUSED(threadContext);
	PX_UNUSED(boxPose_);
	PX_UNUSED(boxGeom_);
	PX_ASSERT(geom.getType() == PxGeometryType::eSPHERE);

	const PxSphereGeometry& sphereGeom = static_cast<const PxSphereGeometry&>(geom);

	// PT: move to relative space
	const Box relBox(box.center - pose.p, box.extents, box.rot);

	const PxReal inflatedRadius = sphereGeom.radius + inflation;

	if(!sweepBoxSphere(relBox, inflatedRadius, PxVec3(0), -unitDir, distance, sweepHit.distance, sweepHit.normal, hitFlags))
		return false;

	sweepHit.flags = PxHitFlag::eNORMAL;

	if((hitFlags & PxHitFlag::ePOSITION) && sweepHit.distance!=0.0f)
	{
		// The sweep test doesn't compute the impact point automatically, so we have to do it here:
		// the closest point on the box to the (relatively) moved sphere center is the impact point.
		const PxVec3 motion = sweepHit.distance * unitDir;
		PxVec3 boxPoint;
		distancePointBoxSquared(-motion, relBox.center, relBox.extents, relBox.rot, &boxPoint);
		// PT: undo move to local space, and advance by the sweep motion
		sweepHit.position = relBox.rotate(boxPoint) + box.center + motion;
		sweepHit.flags |= PxHitFlag::ePOSITION;
	}
	return true;
}
// Precise box-vs-capsule sweep. Works in a space relative to the capsule's position and sweeps
// the capsule against the box with the reversed direction (equivalent to sweeping the box),
// then flips the reported normal back.
bool sweepBox_CapsuleGeom_Precise(GU_BOX_SWEEP_FUNC_PARAMS)
{
	PX_ASSERT(geom.getType() == PxGeometryType::eCAPSULE);
	PX_UNUSED(inflation);
	PX_UNUSED(boxGeom_);
	PX_UNUSED(threadContext);
	const PxCapsuleGeometry& capsuleGeom = static_cast<const PxCapsuleGeometry&>(geom);
	// PT: move to relative space
	const PxVec3 delta = box.center - pose.p;
	Box relBox(delta, box.extents, box.rot);
	// Capsule segment centered at the origin in relative space
	Capsule capsule;
	const PxVec3 halfHeightVector = getCapsuleHalfHeightVector(pose, capsuleGeom);
	capsule.p0 = halfHeightVector;
	capsule.p1 = -halfHeightVector;
	capsule.radius = capsuleGeom.radius;
	// PT: TODO: remove this. We convert to PxTransform here but inside sweepCapsuleBox we convert back to a matrix.
	const PxTransform boxWorldPose(delta, boxPose_.q);
	PxVec3 n;
	// Sweep the capsule against the box with the reversed direction (box is the moving object)
	if(!sweepCapsuleBox(capsule, boxWorldPose, relBox.extents, -unitDir, distance, sweepHit.position, sweepHit.distance, n, hitFlags))
		return false;
	// The normal came from the reversed sweep: flip it so it opposes the box's motion
	sweepHit.normal = -n;
	sweepHit.flags = PxHitFlag::eNORMAL;
	if((hitFlags & PxHitFlag::ePOSITION) && sweepHit.distance!=0.0f)
	{
		// The sweep test doesn't compute the impact point automatically, so we have to do it here.
		relBox.center += (unitDir * sweepHit.distance);
		PxVec3 closest;
		const PxReal d = distanceSegmentBoxSquared(capsule, relBox, NULL, &closest);
		PX_UNUSED(d);
		// Compute point on the box, after sweep
		sweepHit.position = relBox.transform(closest) + pose.p; // PT: undo move to local space here
		sweepHit.flags |= PxHitFlag::ePOSITION;
	}
	return true;
}
// Precise box-vs-box sweep. Works in a space relative to the static box's position; the hit
// position (when computed by sweepBoxBox for a non-initial-overlap hit) is shifted back to
// world space before returning.
bool sweepBox_BoxGeom_Precise(GU_BOX_SWEEP_FUNC_PARAMS)
{
	PX_ASSERT(geom.getType() == PxGeometryType::eBOX);
	PX_UNUSED(threadContext);
	PX_UNUSED(inflation);
	PX_UNUSED(boxPose_);
	PX_UNUSED(boxGeom_);

	const PxBoxGeometry& boxGeom = static_cast<const PxBoxGeometry&>(geom);

	// PT: move to local space
	const Box movingBox(box.center - pose.p, box.extents, box.rot);

	Box staticBox;
	buildFrom(staticBox, PxVec3(0), boxGeom.halfExtents, pose.q);

	if(!sweepBoxBox(movingBox, staticBox, unitDir, distance, hitFlags, sweepHit))
		return false;

	// PT: undo move to local space (skipped for initial-overlap hits, which carry no position)
	if(sweepHit.distance!=0.0f)
		sweepHit.position += pose.p;
	return true;
}
// PT: test: new version for CCT, based on code for general sweeps. Just to check it works or not with rotations
// TODO: refactor this and the similar code in sweptBox for box-vs-mesh. Not so easy though.
// PT: test: new version for CCT, based on code for general sweeps. Just to check it works or not with rotations
// TODO: refactor this and the similar code in sweptBox for box-vs-mesh. Not so easy though.
//
// Sweeps an OBB against an array of world-space triangles and returns the earliest hit in 'sweepHit'.
// Triangles are culled against the box's projection interval along 'unitDir' before the more
// expensive SAT sweep test. 'cachedIndex', when non-NULL, designates a triangle to test first
// (see getTriangleIndex — presumably for temporal coherence). 'isDoubleSided' and eMESH_BOTH_SIDES
// disable backface culling; a reported normal is flipped back when needed so it opposes the motion.
// Returns true if a hit was found; sweepHit.distance==0 encodes an initial overlap.
static bool sweepBoxVsTriangles(PxU32 nbTris, const PxTriangle* triangles, const Box& box, const PxVec3& unitDir, const PxReal distance, PxGeomSweepHit& sweepHit,
								PxHitFlags hitFlags, bool isDoubleSided, const PxU32* cachedIndex)
{
	if(!nbTris)
		return false;
	const bool meshBothSides = hitFlags & PxHitFlag::eMESH_BOTH_SIDES;
	const bool doBackfaceCulling = !isDoubleSided && !meshBothSides;
	// Move to AABB space
	PxMat34 worldToBox;
	computeWorldToBoxMatrix(worldToBox, box);
	const PxVec3 localDir = worldToBox.rotate(unitDir);
	const PxVec3 localMotion = localDir * distance;
	bool status = false;
	sweepHit.distance = distance; //was PX_MAX_F32, but that may trigger an assert in the caller!
	// Per-axis reciprocal of the motion, consumed by the slab tests in triBoxSweepTestBoxSpace
	const PxVec3 oneOverMotion(
		localDir.x!=0.0f ? 1.0f/localMotion.x : 0.0f,
		localDir.y!=0.0f ? 1.0f/localMotion.y : 0.0f,
		localDir.z!=0.0f ? 1.0f/localMotion.z : 0.0f);
	// PT: experimental code, don't clean up before I test it more and validate it
	// Project box
	/*float boxRadius0 =
				PxAbs(dir.x) * box.extents.x
			+	PxAbs(dir.y) * box.extents.y
			+	PxAbs(dir.z) * box.extents.z;*/
	// Half-width of the box's projection interval along the sweep direction, used for culling
	float boxRadius =
				PxAbs(localDir.x) * box.extents.x
			+	PxAbs(localDir.y) * box.extents.y
			+	PxAbs(localDir.z) * box.extents.z;
	if(gValidateBoxRadiusComputation)	// PT: run this to check the box radius is correctly computed
	{
		PxVec3 boxVertices2[8];
		box.computeBoxPoints(boxVertices2);
		float dpmin = FLT_MAX;
		float dpmax = -FLT_MAX;
		for(int i=0;i<8;i++)
		{
			const float dp = boxVertices2[i].dot(unitDir);
			if(dp<dpmin)	dpmin = dp;
			if(dp>dpmax)	dpmax = dp;
		}
		const float goodRadius = (dpmax-dpmin)/2.0f;
		PX_UNUSED(goodRadius);
	}
	const float dpc0 = box.center.dot(unitDir);	// box center projected on the sweep direction
	float localMinDist = 1.0f;	// best hit time so far, normalized to [0,1] over 'distance'
#if PX_DEBUG
	PxU32 totalTestsExpected = nbTris;
	PxU32 totalTestsReal = 0;
	PX_UNUSED(totalTestsExpected);
	PX_UNUSED(totalTestsReal);
#endif
	const PxU32 idx = cachedIndex ? *cachedIndex : 0;
	PxVec3 bestTriNormal(0.0f);
	for(PxU32 ii=0;ii<nbTris;ii++)
	{
		const PxU32 triangleIndex = getTriangleIndex(ii, idx);
		const PxTriangle& tri = triangles[triangleIndex];
		// Cull triangles that cannot be hit within the current best distance (see cullTriangle)
		if(!cullTriangle(tri.verts, unitDir, boxRadius, localMinDist*distance, dpc0))
			continue;
#if PX_DEBUG
		totalTestsReal++;
#endif
		// Move to box space
		const PxTriangle currentTriangle(
			worldToBox.transform(tri.verts[0]),
			worldToBox.transform(tri.verts[1]),
			worldToBox.transform(tri.verts[2]));
		PxF32 t = PX_MAX_F32; // could be better!
		if(triBoxSweepTestBoxSpace(currentTriangle, box.extents, localMotion, oneOverMotion, localMinDist, t, doBackfaceCulling))
		{
			if(t < localMinDist)
			{
				// PT: test if shapes initially overlap
				if(t==0.0f)
					return setInitialOverlapResults(sweepHit, unitDir, triangleIndex);
				localMinDist = t;
				sweepHit.distance = t * distance;
				sweepHit.faceIndex = triangleIndex;
				status = true;
				// PT: TODO: optimize this.... already computed in triBoxSweepTestBoxSpace...
				currentTriangle.denormalizedNormal(bestTriNormal);
				// eMESH_ANY: any hit is good enough, no need to find the closest one
				if(hitFlags & PxHitFlag::eMESH_ANY)
					break;
			}
		}
	}
	if(status)
	{
		sweepHit.flags = PxHitFlag::eFACE_INDEX;
		// PT: TODO: refactor with computeBoxLocalImpact (TA34704)
		if(hitFlags & (PxHitFlag::eNORMAL|PxHitFlag::ePOSITION))
		{
			// Recompute impact data for the best triangle only (cheaper than doing it per candidate)
			const PxTriangle& tri = triangles[sweepHit.faceIndex];
			// Move to box space
			const PxTriangle currentTriangle(
				worldToBox.transform(tri.verts[0]),
				worldToBox.transform(tri.verts[1]),
				worldToBox.transform(tri.verts[2]));
			computeBoxTriImpactData(sweepHit.position, sweepHit.normal, box.extents, localDir, currentTriangle, sweepHit.distance);
			if(hitFlags & PxHitFlag::eNORMAL)
			{
				PxVec3 localNormal = sweepHit.normal; // PT: both local space & local variable
				localNormal.normalize();
				// Flip so the normal opposes the motion, honoring double-sided/both-sides modes
				if(shouldFlipNormal(localNormal, meshBothSides, isDoubleSided, bestTriNormal, localDir))
					localNormal = -localNormal;
				sweepHit.normal = box.rotate(localNormal);
				sweepHit.flags |= PxHitFlag::eNORMAL;
			}
			if(hitFlags & PxHitFlag::ePOSITION)
			{
				// Back to world space
				sweepHit.position = box.rotate(sweepHit.position) + box.center;
				sweepHit.flags |= PxHitFlag::ePOSITION;
			}
		}
	}
	return status;
}
// Precise box-vs-heightfield sweep. The box swept over 'distance' is bounded by an AABB, which
// is used to collect candidate heightfield triangles; each one is then swept individually via
// sweepBoxVsTriangles, keeping the closest hit.
bool sweepBox_HeightFieldGeom_Precise(GU_BOX_SWEEP_FUNC_PARAMS)
{
	PX_ASSERT(geom.getType() == PxGeometryType::eHEIGHTFIELD);
	PX_UNUSED(threadContext);
	PX_UNUSED(inflation);
	PX_UNUSED(boxPose_);
	PX_UNUSED(boxGeom_);
	const PxHeightFieldGeometry& heightFieldGeom = static_cast<const PxHeightFieldGeometry&>(geom);
	// Compute swept box
	Box sweptBox;
	computeSweptBox(sweptBox, box.extents, box.center, box.rot, unitDir, distance);
	//### Temp hack until we can directly collide the OBB against the HF
	const PxTransform sweptBoxTR = sweptBox.getTransform();
	const PxBounds3 bounds = PxBounds3::poseExtent(sweptBoxTR, sweptBox.extents);
	sweepHit.distance = PX_MAX_F32;
	// Overlap callback: sweeps the box against each reported triangle, keeping the nearest hit.
	struct LocalReport : OverlapReport
	{
		virtual bool reportTouchedTris(PxU32 nb, const PxU32* indices)
		{
			for(PxU32 i=0; i<nb; i++)
			{
				const PxU32 triangleIndex = indices[i];
				PxTriangle currentTriangle;	// in world space
				mHFUtil->getTriangle(*mPose, currentTriangle, NULL, NULL, triangleIndex, true, true);
				PxGeomSweepHit sweepHit_;
				const bool b = sweepBoxVsTriangles(1, &currentTriangle, mBox, mDir, mDist, sweepHit_, mHitFlags, mIsDoubleSided, NULL);
				if(b && sweepHit_.distance<mHit->distance)
				{
					*mHit = sweepHit_;
					// Remap from the local one-triangle array (index 0) to the heightfield triangle index
					mHit->faceIndex = triangleIndex;
					mStatus = true;
				}
			}
			return true;	// continue traversal to find the closest hit
		}
		const HeightFieldUtil* mHFUtil;
		const PxTransform* mPose;
		PxGeomSweepHit* mHit;		// final result (closest hit so far)
		bool mStatus;				// true once any triangle was hit
		Box mBox;
		PxVec3 mDir;
		float mDist;
		PxHitFlags mHitFlags;
		bool mIsDoubleSided;
	} myReport;
	HeightFieldUtil hfUtil(heightFieldGeom);
	myReport.mBox = box;
	myReport.mDir = unitDir;
	myReport.mDist = distance;
	myReport.mHitFlags = hitFlags;
	myReport.mHFUtil = &hfUtil;
	myReport.mStatus = false;
	myReport.mPose = &pose;
	myReport.mHit = &sweepHit;
	// eMESH_BOTH_SIDES behaves like a double-sided mesh for the per-triangle sweeps
	const PxU32 meshBothSides = hitFlags & PxHitFlag::eMESH_BOTH_SIDES;
	myReport.mIsDoubleSided = (heightFieldGeom.heightFieldFlags & PxMeshGeometryFlag::eDOUBLE_SIDED) || meshBothSides;
	hfUtil.overlapAABBTriangles(pose, bounds, myReport);
	return myReport.mStatus;
}
// Public entry point: builds an OBB from the box geometry/pose and delegates to the
// shared triangle-sweep kernel above. 'inflation' is not supported by the precise path.
bool Gu::sweepBoxTriangles_Precise(GU_SWEEP_TRIANGLES_FUNC_PARAMS(PxBoxGeometry))
{
	PX_UNUSED(inflation);

	Box obb;
	buildFrom(obb, pose.p, geom.halfExtents, pose.q);

	return sweepBoxVsTriangles(nbTris, triangles, obb, unitDir, distance, hit, hitFlags, doubleSided, cachedIndex);
}
| 14,727 | C++ | 32.096629 | 162 | 0.723705 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuSweepMTD.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "geometry/PxConvexMeshGeometry.h"
#include "GuHeightFieldUtil.h"
#include "GuEntityReport.h"
#include "GuConvexMesh.h"
#include "GuSweepSharedTests.h"
#include "GuConvexUtilsInternal.h"
#include "GuTriangleMesh.h"
#include "GuVecBox.h"
#include "GuVecTriangle.h"
#include "GuVecConvexHullNoScale.h"
#include "GuMidphaseInterface.h"
#include "GuPCMContactConvexCommon.h"
#include "GuSweepMTD.h"
#include "GuPCMShapeConvex.h"
#include "GuDistanceSegmentSegment.h"
#include "GuDistancePointSegment.h"
#include "GuInternal.h"
#include "GuConvexEdgeFlags.h"
#include "GuMTD.h"
#include "CmMatrix34.h"
using namespace physx;
using namespace Gu;
using namespace Cm;
using namespace aos;
#define BATCH_TRIANGLE_NUMBER 32u
// Triangle augmented with its per-edge convex-edge ("active edge") flags,
// consumed by the PCM contact-generation routines used for MTD computation.
struct MTDTriangle : public PxTriangle
{
public:
	PxU8 extraTriData;//active edge flag data
};
// Midphase callback that records the index of every triangle touched by an OBB query.
struct MeshMTDGenerationCallback : MeshHitCallback<PxGeomRaycastHit>
{
public:
	PxArray<PxU32>& container;	// collected triangle indices (referenced, not owned)
	MeshMTDGenerationCallback(PxArray<PxU32>& tempContainer)
	: MeshHitCallback<PxGeomRaycastHit>(CallbackMode::eMULTIPLE), container(tempContainer)
	{
	}
	// Stores the hit triangle's index; returning true keeps the traversal going.
	virtual PxAgain processHit(
		const PxGeomRaycastHit& hit, const PxVec3&, const PxVec3&, const PxVec3&, PxReal&, const PxU32*)
	{
		container.pushBack(hit.faceIndex);
		return true;
	}
	// Assignment intentionally suppressed (the class holds a reference member).
	void operator=(const MeshMTDGenerationCallback&) {}
};
static bool getMTDPerTriangle(const MeshPersistentContact* manifoldContacts, const PxU32 numContacts, const PxU32 triangleIndex, Vec3V& normal, Vec3V& closestA, Vec3V& closestB, PxU32& faceIndex, FloatV& deepestPen)
{
FloatV deepest = V4GetW(manifoldContacts[0].mLocalNormalPen);
PxU32 index = 0;
for(PxU32 k=1; k<numContacts; ++k)
{
const FloatV pen = V4GetW(manifoldContacts[k].mLocalNormalPen);
if(FAllGrtr(deepest, pen))
{
deepest = pen;
index = k;
}
}
if(FAllGrtr(deepestPen, deepest))
{
PX_ASSERT(triangleIndex == manifoldContacts[index].mFaceIndex);
faceIndex = triangleIndex;
deepestPen = deepest;
normal = Vec3V_From_Vec4V(manifoldContacts[index].mLocalNormalPen);
closestA = manifoldContacts[index].mLocalPointB;
closestB = manifoldContacts[index].mLocalPointA;
return true;
}
return false;
}
// Collects into 'tempContainer' the indices of all mesh triangles overlapped by 'bound'.
// The query box is first converted into the mesh's vertex space to account for mesh scale.
static void midPhaseQuery(const PxTriangleMeshGeometry& meshGeom, const PxTransform& pose, const Box& bound, PxArray<PxU32>& tempContainer)
{
	Box vertexSpaceBox;
	computeVertexSpaceOBB(vertexSpaceBox, bound, pose, meshGeom.scale);

	MeshMTDGenerationCallback callback(tempContainer);
	Midphase::intersectOBB(static_cast<TriangleMesh*>(meshGeom.triangleMesh), vertexSpaceBox, callback, true);
}
// PT: TODO: refactor with EntityReportContainerCallback
// PT: TODO: refactor with EntityReportContainerCallback
// Overlap callback that appends every reported triangle index to an external container.
struct MidPhaseQueryLocalReport : OverlapReport
{
	MidPhaseQueryLocalReport(PxArray<PxU32>& _container) : container(_container)
	{
	}
	// Appends all indices; returning true keeps the traversal going.
	virtual bool reportTouchedTris(PxU32 nb, const PxU32* indices)
	{
		for(PxU32 i=0; i<nb; i++)
			container.pushBack(indices[i]);
		return true;
	}
	PxArray<PxU32>& container;	// collected triangle indices (referenced, not owned)
private:
	// Assignment suppressed (reference member).
	MidPhaseQueryLocalReport operator=(MidPhaseQueryLocalReport& report);
};
static void midPhaseQuery(const HeightFieldUtil& hfUtil, const PxTransform& pose, const PxBounds3& bounds, PxArray<PxU32>& tempContainer)
{
MidPhaseQueryLocalReport localReport(tempContainer);
hfUtil.overlapAABBTriangles(pose, bounds, localReport);
}
// Runs capsule-vs-triangle contact generation over a batch of triangles and tracks the deepest
// penetration found so far (in/out 'mtd'). 'startIndex' converts batch-local indices back into
// the caller's container indices (written to 'faceIndex'). Returns true if any triangle in the
// batch produced contacts.
static bool calculateMTD(	const CapsuleV& capsuleV, const FloatVArg inflatedRadiusV, const bool isDoubleSide, const MTDTriangle* triangles, const PxU32 nbTriangles, const PxU32 startIndex, MeshPersistentContact* manifoldContacts, 
							PxU32& numContacts, Vec3V& normal, Vec3V& closestA, Vec3V& closestB, PxU32& faceIndex, FloatV& mtd)
{
	const FloatV zero = FZero();
	bool hadContacts = false;
	FloatV deepestPen = mtd;
	for(PxU32 j=0; j<nbTriangles; ++j)
	{
		numContacts = 0;
		const MTDTriangle& curTri = triangles[j];
		TriangleV triangleV;
		triangleV.verts[0] = V3LoadU(curTri.verts[0]);
		triangleV.verts[1] = V3LoadU(curTri.verts[1]);
		triangleV.verts[2] = V3LoadU(curTri.verts[2]);
		const PxU8 triFlag = curTri.extraTriData;
		const Vec3V triangleNormal = triangleV.normal();
		const Vec3V v = V3Sub(capsuleV.getCenter(), triangleV.verts[0]);
		const FloatV dotV = V3Dot(triangleNormal, v);
		// Backface culling: skip triangles whose plane faces away from the capsule center
		const bool culled = !isDoubleSide && (FAllGrtr(zero, dotV));
		if(culled)
			continue;
		PCMCapsuleVsMeshContactGeneration::processTriangle(triangleV, j+startIndex, capsuleV, inflatedRadiusV, triFlag, manifoldContacts, numContacts);
		if(numContacts ==0)
			continue;
		hadContacts = true;
		// Track the deepest contact (and its triangle) across the whole batch
		getMTDPerTriangle(manifoldContacts, numContacts, j + startIndex, normal, closestA, closestB, faceIndex, deepestPen);
	}
	mtd = deepestPen;
	return hadContacts;
}
// Writes the final MTD result into 'hit': the depenetration distance is reported as a
// NEGATIVE sweep distance (magnitude of the accumulated translation), with the normal
// pointing along that translation. Returns 'foundInitial' unchanged.
static PX_FORCE_INLINE bool finalizeMTD(PxGeomSweepHit& hit, const Vec3VArg translationV, const Vec3VArg posV, PxU32 triangleIndex, bool foundInitial)
{
	if(!foundInitial)
		return false;

	const FloatV len = V3Length(translationV);
	// Guard against a zero-length translation: fall back to a zero normal
	const Vec3V dir = V3Sel(FIsGrtr(len, FZero()), V3ScaleInv(translationV, len), V3Zero());

	FStore(FNeg(len), &hit.distance);
	V3StoreU(posV, hit.position);
	V3StoreU(dir, hit.normal);
	hit.faceIndex = triangleIndex;
	return true;
}
// Computes the MTD (minimum translational distance / depenetration vector) between a capsule
// and a triangle mesh. Runs up to 4 depenetration iterations: each pass collects the triangles
// overlapping the inflated capsule's bounds, finds the deepest contact, and translates the
// capsule out along the contact normal. On success 'hit' receives the accumulated depenetration
// (as a negative distance), the contact position/normal and the deepest triangle's index.
bool physx::Gu::computeCapsule_TriangleMeshMTD(	const PxTriangleMeshGeometry& triMeshGeom, const PxTransform& pose, CapsuleV& capsuleV, PxReal inflatedRadius, 
												bool isDoubleSided, PxGeomSweepHit& hit)
{
	TriangleMesh* triMesh = static_cast<TriangleMesh*>(triMeshGeom.triangleMesh);
	const PxU8* extraTrigData = triMesh->getExtraTrigData();
	const bool flipsNormal = triMeshGeom.scale.hasNegativeDeterminant();
	//inflated the capsule by 15% in case of some disagreement between sweep and mtd calculation. If sweep said initial overlap, but mtd has a positive separation,
	//we are still be able to return a valid normal but we should zero the distance.
	const FloatV inflatedRadiusV = FLoad(inflatedRadius*1.15f);
	const PxMat34 vertexToWorldSkew = pose * triMeshGeom.scale;
	const Vec3V zeroV = V3Zero();
	Vec3V closestA = zeroV, closestB = zeroV, normal = zeroV;
	/////
	MeshPersistentContact manifoldContacts[64];
	PxU32 numContacts = 0;
	PxArray<PxU32> tempContainer;
	tempContainer.reserve(128);
	PxU32 triangleIndex = 0xfffffff;	// sentinel; overwritten as soon as a contact is found
	Vec3V translation = zeroV;	// accumulated depenetration vector across iterations
	bool foundInitial = false;
	const PxU32 iterations = 4;
	/////
	for(PxU32 i=0; i<iterations; ++i)
	{
		tempContainer.forceSize_Unsafe(0);
		{
			// Gather the triangles overlapping the inflated capsule's bounding box
			Capsule inflatedCapsule;
			V3StoreU(capsuleV.p0, inflatedCapsule.p0);
			V3StoreU(capsuleV.p1, inflatedCapsule.p1);
			inflatedCapsule.radius = inflatedRadius;
			Box capsuleBox;
			computeBoxAroundCapsule(inflatedCapsule, capsuleBox);
			midPhaseQuery(triMeshGeom, pose, capsuleBox, tempContainer);
		}
		// Get results
		const PxU32 nbTriangles = tempContainer.size();
		if(!nbTriangles)
			break;
		FloatV mtd;
		{
			bool hadContacts = false;
			const PxU32 nbBatches = (nbTriangles + BATCH_TRIANGLE_NUMBER - 1)/BATCH_TRIANGLE_NUMBER;
			mtd = FMax();
			MTDTriangle triangles[BATCH_TRIANGLE_NUMBER];
			for(PxU32 a = 0; a < nbBatches; ++a)
			{
				const PxU32 startIndex = a * BATCH_TRIANGLE_NUMBER;
				const PxU32 nbTrigs = PxMin(nbTriangles - startIndex, BATCH_TRIANGLE_NUMBER);
				for(PxU32 k=0; k<nbTrigs; k++)
				{
					//triangle world space
					const PxU32 currentTriangleIndex = tempContainer[startIndex+k];
					triMesh->computeWorldTriangle(triangles[k], currentTriangleIndex, vertexToWorldSkew, flipsNormal);
					triangles[k].extraTriData = getConvexEdgeFlags(extraTrigData, currentTriangleIndex);
				}
				//ML: mtd has back face culling, so if the capsule's center is below the triangle, we won't generate any contacts
				hadContacts = calculateMTD(capsuleV, inflatedRadiusV, isDoubleSided, triangles, nbTrigs, startIndex, manifoldContacts, numContacts, normal, closestA, closestB, triangleIndex, mtd) || hadContacts;
			}
			if(!hadContacts)
				break;
			// calculateMTD wrote a container-relative index: remap to the real mesh triangle index
			triangleIndex = tempContainer[triangleIndex];
			foundInitial = true;
		}
		//move the capsule to depenetrate it
		const FloatV distV = FSub(mtd, capsuleV.radius);	// negative => still penetrating
		if(FAllGrtr(FZero(), distV))
		{
			Vec3V center = capsuleV.getCenter();
			const Vec3V t = V3Scale(normal, distV);
			translation = V3Sub(translation, t);
			center = V3Sub(center, t);
			capsuleV.setCenter(center);
		}
		else
		{
			if(i == 0)
			{
				//First iteration so keep this normal
				hit.distance = 0.0f;
				V3StoreU(closestA, hit.position);
				V3StoreU(normal, hit.normal);
				hit.faceIndex = triangleIndex;
				return true;
			}
			break;
		}
	}
	return finalizeMTD(hit, translation, closestA, triangleIndex, foundInitial);
}
// Heightfield counterpart of computeCapsule_TriangleMeshMTD: same iterative depenetration
// scheme (up to 4 passes of midphase query + deepest-contact search + translation along the
// contact normal), but triangles come from the heightfield and always use all-convex edge flags.
bool physx::Gu::computeCapsule_HeightFieldMTD(const PxHeightFieldGeometry& heightFieldGeom, const PxTransform& pose, CapsuleV& capsuleV, PxReal inflatedRadius, bool isDoubleSided, PxGeomSweepHit& hit)
{
	//inflated the capsule by 1% in case of some disagreement between sweep and mtd calculation.If sweep said initial overlap, but mtd has a positive separation,
	//we are still be able to return a valid normal but we should zero the distance.
	const FloatV inflatedRadiusV = FLoad(inflatedRadius*1.01f);
	const HeightFieldUtil hfUtil(heightFieldGeom);
	const Vec3V zeroV = V3Zero();
	Vec3V closestA = zeroV, closestB = zeroV, normal = zeroV;
	/////
	MeshPersistentContact manifoldContacts[64];
	PxU32 numContacts = 0;
	PxArray<PxU32> tempContainer;
	tempContainer.reserve(128);
	PxU32 triangleIndex = 0xfffffff;	// sentinel; overwritten as soon as a contact is found
	Vec3V translation = zeroV;	// accumulated depenetration vector across iterations
	bool foundInitial = false;
	const PxU32 iterations = 4;
	/////
	for(PxU32 i=0; i<iterations; ++i)
	{
		tempContainer.forceSize_Unsafe(0);
		{
			// Gather the triangles overlapping the inflated capsule's AABB
			Capsule inflatedCapsule;
			V3StoreU(capsuleV.p0, inflatedCapsule.p0);
			V3StoreU(capsuleV.p1, inflatedCapsule.p1);
			inflatedCapsule.radius = inflatedRadius;
			Box capsuleBox;
			computeBoxAroundCapsule(inflatedCapsule, capsuleBox);
			const PxTransform capsuleBoxTransform = capsuleBox.getTransform();
			const PxBounds3 bounds = PxBounds3::poseExtent(capsuleBoxTransform, capsuleBox.extents);
			midPhaseQuery(hfUtil, pose, bounds, tempContainer);
		}
		// Get results
		const PxU32 nbTriangles = tempContainer.size();
		if(!nbTriangles)
			break;
		FloatV mtd;
		{
			bool hadContacts = false;
			const PxU32 nbBatches = (nbTriangles + BATCH_TRIANGLE_NUMBER - 1)/BATCH_TRIANGLE_NUMBER;
			mtd = FMax();
			MTDTriangle triangles[BATCH_TRIANGLE_NUMBER];
			for(PxU32 a = 0; a < nbBatches; ++a)
			{
				const PxU32 startIndex = a * BATCH_TRIANGLE_NUMBER;
				const PxU32 nbTrigs = PxMin(nbTriangles - startIndex, BATCH_TRIANGLE_NUMBER);
				for(PxU32 k=0; k<nbTrigs; k++)
				{
					//triangle vertex space
					const PxU32 currentTriangleIndex = tempContainer[startIndex+k];
					hfUtil.getTriangle(pose, triangles[k], NULL, NULL, currentTriangleIndex, true);
					// Heightfield triangles treat all edges as convex/active
					triangles[k].extraTriData = ETD_CONVEX_EDGE_ALL;
				}
				//ML: mtd has back face culling, so if the capsule's center is below the triangle, we won't generate any contacts
				hadContacts = calculateMTD(capsuleV, inflatedRadiusV, isDoubleSided, triangles, nbTrigs, startIndex, manifoldContacts, numContacts, normal, closestA, closestB, triangleIndex, mtd) || hadContacts;
			}
			if(!hadContacts)
				break;
			// calculateMTD wrote a container-relative index: remap to the real heightfield triangle index
			triangleIndex = tempContainer[triangleIndex];
			foundInitial = true;
		}
		const FloatV distV = FSub(mtd, capsuleV.radius);	// negative => still penetrating
		if(FAllGrtr(FZero(), distV))
		{
			//move the capsule to depenetrate it
			Vec3V center = capsuleV.getCenter();
			const Vec3V t = V3Scale(normal, distV);
			translation = V3Sub(translation, t);
			center = V3Sub(center, t);
			capsuleV.setCenter(center);
		}
		else
		{
			if(i == 0)
			{
				//First iteration so keep this normal
				hit.distance = 0.0f;
				V3StoreU(closestA, hit.position);
				V3StoreU(normal, hit.normal);
				hit.faceIndex = triangleIndex;
				return true;
			}
			break;
		}
	}
	return finalizeMTD(hit, translation, closestA, triangleIndex, foundInitial);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Convex-vs-triangle overload of calculateMTD: runs PCM convex-vs-mesh contact generation over
// a batch of triangles (expressed in mesh space, transformed via 'meshToConvex') and tracks the
// deepest penetration found so far (in/out 'mtd'). 'startIndex' converts batch-local indices
// back into the caller's container indices. Returns true if any triangle produced contacts.
static bool calculateMTD(	const PolygonalData& polyData, const SupportLocal* polyMap, const PxTransformV& convexTransform, const PxMatTransformV& meshToConvex, bool isDoubleSided, const FloatVArg inflation, const MTDTriangle* triangles, PxU32 nbTriangles, PxU32 startIndex, 
							MeshPersistentContact* manifoldContacts, PxU32& numContacts, Vec3V& normal, Vec3V& closestA, Vec3V& closestB, PxU32& faceIndex, FloatV& mtd)
{
	bool hadContacts = false;
	FloatV deepestPen = mtd;
	for(PxU32 j=0; j<nbTriangles; ++j)
	{
		numContacts = 0;
		const MTDTriangle& curTri = triangles[j];
		const PxU8 triFlag = curTri.extraTriData;
		PCMConvexVsMeshContactGeneration::processTriangle(polyData, polyMap, curTri.verts, j+startIndex, triFlag, inflation, isDoubleSided, convexTransform, meshToConvex, manifoldContacts, numContacts);
		if(numContacts ==0)
			continue;
		hadContacts = true;
		// Track the deepest contact (and its triangle) across the whole batch
		getMTDPerTriangle(manifoldContacts, numContacts, j+startIndex, normal, closestA, closestB, faceIndex, deepestPen);
	}
	mtd = deepestPen;
	return hadContacts;
}
// Computes the MTD between a box and a triangle mesh, using the same iterative scheme as the
// capsule version: up to 4 passes of midphase query + deepest-contact search + depenetration
// along the contact normal. The midphase query box is inflated by 'inflation' plus an internal
// margin, while contact generation runs on the original box extents.
bool physx::Gu::computeBox_TriangleMeshMTD(const PxTriangleMeshGeometry& triMeshGeom, const PxTransform& pose, const Box& _box, const PxTransform& boxTransform, PxReal inflation, bool isDoubleSided, PxGeomSweepHit& hit)
{
	TriangleMesh* triMesh = static_cast<TriangleMesh*>(triMeshGeom.triangleMesh);
	const PxU8* extraTrigData = triMesh->getExtraTrigData();
	const bool flipsNormal = triMeshGeom.scale.hasNegativeDeterminant();
	const Vec3V zeroV = V3Zero();
	Vec3V closestA = zeroV, closestB = zeroV, normal = zeroV;
	Vec3V worldNormal = zeroV, worldContactA = zeroV;//, worldContactB = zeroV;
	Box box = _box;
	const QuatV q0 = QuatVLoadU(&boxTransform.q.x);
	const Vec3V p0 = V3LoadU(&boxTransform.p.x);
	const Vec3V boxExtents = V3LoadU(box.extents);
	const FloatV minMargin = CalculateMTDBoxMargin(boxExtents);
	const FloatV inflationV = FAdd(FLoad(inflation), minMargin);
	PxReal boundInflation;
	FStore(inflationV, &boundInflation);
	// Only the midphase query box is inflated; boxV/polyData below keep the original extents
	box.extents += PxVec3(boundInflation);
	const BoxV boxV(zeroV, boxExtents);
	Vec3V boxCenter = V3LoadU(box.center);
	//create the polyData based on the original data
	PolygonalData polyData;
	const PCMPolygonalBox polyBox(_box.extents);
	polyBox.getPolygonalData(&polyData);
	const Mat33V identity = M33Identity();
	const PxMat34 meshToWorldSkew = pose * triMeshGeom.scale;
	PxTransformV boxTransformV(p0, q0);//box
	/////
	MeshPersistentContact manifoldContacts[64];
	PxU32 numContacts = 0;
	PxArray<PxU32> tempContainer;
	tempContainer.reserve(128);
	PxU32 triangleIndex = 0xfffffff;	// sentinel; overwritten as soon as a contact is found
	Vec3V translation = zeroV;	// accumulated depenetration vector across iterations
	bool foundInitial = false;
	const PxU32 iterations = 4;
	/////
	for(PxU32 i=0; i<iterations; ++i)
	{
		tempContainer.forceSize_Unsafe(0);
		{
			midPhaseQuery(triMeshGeom, pose, box, tempContainer);
		}
		// Get results
		const PxU32 nbTriangles = tempContainer.size();
		if(!nbTriangles)
			break;
		// Rebuild the support map around the box's current (possibly translated) center
		boxTransformV.p = boxCenter;
		SupportLocalImpl<BoxV> boxMap(boxV, boxTransformV, identity, identity, true);
		boxMap.setShapeSpaceCenterofMass(zeroV);
		// Move to AABB space
		PxMat34 WorldToBox;
		computeWorldToBoxMatrix(WorldToBox, box);
		const PxMat34 meshToBox = WorldToBox*meshToWorldSkew;
		const Mat33V rot(V3LoadU(meshToBox.m.column0), V3LoadU(meshToBox.m.column1), V3LoadU(meshToBox.m.column2));
		const PxMatTransformV meshToConvex(V3LoadU(meshToBox.p), rot);
		FloatV mtd;
		{
			bool hadContacts = false;
			const PxU32 nbBatches = (nbTriangles + BATCH_TRIANGLE_NUMBER - 1)/BATCH_TRIANGLE_NUMBER;
			mtd = FMax();
			MTDTriangle triangles[BATCH_TRIANGLE_NUMBER];
			for(PxU32 a = 0; a < nbBatches; ++a)
			{
				const PxU32 startIndex = a * BATCH_TRIANGLE_NUMBER;
				const PxU32 nbTrigs = PxMin(nbTriangles - startIndex, BATCH_TRIANGLE_NUMBER);
				for(PxU32 k=0; k<nbTrigs; k++)
				{
					//triangle vertex space
					const PxU32 currentTriangleIndex = tempContainer[startIndex+k];
					triMesh->getLocalTriangle(triangles[k], currentTriangleIndex, flipsNormal);
					triangles[k].extraTriData = getConvexEdgeFlags(extraTrigData, currentTriangleIndex);
				}
				//ML: mtd has back face culling, so if the capsule's center is below the triangle, we won't generate any contacts
				hadContacts = calculateMTD(polyData, &boxMap, boxTransformV, meshToConvex, isDoubleSided, inflationV, triangles, nbTrigs, startIndex, manifoldContacts, numContacts, normal, closestA, closestB, triangleIndex, mtd) || hadContacts;
			}
			if(!hadContacts)
				break;
			// calculateMTD wrote a container-relative index: remap to the real mesh triangle index
			triangleIndex = tempContainer[triangleIndex];
			foundInitial = true;
		}
		const FloatV distV = mtd;	// negative => still penetrating
		// Contact data is in box space: bring it back to world space
		worldNormal = boxTransformV.rotate(normal);
		worldContactA = boxTransformV.transform(closestA);
		if(FAllGrtr(FZero(), distV))
		{
			// Depenetrate the box along the contact normal
			const Vec3V t = V3Scale(worldNormal, mtd);
			translation = V3Sub(translation, t);
			boxCenter = V3Sub(boxCenter, t);
			V3StoreU(boxCenter, box.center);
		}
		else
		{
			if(i == 0)
			{
				//First iteration so keep this normal
				hit.distance = 0.0f;
				V3StoreU(worldContactA, hit.position);
				V3StoreU(worldNormal, hit.normal);
				hit.faceIndex = triangleIndex;
				return true;
			}
			break;
		}
	}
	return finalizeMTD(hit, translation, worldContactA, triangleIndex, foundInitial);
}
// Computes the Minimum Translational Distance (MTD) between a box and a heightfield.
// The box is iteratively depenetrated: each pass queries heightfield triangles
// overlapping the (inflated) box bounds, finds the deepest contact via calculateMTD,
// and translates the box out along the contact normal; the accumulated translation
// is converted into the final hit by finalizeMTD.
// Returns true and fills 'hit' when an initial overlap was found.
bool physx::Gu::computeBox_HeightFieldMTD(const PxHeightFieldGeometry& heightFieldGeom, const PxTransform& pose, const Box& _box, const PxTransform& boxTransform, PxReal inflation, bool isDoubleSided, PxGeomSweepHit& hit)
{
	const HeightFieldUtil hfUtil(heightFieldGeom);
	const Vec3V zeroV = V3Zero();
	Vec3V closestA = zeroV, closestB = zeroV, normal = zeroV;
	Vec3V worldNormal = zeroV, worldContactA = zeroV;//, worldContactB = zeroV;
	Box box = _box;	// local copy: center and extents are modified during the iterations
	const QuatV q0 = QuatVLoadU(&boxTransform.q.x);
	const Vec3V p0 = V3LoadU(&boxTransform.p.x);
	const Vec3V boxExtents = V3LoadU(box.extents);
	// Inflate the requested distance by the box's MTD margin so near-touching cases are caught.
	const FloatV minMargin = CalculateMTDBoxMargin(boxExtents);
	const FloatV inflationV = FAdd(FLoad(inflation), minMargin);
	//const FloatV inflationV = FLoad(inflation);
	PxReal boundInflation;
	FStore(inflationV, &boundInflation);
	box.extents += PxVec3(boundInflation);	// inflated bounds used for the mid-phase query
	const BoxV boxV(zeroV, boxExtents);
	Vec3V boxCenter = V3LoadU(box.center);
	//create the polyData based on the original box
	PolygonalData polyData;
	const PCMPolygonalBox polyBox(_box.extents);
	polyBox.getPolygonalData(&polyData);
	const Mat33V identity = M33Identity();
	const Matrix34FromTransform meshToWorldSkew(pose);
	PxTransformV boxTransformV(p0, q0);//box
	/////
	MeshPersistentContact manifoldContacts[64];
	PxU32 numContacts = 0;
	PxArray<PxU32> tempContainer;
	tempContainer.reserve(128);
	PxU32 triangleIndex = 0xfffffff;	// "no triangle" sentinel (note: 7 F's in the original source)
	Vec3V translation = zeroV;	// accumulated depenetration vector
	bool foundInitial = false;
	const PxU32 iterations = 4;
	/////
	for(PxU32 i=0; i<iterations; ++i)
	{
		tempContainer.forceSize_Unsafe(0);
		{
			// Gather heightfield triangles overlapping the box's current (inflated) AABB.
			const PxBounds3 bounds = PxBounds3::poseExtent(box.getTransform(), box.extents);
			midPhaseQuery(hfUtil, pose, bounds, tempContainer);
		}
		// Get results
		const PxU32 nbTriangles = tempContainer.size();
		if(!nbTriangles)
			break;
		boxTransformV.p = boxCenter;	// track the center translated by previous iterations
		SupportLocalImpl<BoxV> boxMap(boxV, boxTransformV, identity, identity, true);
		boxMap.setShapeSpaceCenterofMass(zeroV);
		// Move to AABB space
		PxMat34 WorldToBox;
		computeWorldToBoxMatrix(WorldToBox, box);
		const PxMat34 meshToBox = WorldToBox*meshToWorldSkew;
		const Mat33V rot(V3LoadU(meshToBox.m.column0), V3LoadU(meshToBox.m.column1), V3LoadU(meshToBox.m.column2));
		const PxMatTransformV meshToConvex(V3LoadU(meshToBox.p), rot);
		FloatV mtd;
		{
			bool hadContacts = false;
			// Process the candidate triangles in fixed-size batches.
			const PxU32 nbBatches = (nbTriangles + BATCH_TRIANGLE_NUMBER - 1)/BATCH_TRIANGLE_NUMBER;
			mtd = FMax();
			MTDTriangle triangles[BATCH_TRIANGLE_NUMBER];
			for(PxU32 a = 0; a < nbBatches; ++a)
			{
				const PxU32 startIndex = a * BATCH_TRIANGLE_NUMBER;
				const PxU32 nbTrigs = PxMin(nbTriangles - startIndex, BATCH_TRIANGLE_NUMBER);
				for(PxU32 k=0; k<nbTrigs; k++)
				{
					//triangle vertex space
					const PxU32 currentTriangleIndex = tempContainer[startIndex+k];
					hfUtil.getTriangle(pose, triangles[k], NULL, NULL, currentTriangleIndex, false, false);
					triangles[k].extraTriData = ETD_CONVEX_EDGE_ALL;	// heightfield triangles: all edges treated as convex
				}
				//ML: mtd has back face culling, so if the box's center is below the triangle, we won't generate any contacts
				hadContacts = calculateMTD(polyData, &boxMap, boxTransformV, meshToConvex, isDoubleSided, inflationV, triangles, nbTrigs, startIndex, manifoldContacts, numContacts, normal, closestA, closestB, triangleIndex, mtd) || hadContacts;
			}
			if(!hadContacts)
				break;
			// calculateMTD returned an index into tempContainer; map it back to a heightfield triangle index.
			triangleIndex = tempContainer[triangleIndex];
			foundInitial = true;
		}
		const FloatV distV = mtd;
		worldNormal = boxTransformV.rotate(normal);
		worldContactA = boxTransformV.transform(closestA);
		if(FAllGrtr(FZero(), distV))
		{
			//worldContactB = boxTransformV.transform(closestB);
			// Still penetrating: push the box out along the contact normal and iterate again.
			const Vec3V t = V3Scale(worldNormal, mtd);
			translation = V3Sub(translation, t);
			boxCenter = V3Sub(boxCenter, t);
			V3StoreU(boxCenter, box.center);
		}
		else
		{
			if(i == 0)
			{
				//First iteration so keep this normal
				hit.distance = 0.0f;
				V3StoreU(worldContactA, hit.position);
				V3StoreU(worldNormal, hit.normal);
				hit.faceIndex = triangleIndex;
				return true;
			}
			break;
		}
	}
	return finalizeMTD(hit, translation, worldContactA, triangleIndex, foundInitial);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Computes the Minimum Translational Distance (MTD) between a convex mesh and a
// triangle mesh. The convex is iteratively depenetrated: each pass collects mesh
// triangles overlapping an OBB around the (inflated) convex, finds the deepest
// contact via calculateMTD, and translates the convex out along the contact
// normal; the accumulated translation is converted into the final hit by
// finalizeMTD. Returns true and fills 'hit' when an initial overlap was found.
bool physx::Gu::computeConvex_TriangleMeshMTD(	const PxTriangleMeshGeometry& triMeshGeom, const PxTransform& pose, const PxConvexMeshGeometry& convexGeom, const PxTransform& convexPose, PxReal inflation,
												bool isDoubleSided, PxGeomSweepHit& hit)
{
	const Vec3V zeroV = V3Zero();
	TriangleMesh* triMesh = static_cast<TriangleMesh*>(triMeshGeom.triangleMesh);
	ConvexMesh* cm = static_cast<ConvexMesh*>(convexGeom.convexMesh);
	const PxU8* extraTrigData = triMesh->getExtraTrigData();
	const bool flipsNormal = triMeshGeom.scale.hasNegativeDeterminant();	// negative-determinant scale flips triangle winding
	ConvexHullData* hullData = &cm->getHull();
	const bool idtScaleConvex = convexGeom.scale.isIdentity();
	FastVertex2ShapeScaling convexScaling;
	if(!idtScaleConvex)
		convexScaling.init(convexGeom.scale);
	const PxVec3 _shapeSpaceCenterOfMass = convexScaling * hullData->mCenterOfMass;
	const Vec3V shapeSpaceCenterOfMass = V3LoadU(_shapeSpaceCenterOfMass);
	const QuatV q0 = QuatVLoadU(&convexPose.q.x);
	const Vec3V p0 = V3LoadU(&convexPose.p.x);
	PxTransformV convexTransformV(p0, q0);
	const Vec3V vScale = V3LoadU_SafeReadW(convexGeom.scale.scale);	// PT: safe because 'rotation' follows 'scale' in PxMeshScale
	const QuatV vQuat = QuatVLoadU(&convexGeom.scale.rotation.x);
	const ConvexHullV convexHull(hullData, V3Zero(), vScale, vQuat, idtScaleConvex);
	PX_ALIGN(16, PxU8 convexBuff[sizeof(SupportLocalImpl<ConvexHullV>)]);	// placement-new storage for the support object, rebuilt each iteration
	// Inflate the requested distance by the convex's MTD margin so near-touching cases are caught.
	const FloatV convexMargin = CalculateMTDConvexMargin(hullData, vScale);
	const FloatV inflationV = FAdd(FLoad(inflation), convexMargin);
	PxReal boundInflation;
	FStore(inflationV, &boundInflation);
	Vec3V closestA = zeroV, closestB = zeroV, normal = zeroV;
	const PxMat34 meshToWorldSkew = pose * triMeshGeom.scale;
	PolygonalData polyData;
	getPCMConvexData(convexHull, idtScaleConvex, polyData);
	Vec3V center = p0;	// convex center, moved during the depenetration iterations
	PxTransform tempConvexPose = convexPose;
	Vec3V worldNormal = zeroV, worldContactA = zeroV;//, worldContactB = zeroV;
	/////
	MeshPersistentContact manifoldContacts[64];
	PxU32 numContacts = 0;
	PxArray<PxU32> tempContainer;
	tempContainer.reserve(128);
	PxU32 triangleIndex = 0xfffffff;	// "no triangle" sentinel (note: 7 F's in the original source)
	Vec3V translation = zeroV;	// accumulated depenetration vector
	bool foundInitial = false;
	const PxU32 iterations = 2; // PT: TODO: why 2 here instead of 4?
	/////
	for(PxU32 i=0; i<iterations; ++i)
	{
		tempContainer.forceSize_Unsafe(0);
		SupportLocal* convexMap;
		{
			//ML:: construct convex hull data
			V3StoreU(center, tempConvexPose.p);
			convexTransformV.p = center;
			// Pick the no-scale specialization when the convex has identity scale.
			convexMap = idtScaleConvex ?	static_cast<SupportLocal*>(PX_PLACEMENT_NEW(convexBuff, SupportLocalImpl<ConvexHullNoScaleV>)(static_cast<const ConvexHullNoScaleV&>(convexHull), convexTransformV, convexHull.vertex2Shape, convexHull.shape2Vertex, idtScaleConvex)) :
											static_cast<SupportLocal*>(PX_PLACEMENT_NEW(convexBuff, SupportLocalImpl<ConvexHullV>)(convexHull, convexTransformV, convexHull.vertex2Shape, convexHull.shape2Vertex, idtScaleConvex));
			convexMap->setShapeSpaceCenterofMass(shapeSpaceCenterOfMass);
			// Gather mesh triangles overlapping an OBB around the (inflated) convex.
			Box hullOBB;
			computeOBBAroundConvex(hullOBB, convexGeom, cm, tempConvexPose);
			hullOBB.extents += PxVec3(boundInflation);
			midPhaseQuery(triMeshGeom, pose, hullOBB, tempContainer);
		}
		// Get results
		const PxU32 nbTriangles = tempContainer.size();
		if(!nbTriangles)
			break;
		// Move to AABB space
		const Matrix34FromTransform worldToConvex(tempConvexPose.getInverse());
		const PxMat34 meshToConvex = worldToConvex*meshToWorldSkew;
		const Mat33V rot(V3LoadU(meshToConvex.m.column0), V3LoadU(meshToConvex.m.column1), V3LoadU(meshToConvex.m.column2));
		const PxMatTransformV meshToConvexV(V3LoadU(meshToConvex.p), rot);
		FloatV mtd;
		{
			bool hadContacts = false;
			// Process the candidate triangles in fixed-size batches.
			const PxU32 nbBatches = (nbTriangles + BATCH_TRIANGLE_NUMBER - 1)/BATCH_TRIANGLE_NUMBER;
			mtd = FMax();
			MTDTriangle triangles[BATCH_TRIANGLE_NUMBER];
			for(PxU32 a = 0; a < nbBatches; ++a)
			{
				const PxU32 startIndex = a * BATCH_TRIANGLE_NUMBER;
				const PxU32 nbTrigs = PxMin(nbTriangles - startIndex, BATCH_TRIANGLE_NUMBER);
				for(PxU32 k=0; k<nbTrigs; k++)
				{
					//triangle vertex space
					const PxU32 currentTriangleIndex = tempContainer[startIndex+k];
					triMesh->getLocalTriangle(triangles[k], currentTriangleIndex, flipsNormal);
					triangles[k].extraTriData = getConvexEdgeFlags(extraTrigData, currentTriangleIndex);
				}
				//ML: mtd has back face culling, so if the capsule's center is below the triangle, we won't generate any contacts
				hadContacts = calculateMTD(polyData, convexMap, convexTransformV, meshToConvexV, isDoubleSided, inflationV, triangles, nbTrigs, startIndex, manifoldContacts, numContacts, normal, closestA, closestB, triangleIndex, mtd) || hadContacts;
			}
			if(!hadContacts)
				break;
			// calculateMTD returned an index into tempContainer; map it back to a mesh triangle index.
			triangleIndex = tempContainer[triangleIndex];
			foundInitial = true;
		}
		const FloatV distV = mtd;
		worldNormal = convexTransformV.rotate(normal);
		worldContactA = convexTransformV.transform(closestA);
		if(FAllGrtr(FZero(), distV))
		{
			// Still penetrating: push the convex out along the contact normal and iterate again.
			const Vec3V t = V3Scale(worldNormal, mtd);
			translation = V3Sub(translation, t);
			center = V3Sub(center, t);
		}
		else
		{
			if(i == 0)
			{
				//First iteration so keep this normal
				hit.distance = 0.0f;
				V3StoreU(worldContactA, hit.position);
				V3StoreU(worldNormal, hit.normal);
				hit.faceIndex = triangleIndex;
				return true;
			}
			break;
		}
	}
	return finalizeMTD(hit, translation, worldContactA, triangleIndex, foundInitial);
}
// Computes the Minimum Translational Distance (MTD) between a convex mesh and a
// heightfield. Same iterative scheme as computeConvex_TriangleMeshMTD: each pass
// collects heightfield triangles overlapping the bounds of an OBB around the
// (inflated) convex, finds the deepest contact via calculateMTD, and translates
// the convex out along the contact normal; finalizeMTD converts the accumulated
// translation into the final hit. Returns true when an initial overlap was found.
bool physx::Gu::computeConvex_HeightFieldMTD(const PxHeightFieldGeometry& heightFieldGeom, const PxTransform& pose, const PxConvexMeshGeometry& convexGeom, const PxTransform& convexPose, PxReal inflation, bool isDoubleSided, PxGeomSweepHit& hit)
{
	const HeightFieldUtil hfUtil(heightFieldGeom);
	const Vec3V zeroV = V3Zero();
	ConvexMesh* cm = static_cast<ConvexMesh*>(convexGeom.convexMesh);
	ConvexHullData* hullData = &cm->getHull();
	const bool idtScaleConvex = convexGeom.scale.isIdentity();
	FastVertex2ShapeScaling convexScaling;
	if(!idtScaleConvex)
		convexScaling.init(convexGeom.scale);
	const PxVec3 _shapeSpaceCenterOfMass = convexScaling * hullData->mCenterOfMass;
	const Vec3V shapeSpaceCenterOfMass = V3LoadU(_shapeSpaceCenterOfMass);
	const QuatV q0 = QuatVLoadU(&convexPose.q.x);
	const Vec3V p0 = V3LoadU(&convexPose.p.x);
	PxTransformV convexTransformV(p0, q0);
	const Vec3V vScale = V3LoadU_SafeReadW(convexGeom.scale.scale);	// PT: safe because 'rotation' follows 'scale' in PxMeshScale
	const QuatV vQuat = QuatVLoadU(&convexGeom.scale.rotation.x);
	const ConvexHullV convexHull(hullData, zeroV, vScale, vQuat, idtScaleConvex);
	PX_ALIGN(16, PxU8 convexBuff[sizeof(SupportLocalImpl<ConvexHullV>)]);	// placement-new storage for the support object, rebuilt each iteration
	// Inflate the requested distance by the convex's MTD margin so near-touching cases are caught.
	const FloatV convexMargin = CalculateMTDConvexMargin(hullData, vScale);
	const FloatV inflationV = FAdd(FLoad(inflation), convexMargin);
	PxReal boundInflation;
	FStore(inflationV, &boundInflation);
	Vec3V closestA = zeroV, closestB = zeroV, normal = zeroV;
	Vec3V worldNormal = zeroV, worldContactA = zeroV;//, worldContactB = zeroV;
	PolygonalData polyData;
	getPCMConvexData(convexHull, idtScaleConvex, polyData);
	Vec3V center = p0;	// convex center, moved during the depenetration iterations
	PxTransform tempConvexPose = convexPose;
	const Matrix34FromTransform meshToWorldSkew(pose);
	/////
	MeshPersistentContact manifoldContacts[64];
	PxU32 numContacts = 0;
	PxArray<PxU32> tempContainer;
	tempContainer.reserve(128);
	PxU32 triangleIndex = 0xfffffff;	// "no triangle" sentinel (note: 7 F's in the original source)
	Vec3V translation = zeroV;	// accumulated depenetration vector
	bool foundInitial = false;
	const PxU32 iterations = 2; // PT: TODO: why 2 here instead of 4?
	/////
	for(PxU32 i=0; i<iterations; ++i)
	{
		tempContainer.forceSize_Unsafe(0);
		SupportLocal* convexMap;
		{
			//ML:: construct convex hull data
			V3StoreU(center, tempConvexPose.p);
			convexTransformV.p = center;
			// Pick the no-scale specialization when the convex has identity scale.
			convexMap = idtScaleConvex ?	static_cast<SupportLocal*>(PX_PLACEMENT_NEW(convexBuff, SupportLocalImpl<ConvexHullNoScaleV>)(static_cast<const ConvexHullNoScaleV&>(convexHull), convexTransformV, convexHull.vertex2Shape, convexHull.shape2Vertex, idtScaleConvex)) :
											static_cast<SupportLocal*>(PX_PLACEMENT_NEW(convexBuff, SupportLocalImpl<ConvexHullV>)(convexHull, convexTransformV, convexHull.vertex2Shape, convexHull.shape2Vertex, idtScaleConvex));
			convexMap->setShapeSpaceCenterofMass(shapeSpaceCenterOfMass);
			// Gather heightfield triangles overlapping the AABB of an OBB around the (inflated) convex.
			Box hullOBB;
			computeOBBAroundConvex(hullOBB, convexGeom, cm, tempConvexPose);
			hullOBB.extents += PxVec3(boundInflation);
			const PxBounds3 bounds = PxBounds3::basisExtent(hullOBB.center, hullOBB.rot, hullOBB.extents);
			midPhaseQuery(hfUtil, pose, bounds, tempContainer);
		}
		// Get results
		const PxU32 nbTriangles = tempContainer.size();
		if(!nbTriangles)
			break;
		// Move to AABB space
		const Matrix34FromTransform worldToConvex(tempConvexPose.getInverse());
		const PxMat34 meshToConvex = worldToConvex*meshToWorldSkew;
		const Mat33V rot(V3LoadU(meshToConvex.m.column0), V3LoadU(meshToConvex.m.column1), V3LoadU(meshToConvex.m.column2));
		const PxMatTransformV meshToConvexV(V3LoadU(meshToConvex.p), rot);
		FloatV mtd;
		{
			bool hadContacts = false;
			// Process the candidate triangles in fixed-size batches.
			const PxU32 nbBatches = (nbTriangles + BATCH_TRIANGLE_NUMBER - 1)/BATCH_TRIANGLE_NUMBER;
			mtd = FMax();
			MTDTriangle triangles[BATCH_TRIANGLE_NUMBER];
			for(PxU32 a = 0; a < nbBatches; ++a)
			{
				const PxU32 startIndex = a * BATCH_TRIANGLE_NUMBER;
				const PxU32 nbTrigs = PxMin(nbTriangles - startIndex, BATCH_TRIANGLE_NUMBER);
				for(PxU32 k=0; k<nbTrigs; k++)
				{
					//triangle vertex space
					const PxU32 currentTriangleIndex = tempContainer[startIndex+k];
					hfUtil.getTriangle(pose, triangles[k], NULL, NULL, currentTriangleIndex, false, false);
					triangles[k].extraTriData = ETD_CONVEX_EDGE_ALL;	// heightfield triangles: all edges treated as convex
				}
				//ML: mtd has back face culling, so if the capsule's center is below the triangle, we won't generate any contacts
				hadContacts = calculateMTD(polyData, convexMap, convexTransformV, meshToConvexV, isDoubleSided, inflationV, triangles, nbTrigs, startIndex, manifoldContacts, numContacts, normal, closestA, closestB, triangleIndex, mtd) || hadContacts;
			}
			if(!hadContacts)
				break;
			// calculateMTD returned an index into tempContainer; map it back to a heightfield triangle index.
			triangleIndex = tempContainer[triangleIndex];
			foundInitial = true;
		}
		const FloatV distV = mtd;
		worldNormal = convexTransformV.rotate(normal);
		worldContactA = convexTransformV.transform(closestA);
		if(FAllGrtr(FZero(), distV))
		{
			// Still penetrating: push the convex out along the contact normal and iterate again.
			const Vec3V t = V3Scale(worldNormal, mtd);
			translation = V3Sub(translation, t);
			center = V3Sub(center, t);
		}
		else
		{
			if(i == 0)
			{
				//First iteration so keep this normal
				hit.distance = 0.0f;
				V3StoreU(worldContactA, hit.position);
				V3StoreU(worldNormal, hit.normal);
				hit.faceIndex = triangleIndex;
				return true;
			}
			break;
		}
	}
	return finalizeMTD(hit, translation, worldContactA, triangleIndex, foundInitial);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
bool physx::Gu::computeSphere_SphereMTD(const Sphere& sphere0, const Sphere& sphere1, PxGeomSweepHit& hit)
{
const PxVec3 delta = sphere1.center - sphere0.center;
const PxReal d2 = delta.magnitudeSquared();
const PxReal radiusSum = sphere0.radius + sphere1.radius;
const PxReal d = manualNormalize(hit.normal, delta, d2);
hit.distance = d - radiusSum;
hit.position = sphere0.center + hit.normal * sphere0.radius;
return true;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
bool physx::Gu::computeSphere_CapsuleMTD( const Sphere& sphere, const Capsule& capsule, PxGeomSweepHit& hit)
{
const PxReal radiusSum = sphere.radius + capsule.radius;
PxReal u;
distancePointSegmentSquared(capsule, sphere.center, &u);
const PxVec3 normal = capsule.getPointAt(u) - sphere.center;
const PxReal lenSq = normal.magnitudeSquared();
const PxF32 d = manualNormalize(hit.normal, normal, lenSq);
hit.distance = d - radiusSum;
hit.position = sphere.center + hit.normal * sphere.radius;
return true;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
bool physx::Gu::computeCapsule_CapsuleMTD(const Capsule& capsule0, const Capsule& capsule1, PxGeomSweepHit& hit)
{
PxReal s,t;
distanceSegmentSegmentSquared(capsule0, capsule1, &s, &t);
const PxReal radiusSum = capsule0.radius + capsule1.radius;
const PxVec3 pointAtCapsule0 = capsule0.getPointAt(s);
const PxVec3 pointAtCapsule1 = capsule1.getPointAt(t);
const PxVec3 normal = pointAtCapsule0 - pointAtCapsule1;
const PxReal lenSq = normal.magnitudeSquared();
const PxF32 len = manualNormalize(hit.normal, normal, lenSq);
hit.distance = len - radiusSum;
hit.position = pointAtCapsule1 + hit.normal * capsule1.radius;
return true;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
bool physx::Gu::computePlane_CapsuleMTD(const PxPlane& plane, const Capsule& capsule, PxGeomSweepHit& hit)
{
const PxReal d0 = plane.distance(capsule.p0);
const PxReal d1 = plane.distance(capsule.p1);
PxReal dmin;
PxVec3 point;
if(d0 < d1)
{
dmin = d0;
point = capsule.p0;
}
else
{
dmin = d1;
point = capsule.p1;
}
hit.normal = plane.n;
hit.distance = dmin - capsule.radius;
hit.position = point - hit.normal * dmin;
return true;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
bool physx::Gu::computePlane_BoxMTD(const PxPlane& plane, const Box& box, PxGeomSweepHit& hit)
{
PxVec3 pts[8];
box.computeBoxPoints(pts);
PxReal dmin = plane.distance(pts[0]);
PxU32 index = 0;
for(PxU32 i=1;i<8;i++)
{
const PxReal d = plane.distance(pts[i]);
if(dmin > d)
{
index = i;
dmin = d;
}
}
hit.normal = plane.n;
hit.distance = dmin;
hit.position = pts[index] - plane.n*dmin;
return true;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
bool physx::Gu::computePlane_ConvexMTD(const PxPlane& plane, const PxConvexMeshGeometry& convexGeom, const PxTransform& convexPose, PxGeomSweepHit& hit)
{
const ConvexMesh* convexMesh = static_cast<const ConvexMesh*>(convexGeom.convexMesh);
const FastVertex2ShapeScaling convexScaling(convexGeom.scale);
PxU32 nbVerts = convexMesh->getNbVerts();
const PxVec3* PX_RESTRICT verts = convexMesh->getVerts();
PxVec3 worldPointMin = convexPose.transform(convexScaling * verts[0]);
PxReal dmin = plane.distance(worldPointMin);
for(PxU32 i=1;i<nbVerts;i++)
{
const PxVec3 worldPoint = convexPose.transform(convexScaling * verts[i]);
const PxReal d = plane.distance(worldPoint);
if(dmin > d)
{
dmin = d;
worldPointMin = worldPoint;
}
}
hit.normal = plane.n;
hit.distance = dmin;
hit.position = worldPointMin - plane.n * dmin;
return true;
}
| 38,568 | C++ | 32.480035 | 274 | 0.718601 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuGjkQuery.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "geometry/PxGjkQuery.h"
#include "GuInternal.h"
#include "GuOverlapTests.h"
#include "GuSweepTests.h"
#include "GuRaycastTests.h"
#include "GuBoxConversion.h"
#include "GuTriangleMesh.h"
#include "GuMTD.h"
#include "GuBounds.h"
#include "GuDistancePointSegment.h"
#include "GuConvexMesh.h"
#include "GuDistancePointBox.h"
#include "GuMidphaseInterface.h"
#include "foundation/PxFPU.h"
using namespace physx;
using namespace Gu;
#include "GuGJK.h"
#include "GuGJKPenetration.h"
#include "GuGJKRaycast.h"
#include "GuEPA.h"
#include "geomutils/PxContactBuffer.h"
using namespace aos;
// Copies a SIMD vector into a plain PxVec3 via an unaligned store.
static PX_SUPPORT_INLINE PxVec3 Vec3V_To_PxVec3(const Vec3V& a)
{
	PxVec3 result;
	V3StoreU(a, result);
	return result;
}
// Extracts the scalar value of a SIMD float.
static PX_SUPPORT_INLINE PxReal FloatV_To_PxReal(const FloatV& a)
{
	PxF32 result;
	FStore(a, &result);
	return result;
}
// Adapter that exposes a user-provided PxGjkQuery::Support callback as a Gu::ConvexV
// so it can be fed to the internal GJK/EPA routines. 'supportScale' lets callers
// temporarily shrink the shape (see the GJK_DEGENERATE retry in proximityInfo).
struct CustomConvexV : ConvexV
{
	const PxGjkQuery::Support* s;	// user support callback (not owned)
	PxReal supportScale;			// uniform scale applied to support points (1 = unscaled)
	CustomConvexV(const PxGjkQuery::Support& _s) : ConvexV(Gu::ConvexType::eCUSTOM), s(&_s), supportScale(1.0f)
	{
		setMinMargin(FLoad(0.001f));
		setSweepMargin(FLoad(0.001f));
	}
	// Index-based support: the index is ignored, an arbitrary fixed direction is used.
	PX_SUPPORT_INLINE Vec3V supportPoint(const PxI32 /*index*/) const
	{
		return supportLocal(V3LoadU(PxVec3(1, 0, 0)));
	}
	// Support point in the shape's local frame, scaled by supportScale.
	PX_SUPPORT_INLINE Vec3V supportLocal(const Vec3V& dir) const
	{
		return V3Scale(V3LoadU(s->supportLocal(Vec3V_To_PxVec3(dir))), FLoad(supportScale));
	}
	PX_SUPPORT_INLINE Vec3V supportLocal(const Vec3V& dir, PxI32& index) const
	{
		index = 0;	// custom shapes have no vertex indices
		return supportLocal(dir);
	}
	// Support point relative to another frame: rotate the direction into local
	// space, evaluate the support there, then transform the point back.
	PX_SUPPORT_INLINE Vec3V supportRelative(const Vec3V& dir, const PxMatTransformV& aTob, const PxMatTransformV& aTobT) const
	{
		const Vec3V _dir = aTobT.rotate(dir);
		const Vec3V p = supportLocal(_dir);
		return aTob.transform(p);
	}
	PX_SUPPORT_INLINE Vec3V supportRelative(const Vec3V& dir, const PxMatTransformV& aTob, const PxMatTransformV& aTobT, PxI32& index) const
	{
		index = 0;	// custom shapes have no vertex indices
		return supportRelative(dir, aTob, aTobT);
	}
};
// Computes closest points / penetration info between two user-defined convex
// shapes. GJK runs first; deep penetration falls back to EPA seeded with the
// GJK simplex. A degenerate GJK result is retried with slightly shrunk supports.
// Outputs (pointA/pointB/separatingAxis/separation) are in world space; returns
// true when the shapes are within contactDistance (plus margins) of each other.
bool PxGjkQuery::proximityInfo(const Support& a, const Support& b, const PxTransform& poseA, const PxTransform& poseB, PxReal contactDistance, PxReal toleranceLength, PxVec3& pointA, PxVec3& pointB, PxVec3& separatingAxis, PxReal& separation)
{
	const PxTransformV transf0 = loadTransformU(poseA);
	const PxTransformV transf1 = loadTransformU(poseB);
	// Work in B's local frame: A is mapped into B's space via aToB.
	const PxTransformV curRTrans(transf1.transformInv(transf0));
	const PxMatTransformV aToB(curRTrans);
	const PxReal degenerateScale = 0.001f;	// shrink factor for the degenerate-case retry
	CustomConvexV supportA(a);
	CustomConvexV supportB(b);
	const RelativeConvex<CustomConvexV> convexA(supportA, aToB);
	const LocalConvex<CustomConvexV> convexB(supportB);
	Vec3V initialSearchDir = aToB.p;	// seed GJK with the center-to-center direction
	FloatV contactDist = FLoad((a.getMargin() + b.getMargin()) + contactDistance);
	Vec3V aPoints[4];	// GJK simplex vertices, reused by EPA on fallback
	Vec3V bPoints[4];
	PxU8 size = 0;
	GjkOutput output;
	GjkStatus status = gjkPenetration(convexA, convexB, initialSearchDir, contactDist, true, aPoints, bPoints, size, output);
	if (status == GJK_DEGENERATE)
	{
		// Retry with slightly shrunk supports to nudge GJK out of the degenerate configuration.
		supportA.supportScale = supportB.supportScale = 1.0f - degenerateScale;
		status = gjkPenetration(convexA, convexB, initialSearchDir, contactDist, true, aPoints, bPoints, size, output);
		supportA.supportScale = supportB.supportScale = 1.0f;
	}
	if (status == GJK_CONTACT || status == GJK_DEGENERATE)
	{
		// Results are in B's frame: map back to world space and push the closest
		// points out to the real (margin-inflated) surfaces.
		separatingAxis = poseB.rotate(Vec3V_To_PxVec3(output.normal).getNormalized());
		pointA = poseB.transform(Vec3V_To_PxVec3(output.closestA)) - separatingAxis * a.getMargin();
		pointB = poseB.transform(Vec3V_To_PxVec3(output.closestB)) + separatingAxis * b.getMargin();
		separation = (pointA - pointB).dot(separatingAxis);
		return true;
	}
	if (status == EPA_CONTACT)
	{
		// Deep penetration: refine with EPA starting from the GJK simplex.
		status = epaPenetration(convexA, convexB, aPoints, bPoints, size, true, FLoad(toleranceLength), output);
		if (status == EPA_CONTACT || status == EPA_DEGENERATE)
		{
			separatingAxis = poseB.rotate(Vec3V_To_PxVec3(output.normal).getNormalized());
			pointA = poseB.transform(Vec3V_To_PxVec3(output.closestA)) - separatingAxis * a.getMargin();
			pointB = poseB.transform(Vec3V_To_PxVec3(output.closestB)) + separatingAxis * b.getMargin();
			separation = (pointA - pointB).dot(separatingAxis);
			return true;
		}
	}
	return false;
}
// Degenerate convex whose support mapping always returns the origin: it models a
// single point (the ray start) so PxGjkQuery::raycast can reuse the generic GJK
// raycast machinery.
struct PointConvexV : ConvexV
{
	Vec3V zero;	// cached origin returned by every support query
	PointConvexV() : ConvexV(Gu::ConvexType::eCUSTOM)
	{
		zero = V3Zero();
		setMinMargin(FLoad(0.001f));
		setSweepMargin(FLoad(0.001f));
	}
	PX_SUPPORT_INLINE Vec3V supportPoint(const PxI32 /*index*/) const
	{
		return zero;
	}
	PX_SUPPORT_INLINE Vec3V supportLocal(const Vec3V& /*dir*/) const
	{
		return zero;
	}
	PX_SUPPORT_INLINE Vec3V supportLocal(const Vec3V& dir, PxI32& index) const
	{
		index = 0;	// a point has no vertex indices
		return supportLocal(dir);
	}
	// Support relative to another frame: the origin transformed by aTob.
	PX_SUPPORT_INLINE Vec3V supportRelative(const Vec3V& dir, const PxMatTransformV& aTob, const PxMatTransformV& aTobT) const
	{
		const Vec3V _dir = aTobT.rotate(dir);
		const Vec3V p = supportLocal(_dir);
		return aTob.transform(p);
	}
	PX_SUPPORT_INLINE Vec3V supportRelative(const Vec3V& dir, const PxMatTransformV& aTob, const PxMatTransformV& aTobT, PxI32& index) const
	{
		index = 0;	// a point has no vertex indices
		return supportRelative(dir, aTob, aTobT);
	}
};
// Casts a ray against a user-defined convex shape. Implemented as a GJK raycast
// between the shape and a point (PointConvexV), working in a frame centered at
// the ray start. On hit: t = impact distance along the ray (<= maxDist),
// n = world-space surface normal, p = world-space impact point. Returns false on miss.
bool PxGjkQuery::raycast(const Support& shape, const PxTransform& pose, const PxVec3& rayStart, const PxVec3& unitDir, PxReal maxDist, PxReal& t, PxVec3& n, PxVec3& p)
{
	const PxTransformV transf0 = loadTransformU(pose);
	const PxTransformV transf1 = PxTransformV(V3LoadU(rayStart));	// frame centered at the ray start
	const PxTransformV curRTrans(transf1.transformInv(transf0));
	const PxMatTransformV aToB(curRTrans);
	CustomConvexV supportA(shape);
	PointConvexV supportB;	// the ray origin, modeled as a point "convex"
	const RelativeConvex<CustomConvexV> convexA(supportA, aToB);
	const LocalConvex<PointConvexV> convexB(supportB);
	Vec3V initialDir = aToB.p;
	FloatV initialLambda = FLoad(0);
	Vec3V s = V3Zero();	// ray start in its own frame
	Vec3V r = V3LoadU(unitDir * maxDist);	// full-length ray vector
	FloatV lambda;
	Vec3V normal, closestA;
	if (gjkRaycast(convexA, convexB, initialDir, initialLambda, s, r, lambda, normal, closestA, shape.getMargin()))
	{
		t = FloatV_To_PxReal(lambda) * maxDist;	// lambda scales the full-length ray vector
		n = -Vec3V_To_PxVec3(normal).getNormalized();
		// Push the hit point out to the margin-inflated surface, then back to world space.
		p = Vec3V_To_PxVec3(closestA) + n * shape.getMargin() + rayStart;
		return true;
	}
	return false;
}
bool PxGjkQuery::overlap(const Support& a, const Support& b, const PxTransform& poseA, const PxTransform& poseB)
{
const PxTransformV transf0 = loadTransformU(poseA);
const PxTransformV transf1 = loadTransformU(poseB);
const PxTransformV curRTrans(transf1.transformInv(transf0));
const PxMatTransformV aToB(curRTrans);
CustomConvexV supportA(a);
CustomConvexV supportB(b);
const RelativeConvex<CustomConvexV> convexA(supportA, aToB);
const LocalConvex<CustomConvexV> convexB(supportB);
Vec3V initialSearchDir = aToB.p;
FloatV contactDist = FLoad(a.getMargin() + b.getMargin());
Vec3V closestA, closestB, normal;
FloatV distance;
GjkStatus status = gjk(convexA, convexB, initialSearchDir, contactDist, closestA, closestB, normal, distance);
return status == GJK_CLOSE || status == GJK_CONTACT;
}
// Sweeps shape A along unitDir (up to maxDist) against static shape B, both
// user-defined convexes, via a GJK raycast performed in B's local frame.
// On hit: t = impact distance, n = world-space contact normal, p = world-space
// contact point on A. Returns false when there is no hit within maxDist.
bool PxGjkQuery::sweep(const Support& a, const Support& b, const PxTransform& poseA, const PxTransform& poseB, const PxVec3& unitDir, PxReal maxDist, PxReal& t, PxVec3& n, PxVec3& p)
{
	const PxTransformV transf0 = loadTransformU(poseA);
	const PxTransformV transf1 = loadTransformU(poseB);
	const PxTransformV curRTrans(transf1.transformInv(transf0));	// A expressed in B's frame
	const PxMatTransformV aToB(curRTrans);
	CustomConvexV supportA(a);
	CustomConvexV supportB(b);
	const RelativeConvex<CustomConvexV> convexA(supportA, aToB);
	const LocalConvex<CustomConvexV> convexB(supportB);
	Vec3V initialDir = aToB.p;
	FloatV initialLambda = FLoad(0);
	Vec3V s = V3Zero();
	Vec3V r = V3LoadU(poseB.rotateInv(unitDir * maxDist));	// full-length sweep vector in B's frame
	FloatV lambda;
	Vec3V normal, closestA;
	if (gjkRaycast(convexA, convexB, initialDir, initialLambda, s, r, lambda, normal, closestA, a.getMargin() + b.getMargin()))
	{
		t = FloatV_To_PxReal(lambda) * maxDist;	// lambda scales the full-length sweep vector
		n = poseB.rotate(-(Vec3V_To_PxVec3(normal)).getNormalized());	// back to world space
		p = poseB.transform(Vec3V_To_PxVec3(closestA)) + n * a.getMargin();
		return true;
	}
	return false;
}
| 9,711 | C++ | 33.935252 | 242 | 0.748739 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuAABBPruner.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "common/PxProfileZone.h"
#include "foundation/PxIntrinsics.h"
#include "foundation/PxUserAllocated.h"
#include "foundation/PxBitUtils.h"
#include "GuAABBPruner.h"
#include "GuPrunerMergeData.h"
#include "GuCallbackAdapter.h"
#include "GuSphere.h"
#include "GuBox.h"
#include "GuCapsule.h"
#include "GuAABBTreeQuery.h"
#include "GuAABBTreeNode.h"
#include "GuQuery.h"
#include "CmVisualization.h"
using namespace physx;
using namespace Gu;
using namespace Cm;
// PT: TODO: this is copied from SqBounds.h, should be either moved to Gu and shared or passed as a user parameter
#define SQ_PRUNER_EPSILON 0.005f
#define SQ_PRUNER_INFLATION (1.0f + SQ_PRUNER_EPSILON) // pruner test shape inflation (not narrow phase shape)
// incrementalRebuild: when true, tree rebuilds are spread over multiple steps
// (paced via mRebuildRateHint) while the bucket pruner keeps newly added/updated
// objects queryable until the new tree is ready.
// nbObjectsPerNode: presumably the leaf-size limit used when building the tree;
// must be < 16 (PX_ASSERT below) — TODO confirm the exact storage constraint.
AABBPruner::AABBPruner(bool incrementalRebuild, PxU64 contextID, CompanionPrunerType cpType, BVHBuildStrategy buildStrategy, PxU32 nbObjectsPerNode) :
	mAABBTree			(NULL),
	mNewTree			(NULL),
	mNbCachedBoxes		(0),
	mNbCalls			(0),
	mTimeStamp			(0),
	mBucketPruner		(contextID, cpType, &mPool),
	mProgress			(BUILD_NOT_STARTED),
	mRebuildRateHint	(100),
	mAdaptiveRebuildTerm(0),
	mNbObjectsPerNode	(nbObjectsPerNode),
	mBuildStrategy		(buildStrategy),
	mPool				(contextID, TRANSFORM_CACHE_GLOBAL),
	mIncrementalRebuild	(incrementalRebuild),
	mUncommittedChanges	(false),
	mNeedsNewTree		(false),
	mNewTreeFixups		("AABBPruner::mNewTreeFixups")
{
	PX_ASSERT(nbObjectsPerNode<16);
}
AABBPruner::~AABBPruner()
{
	// Route teardown through release() so destruction and explicit release share one code path.
	release();
}
// Adds 'count' objects to the pruner's pool. While an incremental rebuild is in
// flight (mAABBTree exists), new objects also go into the companion bucket pruner
// so they remain queryable before the rebuilt tree is committed — unless a
// pruning structure was provided, in which case that structure is merged instead.
// Returns true if all 'count' objects were added successfully.
bool AABBPruner::addObjects(PrunerHandle* results, const PxBounds3* bounds, const PrunerPayload* data, const PxTransform* transforms, PxU32 count, bool hasPruningStructure)
{
	PX_PROFILE_ZONE("SceneQuery.prunerAddObjects", mPool.mContextID);
	if(!count)
		return true;
	// no need to do refitMarked for added objects since they are not in the tree
	// if we have provided pruning structure, we will merge it, the changes will be applied after the objects has been addded
	if(!hasPruningStructure || !mAABBTree)
		mUncommittedChanges = true;
	// PT: TODO: 'addObjects' for bucket pruner too. Not urgent since we always call the function with count=1 at the moment
	const PxU32 valid = mPool.addObjects(results, bounds, data, transforms, count);	// number of objects actually added
	// Bucket pruner is only used while the dynamic pruner is rebuilding
	// For the static pruner a full rebuild will happen in commit() every time we modify something, this is not true if
	// pruning structure was provided. The objects tree will be merged directly into the static tree. No rebuild will be triggered.
	if(mIncrementalRebuild && mAABBTree)
	{
		PX_PROFILE_ZONE("SceneQuery.bucketPrunerAddObjects", mPool.mContextID)
		mNeedsNewTree = true; // each add forces a tree rebuild
		// if a pruner structure is provided, we dont move the new objects into bucket pruner
		// the pruning structure will be merged into the bucket pruner
		if(!hasPruningStructure)
		{
			for(PxU32 i=0;i<valid;i++)
			{
				// PT: poolIndex fetched in vain for bucket pruner companion...
				// Since the incremental tree references the same pool we could just retrieve the poolIndex there, from the handle...
				const PrunerHandle handle = results[i];
				const PoolIndex poolIndex = mPool.getIndex(handle);
				mBucketPruner.addObject(data[i], handle, bounds[i], transforms[i], mTimeStamp, poolIndex);
			}
		}
	}
	return valid==count;
}
// Updates bounds/transforms of existing objects.
// Objects still present in the current tree are marked for refit; objects that were already
// evicted from the tree (invalid tree-map entry) are updated in the bucket pruner instead.
// \param handles        handles of objects to update
// \param count          number of objects
// \param inflation      bounds inflation passed to the pool update
// \param boundsIndices  indices into 'newBounds'/'newTransforms' (may be NULL to skip the pool update)
// \param newBounds      new bounds (may be NULL)
// \param newTransforms  new transforms
void AABBPruner::updateObjects(const PrunerHandle* handles, PxU32 count, float inflation, const PxU32* boundsIndices, const PxBounds3* newBounds, const PxTransform32* newTransforms)
{
	PX_PROFILE_ZONE("SceneQuery.prunerUpdateObjects", mPool.mContextID);

	if(!count)
		return;
	mUncommittedChanges = true;

	if(handles && boundsIndices && newBounds)
		mPool.updateAndInflateBounds(handles, boundsIndices, newBounds, newTransforms, count, inflation);

	if(mIncrementalRebuild && mAABBTree)
	{
		mNeedsNewTree = true; // each update forces a tree rebuild

		const PxBounds3* currentBounds = mPool.getCurrentWorldBoxes();
		const PxTransform* currentTransforms = mPool.getTransforms();
		const PrunerPayload* data = mPool.getObjects();

		// Once the background build is past the full-refit stage, updates must also be
		// recorded in mToRefit so they can be re-applied to the new tree at swap time.
		const bool addToRefit = mProgress == BUILD_NEW_MAPPING || mProgress == BUILD_FULL_REFIT || mProgress==BUILD_LAST_FRAME;
		for(PxU32 i=0; i<count; i++)
		{
			const PrunerHandle handle = handles[i];
			const PoolIndex poolIndex = mPool.getIndex(handle);
			const TreeNodeIndex treeNodeIndex = mTreeMap[poolIndex];
			if(treeNodeIndex != INVALID_NODE_ID) // this means it's in the current tree still and hasn't been removed
				mAABBTree->markNodeForRefit(treeNodeIndex);
			else // otherwise it means it should be in the bucket pruner
			{
				PX_ASSERT(&data[poolIndex]==&mPool.getPayloadData(handle));
				bool found = mBucketPruner.updateObject(currentBounds[poolIndex], currentTransforms[poolIndex], data[poolIndex], handle, poolIndex);
				PX_UNUSED(found); PX_ASSERT(found);
			}

			if(addToRefit)
				mToRefit.pushBack(poolIndex);
		}
	}
}
// Removes objects from the pruner.
// The pool removal swaps the last object into the freed slot, so all index-based side
// structures (tree map, bucket pruner, pending new-tree fixups) must be told about both
// the removed slot and the relocated last slot.
// \param handles          handles of objects to remove
// \param count            number of objects
// \param removalCallback  optional callback invoked by the pool for each removed payload
void AABBPruner::removeObjects(const PrunerHandle* handles, PxU32 count, PrunerPayloadRemovalCallback* removalCallback)
{
	PX_PROFILE_ZONE("SceneQuery.prunerRemoveObjects", mPool.mContextID);

	if(!count)
		return;
	mUncommittedChanges = true;

	for(PxU32 i=0; i<count; i++)
	{
		const PrunerHandle h = handles[i];

		// copy the payload/userdata before removing it since we need to know the payload/userdata to remove it from the bucket pruner
		const PrunerPayload removedData = mPool.getPayloadData(h);
		const PoolIndex poolIndex = mPool.getIndex(h); // save the pool index for removed object
		const PoolIndex poolRelocatedLastIndex = mPool.removeObject(h, removalCallback); // save the lastIndex returned by removeObject

		if(mIncrementalRebuild && mAABBTree)
		{
			mNeedsNewTree = true;
			const TreeNodeIndex treeNodeIndex = mTreeMap[poolIndex]; // already removed from pool but still in tree map
			// Payload now occupying 'poolIndex' is the one that got swapped down from the last slot.
			const PrunerPayload swappedData = mPool.getObjects()[poolIndex];
			if(treeNodeIndex!=INVALID_NODE_ID) // can be invalid if removed
			{
				mAABBTree->markNodeForRefit(treeNodeIndex); // mark the spot as blank
				mBucketPruner.swapIndex(poolIndex, swappedData, poolRelocatedLastIndex); // if swapped index is in bucket pruner
			}
			else
			{
				// Object lived only in the bucket pruner (was added after the current tree was built).
				bool status = mBucketPruner.removeObject(removedData, h, poolIndex, swappedData, poolRelocatedLastIndex);
				// PT: removed assert to avoid crashing all UTs
				//PX_ASSERT(status);
				PX_UNUSED(status);
			}

			mTreeMap.invalidate(poolIndex, poolRelocatedLastIndex, *mAABBTree);
			// While a background build is running, record the move so the new tree can be fixed up at swap time.
			if(mNewTree)
				mNewTreeFixups.pushBack(NewTreeFixup(poolIndex, poolRelocatedLastIndex));
		}
	}

	if (mPool.getNbActiveObjects()==0)
	{
		// this is just to make sure we release all the internal data once all the objects are out of the pruner
		// since this is the only place we know that and we don't want to keep memory reserved
		release();

		// Pruner API requires a commit before the next query, even if we ended up removing the entire tree here. This
		// forces that to happen.
		mUncommittedChanges = true;
	}
}
bool AABBPruner::overlap(const ShapeData& queryVolume, PrunerOverlapCallback& pcbArgName) const
{
PX_ASSERT(!mUncommittedChanges);
bool again = true;
if(mAABBTree)
{
OverlapCallbackAdapter pcb(pcbArgName, mPool);
switch(queryVolume.getType())
{
case PxGeometryType::eBOX:
{
if(queryVolume.isOBB())
{
const DefaultOBBAABBTest test(queryVolume);
again = AABBTreeOverlap<true, OBBAABBTest, AABBTree, BVHNode, OverlapCallbackAdapter>()(mPool.getCurrentAABBTreeBounds(), *mAABBTree, test, pcb);
}
else
{
const DefaultAABBAABBTest test(queryVolume);
again = AABBTreeOverlap<true, AABBAABBTest, AABBTree, BVHNode, OverlapCallbackAdapter>()(mPool.getCurrentAABBTreeBounds(), *mAABBTree, test, pcb);
}
}
break;
case PxGeometryType::eCAPSULE:
{
const DefaultCapsuleAABBTest test(queryVolume, SQ_PRUNER_INFLATION);
again = AABBTreeOverlap<true, CapsuleAABBTest, AABBTree, BVHNode, OverlapCallbackAdapter>()(mPool.getCurrentAABBTreeBounds(), *mAABBTree, test, pcb);
}
break;
case PxGeometryType::eSPHERE:
{
const DefaultSphereAABBTest test(queryVolume);
again = AABBTreeOverlap<true, SphereAABBTest, AABBTree, BVHNode, OverlapCallbackAdapter>()(mPool.getCurrentAABBTreeBounds(), *mAABBTree, test, pcb);
}
break;
case PxGeometryType::eCONVEXMESH:
{
const DefaultOBBAABBTest test(queryVolume);
again = AABBTreeOverlap<true, OBBAABBTest, AABBTree, BVHNode, OverlapCallbackAdapter>()(mPool.getCurrentAABBTreeBounds(), *mAABBTree, test, pcb);
}
break;
default:
PX_ALWAYS_ASSERT_MESSAGE("unsupported overlap query volume geometry type");
}
}
if(again && mIncrementalRebuild && mBucketPruner.getNbObjects())
again = mBucketPruner.overlap(queryVolume, pcbArgName);
return again;
}
bool AABBPruner::sweep(const ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& pcbArgName) const
{
PX_ASSERT(!mUncommittedChanges);
bool again = true;
if(mAABBTree)
{
RaycastCallbackAdapter pcb(pcbArgName, mPool);
const PxBounds3& aabb = queryVolume.getPrunerInflatedWorldAABB();
again = AABBTreeRaycast<true, true, AABBTree, BVHNode, RaycastCallbackAdapter>()(mPool.getCurrentAABBTreeBounds(), *mAABBTree, aabb.getCenter(), unitDir, inOutDistance, aabb.getExtents(), pcb);
}
if(again && mIncrementalRebuild && mBucketPruner.getNbObjects())
again = mBucketPruner.sweep(queryVolume, unitDir, inOutDistance, pcbArgName);
return again;
}
bool AABBPruner::raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& pcbArgName) const
{
PX_ASSERT(!mUncommittedChanges);
bool again = true;
if(mAABBTree)
{
RaycastCallbackAdapter pcb(pcbArgName, mPool);
again = AABBTreeRaycast<false, true, AABBTree, BVHNode, RaycastCallbackAdapter>()(mPool.getCurrentAABBTreeBounds(), *mAABBTree, origin, unitDir, inOutDistance, PxVec3(0.0f), pcb);
}
if(again && mIncrementalRebuild && mBucketPruner.getNbObjects())
again = mBucketPruner.raycast(origin, unitDir, inOutDistance, pcbArgName);
return again;
}
// This isn't part of the pruner virtual interface, but it is part of the public interface
// of AABBPruner - it gets called by SqManager to force a rebuild, and requires a commit() before
// queries can take place
void AABBPruner::purge()
{
	release();
	mUncommittedChanges = true; // this ensures a commit() must happen before any query
}
// Sets the target number of buildStep() calls over which a background tree rebuild
// should be spread.
// \param nbStepsForRebuild  desired number of steps; expected to be > 3 (asserted in debug).
//
// Fix: the previous code computed "nbStepsForRebuild - 3" unconditionally. The assert
// only fires in debug builds, so in release a value <= 3 underflowed the unsigned
// subtraction and produced a huge rebuild rate hint. The subtraction is now clamped so
// the hint is always at least 1.
void AABBPruner::setRebuildRateHint(PxU32 nbStepsForRebuild)
{
	PX_ASSERT(nbStepsForRebuild > 3);
	// looks like a magic number to account for the rebuild pipeline latency
	mRebuildRateHint = nbStepsForRebuild > 3 ? nbStepsForRebuild - 3 : 1;
	mAdaptiveRebuildTerm = 0;
}
// Commit either performs a refit if background rebuild is not yet finished
// or swaps the current tree for the second tree rebuilt in the background
void AABBPruner::commit()
{
	PX_PROFILE_ZONE("SceneQuery.prunerCommit", mPool.mContextID);

	if(!mUncommittedChanges && (mProgress != BUILD_FINISHED))
		// Q: seems like this is both for refit and finalization so is this is correct?
		//    i.e. in a situation when we started rebuilding a tree and didn't add anything since
		//    who is going to set mUncommittedChanges to true?
		// A: it's set in buildStep at final stage, so that finalization is forced.
		//    Seems a bit difficult to follow and verify correctness.
		return;

	mUncommittedChanges = false;

	// Static pruner (or no tree yet): just do a synchronous full rebuild.
	if(!mAABBTree || !mIncrementalRebuild)
	{
		if(!mIncrementalRebuild && mAABBTree)
			PxGetFoundation().error(PxErrorCode::ePERF_WARNING, PX_FL, "SceneQuery static AABB Tree rebuilt, because a shape attached to a static actor was added, removed or moved, and PxSceneQueryDesc::staticStructure is set to eSTATIC_AABB_TREE.");

		fullRebuildAABBTree();
		return;
	}

	// Note: it is not safe to call AABBPruner::build() here
	// because the first thread will perform one step of the incremental update,
	// continue raycasting, while the second thread performs the next step in
	// the incremental update

	// Calling Refit() below is safe. It will call
	// StaticPruner::build() when necessary. Both will early
	// exit if the tree is already up to date, if it is not already, then we
	// must be the first thread performing raycasts on a dirty tree and other
	// scene query threads will be locked out by the write lock in
	// PrunerManager::flushUpdates()

	if (mProgress != BUILD_FINISHED)
	{
		// Calling refit because the second tree is not ready to be swapped in (mProgress != BUILD_FINISHED)
		// Generally speaking as long as things keep moving the second build will never catch up with true state
		refitUpdatedAndRemoved();
	}
	else
	{
		// The background build completed: swap the new tree in and patch it up.
		PX_PROFILE_ZONE("SceneQuery.prunerNewTreeFinalize", mPool.mContextID);

		{
			PX_PROFILE_ZONE("SceneQuery.prunerNewTreeSwitch", mPool.mContextID);

			PX_DELETE(mAABBTree); // delete the old tree
			mCachedBoxes.release();
			mProgress = BUILD_NOT_STARTED; // reset the build state to initial

			// Adjust adaptive term to get closer to specified rebuild rate.
			// perform an even division correction to make sure the rebuild rate adds up
			if (mNbCalls > mRebuildRateHint)
				mAdaptiveRebuildTerm++;
			else if (mNbCalls < mRebuildRateHint)
				mAdaptiveRebuildTerm--;

			// Switch trees
#if PX_DEBUG
			mNewTree->validate();
#endif
			mAABBTree = mNewTree; // set current tree to progressively rebuilt tree
			mNewTree = NULL; // clear out the progressively rebuild tree pointer
			mNodeAllocator.release();
		}

		{
			PX_PROFILE_ZONE("SceneQuery.prunerNewTreeMapping", mPool.mContextID);

			// rebuild the tree map to match the current (newly built) tree
			mTreeMap.initMap(PxMax(mPool.getNbActiveObjects(), mNbCachedBoxes), *mAABBTree);

			// The new mapping has been computed using only indices stored in the new tree. Those indices map the pruning pool
			// we had when starting to build the tree. We need to re-apply recorded moves to fix the tree that finished rebuilding.
			// AP: the problem here is while we are rebuilding the tree there are ongoing modifications to the current tree
			// but the background build has a cached copy of all the AABBs at the time it was started
			// (and will produce indices referencing those)
			// Things that can happen in the meantime: update, remove, add, commit
			for(NewTreeFixup* r = mNewTreeFixups.begin(); r < mNewTreeFixups.end(); r++)
			{
				// PT: we're not doing a full refit after this point anymore, so the remaining deleted objects must be manually marked for
				// refit (otherwise their AABB in the tree would remain valid, leading to crashes when the corresponding index is 0xffffffff).
				// We must do this before invalidating the corresponding tree nodes in the map, obviously (otherwise we'd be reading node
				// indices that we already invalidated).
				const PoolIndex poolIndex = r->removedIndex;
				const TreeNodeIndex treeNodeIndex = mTreeMap[poolIndex];
				if(treeNodeIndex!=INVALID_NODE_ID)
					mAABBTree->markNodeForRefit(treeNodeIndex);

				mTreeMap.invalidate(r->removedIndex, r->relocatedLastIndex, *mAABBTree);
			}
			mNewTreeFixups.clear(); // clear out the fixups since we just applied them all
		}

		{
			PX_PROFILE_ZONE("SceneQuery.prunerNewTreeFinalRefit", mPool.mContextID);

			// Re-apply updates that were recorded after the background full refit already ran.
			const PxU32 size = mToRefit.size();
			for(PxU32 i=0;i<size;i++)
			{
				const PoolIndex poolIndex = mToRefit[i];
				const TreeNodeIndex treeNodeIndex = mTreeMap[poolIndex];
				if(treeNodeIndex!=INVALID_NODE_ID)
					mAABBTree->markNodeForRefit(treeNodeIndex);
			}
			mToRefit.clear();
			refitUpdatedAndRemoved();
		}

		{
			PX_PROFILE_ZONE("SceneQuery.prunerNewTreeRemoveObjects", mPool.mContextID);

			// Objects stamped with the previous timestamp are now covered by the new tree and
			// can leave the bucket pruner; objects added during the build (current timestamp) stay.
			PxU32 nbRemovedPairs = mBucketPruner.removeMarkedObjects(mTimeStamp-1);
			PX_UNUSED(nbRemovedPairs);

			// A new rebuild is needed iff the bucket pruner still holds objects.
			mNeedsNewTree = mBucketPruner.getNbObjects()>0;
		}
	}
	updateBucketPruner();
}
// Shifts the origin of all cached world-space data by 'shift': the bounds pool, the
// current tree, the bucket pruner (dynamic pruner only), and the in-progress new tree.
void AABBPruner::shiftOrigin(const PxVec3& shift)
{
	mPool.shiftOrigin(shift);

	if(mAABBTree)
		mAABBTree->shiftOrigin(shift);

	if(mIncrementalRebuild)
		mBucketPruner.shiftOrigin(shift);

	if(mNewTree)
		mNewTree->shiftOrigin(shift);
}
// Debug visualization: renders the current tree, then (for the dynamic pruner) the
// bucket pruner contents for objects not yet in the tree.
void AABBPruner::visualize(PxRenderOutput& out, PxU32 primaryColor, PxU32 secondaryColor) const
{
	// getAABBTree() asserts when pruner is dirty. NpScene::visualization() does not enforce flushUpdate. see DE7834
	visualizeTree(out, primaryColor, mAABBTree);

	// Render added objects not yet in the tree
	out << PxTransform(PxIdentity);
	out << PxU32(PxDebugColor::eARGB_WHITE);

	if(mIncrementalRebuild && mBucketPruner.getNbObjects())
		mBucketPruner.visualize(out, secondaryColor);
}
// Advances the background tree rebuild by one step. The build progresses through the
// states BUILD_NOT_STARTED -> BUILD_INIT -> BUILD_IN_PROGRESS -> BUILD_NEW_MAPPING ->
// BUILD_FULL_REFIT -> BUILD_LAST_FRAME -> BUILD_FINISHED; commit() consumes the
// finished tree.
// \param synchronousCall  true when called from the simulation thread; only then may the
//                         build be kicked off (prepareBuild) or the tree be marked dirty.
// \return true when the new tree is fully built (mProgress == BUILD_FINISHED)
bool AABBPruner::buildStep(bool synchronousCall)
{
	PX_PROFILE_ZONE("SceneQuery.prunerBuildStep", mPool.mContextID);

	PX_ASSERT(mIncrementalRebuild);
	if(mNeedsNewTree)
	{
		if(mProgress==BUILD_NOT_STARTED)
		{
			// Builds may only be started from a synchronous call.
			if(!synchronousCall || !prepareBuild())
				return false;
		}
		else if(mProgress==BUILD_INIT)
		{
			// First call with 0 work units initializes the progressive build.
			mNewTree->progressiveBuild(mBuilder, mNodeAllocator, mBuildStats, 0, 0);
			mProgress = BUILD_IN_PROGRESS;
			mNbCalls = 0;

			// Use a heuristic to estimate the number of work units needed for rebuilding the tree.
			// The general idea is to use the number of work units of the previous tree to build the new tree.
			// This works fine as long as the number of leaves remains more or less the same for the old and the
			// new tree. If that is not the case, this estimate can be way off and the work units per step will
			// be either much too small or too large. Hence, in that case we will try to estimate the number of work
			// units based on the number of leaves of the new tree as follows:
			//
			// - Assume new tree with n leaves is perfectly-balanced
			// - Compute the depth of perfectly-balanced tree with n leaves
			// - Estimate number of working units for the new tree
			const PxU32 depth = PxILog2(mBuilder.mNbPrimitives); // Note: This is the depth without counting the leaf layer
			const PxU32 estimatedNbWorkUnits = depth * mBuilder.mNbPrimitives; // Estimated number of work units for new tree
			const PxU32 estimatedNbWorkUnitsOld = mAABBTree ? mAABBTree->getTotalPrims() : 0;
			if ((estimatedNbWorkUnits <= (estimatedNbWorkUnitsOld << 1)) && (estimatedNbWorkUnits >= (estimatedNbWorkUnitsOld >> 1)))
				// The two estimates do not differ by more than a factor 2
				mTotalWorkUnits = estimatedNbWorkUnitsOld;
			else
			{
				mAdaptiveRebuildTerm = 0;
				mTotalWorkUnits = estimatedNbWorkUnits;
			}

			// Apply the adaptive correction term, clamped so the total never goes negative.
			const PxI32 totalWorkUnits = PxI32(mTotalWorkUnits + (mAdaptiveRebuildTerm * mBuilder.mNbPrimitives));
			mTotalWorkUnits = PxU32(PxMax(totalWorkUnits, 0));
		}
		else if(mProgress==BUILD_IN_PROGRESS)
		{
			mNbCalls++;
			// Work units per step, sized so the build spreads over ~mRebuildRateHint steps.
			const PxU32 Limit = 1 + (mTotalWorkUnits / mRebuildRateHint);
			// looks like progressiveRebuild returns 0 when finished
			if(!mNewTree->progressiveBuild(mBuilder, mNodeAllocator, mBuildStats, 1, Limit))
			{
				// Done
				mProgress = BUILD_NEW_MAPPING;
#if PX_DEBUG
				mNewTree->validate();
#endif
			}
		}
		else if(mProgress==BUILD_NEW_MAPPING)
		{
			mNbCalls++;
			mProgress = BUILD_FULL_REFIT;

			// PT: we can't call fullRefit without creating the new mapping first: the refit function will fetch boxes from
			// the pool using "primitive indices" captured in the tree. But some of these indices may have been invalidated
			// if objects got removed while the tree was built. So we need to invalidate the corresponding nodes before refit,
			// that way the #prims will be zero and the code won't fetch a wrong box (which may now below to a different object).
			{
				PX_PROFILE_ZONE("SceneQuery.prunerNewTreeMapping", mPool.mContextID);

				if(mNewTreeFixups.size())
				{
					mNewTreeMap.initMap(PxMax(mPool.getNbActiveObjects(), mNbCachedBoxes), *mNewTree);

					// The new mapping has been computed using only indices stored in the new tree. Those indices map the pruning pool
					// we had when starting to build the tree. We need to re-apply recorded moves to fix the tree.
					for(NewTreeFixup* r = mNewTreeFixups.begin(); r < mNewTreeFixups.end(); r++)
						mNewTreeMap.invalidate(r->removedIndex, r->relocatedLastIndex, *mNewTree);

					mNewTreeFixups.clear();
#if PX_DEBUG
					mNewTree->validate();
#endif
				}
			}
		}
		else if(mProgress==BUILD_FULL_REFIT)
		{
			mNbCalls++;
			mProgress = BUILD_LAST_FRAME;

			{
				PX_PROFILE_ZONE("SceneQuery.prunerNewTreeFullRefit", mPool.mContextID);

				// We need to refit the new tree because objects may have moved while we were building it.
				mNewTree->fullRefit(mPool.getCurrentWorldBoxes());
			}
		}
		else if(mProgress==BUILD_LAST_FRAME)
		{
			mProgress = BUILD_FINISHED;
		}

		// This is required to be set because commit handles both refit and a portion of build finalization (why?)
		// This is overly conservative also only necessary in case there were no updates at all to the tree since the last tree swap
		// It also overly conservative in a sense that it could be set only if mProgress was just set to BUILD_FINISHED
		// If run asynchronously from a different thread, we touched just the new AABB build phase, we should not mark the main tree as dirty
		if(synchronousCall)
			mUncommittedChanges = true;

		return mProgress==BUILD_FINISHED;
	}

	return false;
}
// Prepares a background tree rebuild: allocates the new tree, snapshots the current
// world bounds (the build works on this cached copy while the live pool keeps changing),
// bumps the timestamp, and initializes the builder. Moves the state machine from
// BUILD_NOT_STARTED to BUILD_INIT.
// \return true if a build was (or already is) prepared; false if there is nothing to build
bool AABBPruner::prepareBuild()
{
	PX_PROFILE_ZONE("SceneQuery.prepareBuild", mPool.mContextID);

	PX_ASSERT(mIncrementalRebuild);
	if(mNeedsNewTree)
	{
		if(mProgress==BUILD_NOT_STARTED)
		{
			const PxU32 nbObjects = mPool.getNbActiveObjects();
			if(!nbObjects)
				return false;

			mNodeAllocator.release();
			PX_DELETE(mNewTree);
			mNewTree = PX_NEW(AABBTree);

			mNbCachedBoxes = nbObjects;

			// Snapshot of the current bounds; the build consumes this copy.
			mCachedBoxes.init(nbObjects, mPool.getCurrentWorldBoxes());

			// PT: objects currently in the bucket pruner will be in the new tree. They are marked with the
			// current timestamp (mTimeStamp). However more objects can get added while we compute the new tree,
			// and those ones will not be part of it. These new objects will be marked with the new timestamp
			// value (mTimeStamp+1), and we can use these different values to remove the proper objects from
			// the bucket pruner (when switching to the new tree).
			mTimeStamp++;

			// notify the incremental pruner to swap trees (for incremental pruner companion)
			mBucketPruner.timeStampChange();

			mBuilder.reset();
			mBuilder.mNbPrimitives	= mNbCachedBoxes;
			mBuilder.mBounds		= &mCachedBoxes;
			mBuilder.mLimit			= mNbObjectsPerNode;
			mBuilder.mBuildStrategy	= mBuildStrategy;

			mBuildStats.reset();

			// start recording modifications to the tree made during rebuild to reapply (fix the new tree) eventually
			PX_ASSERT(mNewTreeFixups.size()==0);

			mProgress = BUILD_INIT;
		}
	}
	else
		return false;

	return true;
}
/**
 * Synchronously rebuilds the AABB-tree from scratch for all objects currently in the
 * pruning pool. Used by the static pruner (and when no tree exists yet).
 * \return true on success (an empty pool counts as success)
 */
bool AABBPruner::fullRebuildAABBTree()
{
	PX_PROFILE_ZONE("SceneQuery.prunerFullRebuildAABBTree", mPool.mContextID);

	// Release possibly already existing tree
	PX_DELETE(mAABBTree);

	// Don't bother building an AABB-tree if there isn't a single static object
	const PxU32 nbObjects = mPool.getNbActiveObjects();
	if(!nbObjects)
		return true;

	// Create a new tree and build it over the pool's current bounds.
	mAABBTree = PX_NEW(AABBTree);
	const bool buildOK = mAABBTree->build(AABBTreeBuildParams(mNbObjectsPerNode, nbObjects, &mPool.getCurrentAABBTreeBounds(), mBuildStrategy), mNodeAllocator);

	// No need for the tree map for static pruner
	if(mIncrementalRebuild)
		mTreeMap.initMap(PxMax(nbObjects, mNbCachedBoxes), *mAABBTree);

	return buildOK;
}
// called in the end of commit(), but only if mIncrementalRebuild is true
// Rebuilds the companion (bucket) pruner's internal acceleration data.
void AABBPruner::updateBucketPruner()
{
	PX_PROFILE_ZONE("SceneQuery.prunerUpdateBucketPruner", mPool.mContextID);

	PX_ASSERT(mIncrementalRebuild);
	mBucketPruner.build();
}
// Releases all internal data: both trees, the maps, cached boxes, builder state and the
// companion pruner. Note that the pruning pool itself is NOT cleared here.
void AABBPruner::release() // this can be called from purge()
{
	mBucketPruner.release();

	mTimeStamp = 0;

	mTreeMap.release();
	mNewTreeMap.release();
	mCachedBoxes.release();
	mBuilder.reset();
	mNodeAllocator.release();
	PX_DELETE(mNewTree);
	PX_DELETE(mAABBTree);

	mNbCachedBoxes = 0;
	mProgress = BUILD_NOT_STARTED;
	mNewTreeFixups.clear();
	mUncommittedChanges = false;
}
// Refit current tree
// Recomputes the bounds of all tree nodes previously flagged via markNodeForRefit()
// (updated or removed objects), for both the bucket pruner companion and the main tree.
void AABBPruner::refitUpdatedAndRemoved()
{
	PX_PROFILE_ZONE("SceneQuery.prunerRefitUpdatedAndRemoved", mPool.mContextID);

	PX_ASSERT(mIncrementalRebuild);
	AABBTree* tree = getAABBTree();
	if(!tree)
		return;

#if PX_DEBUG
	tree->validate();
#endif

	//### missing a way to skip work if not needed

	const PxU32 nbObjects = mPool.getNbActiveObjects();
	// At this point there still can be objects in the tree that are blanked out so it's an optimization shortcut (not required)
	if(!nbObjects)
		return;

	mBucketPruner.refitMarkedNodes(mPool.getCurrentWorldBoxes());
	tree->refitMarkedNodes(mPool.getCurrentWorldBoxes());
}
void AABBPruner::merge(const void* mergeParams)
{
const AABBPrunerMergeData& pruningStructure = *reinterpret_cast<const AABBPrunerMergeData*> (mergeParams);
if(!pruningStructure.mAABBTreeNodes)
return;
if(mAABBTree)
{
// index in pruning pool, where new objects were added
const PxU32 pruningPoolIndex = mPool.getNbActiveObjects() - pruningStructure.mNbObjects;
// create tree from given nodes and indices
AABBTreeMergeData aabbTreeMergeParams(pruningStructure.mNbNodes, pruningStructure.mAABBTreeNodes,
pruningStructure.mNbObjects, pruningStructure.mAABBTreeIndices, pruningPoolIndex);
if(!mIncrementalRebuild)
{
// merge tree directly
mAABBTree->mergeTree(aabbTreeMergeParams);
}
else
{
mBucketPruner.addTree(aabbTreeMergeParams, mTimeStamp);
}
}
}
// Returns the combined world bounds of everything in the pruner: the root bounds of the
// main tree plus, during incremental rebuilds, the bounds of the bucket pruner contents.
void AABBPruner::getGlobalBounds(PxBounds3& bounds) const
{
	bounds.setEmpty();

	// Root node of the main tree covers all tree-resident objects.
	if(mAABBTree && mAABBTree->getNodes())
		bounds = mAABBTree->getNodes()->mBV;

	// Objects not yet in the tree live in the bucket pruner.
	if(mIncrementalRebuild && mBucketPruner.getNbObjects())
	{
		PxBounds3 bucketBounds;
		mBucketPruner.getGlobalBounds(bucketBounds);
		bounds.include(bucketBounds);
	}
}
| 27,670 | C++ | 34.843264 | 241 | 0.744742 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuSDF.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuSDF.h"
#include "GuAABBTreeNode.h"
#include "GuAABBTree.h"
#include "GuAABBTreeBounds.h"
#include "GuWindingNumber.h"
#include "GuAABBTreeNode.h"
#include "GuDistancePointBox.h"
#include "GuDistancePointTriangle.h"
#include "GuAABBTreeQuery.h"
#include "GuIntersectionRayTriangle.h"
#include "GuIntersectionRayBox.h"
#include "foundation/PxAtomic.h"
#include "foundation/PxThread.h"
#include "common/GuMeshAnalysis.h"
#include "GuMeshAnalysis.h"
#include "PxSDFBuilder.h"
#include "GuDistancePointSegment.h"
#include "common/PxSerialFramework.h"
#define EXTENDED_DEBUG 0
namespace physx
{
namespace Gu
{
// Frees the SDF buffers, but only when this instance owns them (deserialized
// instances point into serialization memory and must not free it).
SDF::~SDF()
{
	if(mOwnsMemory)
	{
		PX_FREE(mSdf);
		PX_FREE(mSubgridStartSlots);
		PX_FREE(mSubgridSdf);
	}
}
// Allocates the SDF storage and records the grid parameters.
// With subgridSize > 0 a sparse layout is used: a coarse background grid of
// (dimX/subgridSize+1) x (dimY/subgridSize+1) x (dimZ/subgridSize+1) samples plus a
// 3D texture block of quantized subgrids; otherwise a dense dimX*dimY*dimZ grid.
// \return pointer to the (coarse or dense) PxReal SDF sample array
PxReal* SDF::allocateSdfs(const PxVec3& meshLower, const PxReal& spacing, const PxU32 dimX, const PxU32 dimY, const PxU32 dimZ,
	const PxU32 subgridSize, const PxU32 sdfSubgrids3DTexBlockDimX, const PxU32 sdfSubgrids3DTexBlockDimY, const PxU32 sdfSubgrids3DTexBlockDimZ,
	PxReal minSdfValueSubgrids, PxReal maxSdfValueSubgrids, PxU32 sparsePixelNumBytes)
{
	// Must not already hold data - this method does not free previous buffers.
	PX_ASSERT(!mSdf);
	PX_ASSERT(!mSubgridStartSlots);
	PX_ASSERT(!mSubgridSdf);

	mMeshLower = meshLower;
	mSpacing = spacing;
	mDims.x = dimX;
	mDims.y = dimY;
	mDims.z = dimZ;
	mSubgridSize = subgridSize;
	mSdfSubgrids3DTexBlockDim.x = sdfSubgrids3DTexBlockDimX;
	mSdfSubgrids3DTexBlockDim.y = sdfSubgrids3DTexBlockDimY;
	mSdfSubgrids3DTexBlockDim.z = sdfSubgrids3DTexBlockDimZ;
	mSubgridsMinSdfValue = minSdfValueSubgrids;
	mSubgridsMaxSdfValue = maxSdfValueSubgrids;
	mBytesPerSparsePixel = sparsePixelNumBytes;

	if (subgridSize > 0)
	{
		//Sparse sdf
		// The dense dimensions must be an integer multiple of the subgrid size.
		PX_ASSERT(dimX % subgridSize == 0);
		PX_ASSERT(dimY % subgridSize == 0);
		PX_ASSERT(dimZ % subgridSize == 0);

		PxU32 x = dimX / subgridSize;
		PxU32 y = dimY / subgridSize;
		PxU32 z = dimZ / subgridSize;

		// Coarse grid has one extra sample per axis (cell corners).
		mNumSdfs = (x + 1) * (y + 1) * (z + 1);
		// Subgrid texture block: each subgrid stores (subgridSize+1)^3 samples of mBytesPerSparsePixel bytes.
		mNumSubgridSdfs = mBytesPerSparsePixel * sdfSubgrids3DTexBlockDimX * (subgridSize + 1) * sdfSubgrids3DTexBlockDimY * (subgridSize + 1) * sdfSubgrids3DTexBlockDimZ * (subgridSize + 1);
		// One start slot per coarse cell, addressing that cell's subgrid in the texture block.
		mNumStartSlots = x * y * z;

		mSubgridSdf = PX_ALLOCATE(PxU8, mNumSubgridSdfs, "PxU8");
		mSubgridStartSlots = PX_ALLOCATE(PxU32, mNumStartSlots, "PxU32");
		mSdf = PX_ALLOCATE(PxReal, mNumSdfs, "PxReal");
	}
	else
	{
		//Dense sdf - no sparse grid data required
		mSubgridStartSlots = NULL;
		mSubgridSdf = NULL;

		mNumSdfs = dimX * dimY*dimZ;
		mNumSubgridSdfs = 0;
		mNumStartSlots = 0;
		mSdf = PX_ALLOCATE(PxReal, mNumSdfs, "PxReal");
	}
	return mSdf;
}
// Serialization: writes the three SDF buffers (coarse/dense samples, start slots,
// subgrid data), each aligned to PX_SERIAL_ALIGN. The write order must stay in sync
// with importExtraData().
void SDF::exportExtraData(PxSerializationContext& context)
{
	if (mSdf)
	{
		context.alignData(PX_SERIAL_ALIGN);
		context.writeData(mSdf, mNumSdfs * sizeof(PxReal));
	}

	if (mNumStartSlots)
	{
		context.alignData(PX_SERIAL_ALIGN);
		context.writeData(mSubgridStartSlots, mNumStartSlots * sizeof(PxU32));
	}

	if (mSubgridSdf)
	{
		context.alignData(PX_SERIAL_ALIGN);
		context.writeData(mSubgridSdf, mNumSubgridSdfs * sizeof(PxU8));
	}
}
// Deserialization counterpart of exportExtraData(): repoints the buffers into the
// serialized data block, in the same order and alignment as they were written.
// The non-NULL checks mirror the export conditions (the serialized pointers act as flags).
void SDF::importExtraData(PxDeserializationContext& context)
{
	if (mSdf)
		mSdf = context.readExtraData<PxReal, PX_SERIAL_ALIGN>(mNumSdfs);

	if (mSubgridStartSlots)
		mSubgridStartSlots = context.readExtraData<PxU32, PX_SERIAL_ALIGN>(mNumStartSlots);

	if (mSubgridSdf)
		mSubgridSdf = context.readExtraData<PxU8, PX_SERIAL_ALIGN>(mNumSubgridSdfs);
}
void buildTree(const PxU32* triangles, const PxU32 numTriangles, const PxVec3* points, PxArray<Gu::BVHNode>& tree, PxF32 enlargement = 1e-4f)
{
//Computes a bounding box for every triangle in triangles
Gu::AABBTreeBounds boxes;
boxes.init(numTriangles);
for (PxU32 i = 0; i < numTriangles; ++i)
{
const PxU32* tri = &triangles[3 * i];
PxBounds3 box = PxBounds3::empty();
box.include(points[tri[0]]);
box.include(points[tri[1]]);
box.include(points[tri[2]]);
box.fattenFast(enlargement);
boxes.getBounds()[i] = box;
}
Gu::buildAABBTree(numTriangles, boxes, tree);
}
// BVH traversal controller that tests whether a line segment intersects any triangle
// of a mesh. Usable with Gu::traverseBVH; aborts as soon as one intersection is found.
class LineSegmentTrimeshIntersectionTraversalController
{
private:
	const PxU32* mTriangles;    // index triplets, 3 per triangle
	const PxVec3* mPoints;      // vertex positions
	PxVec3 mSegmentStart;
	PxVec3 mSegmentEnd;
	PxVec3 mDirection;          // mSegmentEnd - mSegmentStart (not normalized; t in [0,1] spans the segment)
	bool mIntersects;

public:
	LineSegmentTrimeshIntersectionTraversalController(const PxU32* triangles, const PxVec3* points, PxVec3 segmentStart, PxVec3 segmentEnd)
		: mTriangles(triangles), mPoints(points), mSegmentStart(segmentStart), mSegmentEnd(segmentEnd), mDirection(segmentEnd - segmentStart), mIntersects(false)
	{
	}

	// Re-arms the controller for a new segment so the instance can be reused across queries.
	void reset(PxVec3 segmentStart, PxVec3 segmentEnd)
	{
		mSegmentStart = segmentStart;
		mSegmentEnd = segmentEnd;
		mDirection = segmentEnd - segmentStart;
		mIntersects = false;
	}

	// True if the last traversal found at least one triangle hit.
	bool intersectionDetected() const
	{
		return mIntersects;
	}

	PX_FORCE_INLINE Gu::TraversalControl::Enum analyze(const Gu::BVHNode& node, PxI32)
	{
		if (node.isLeaf())
		{
			PxI32 j = node.getPrimitiveIndex();
			const PxU32* tri = &mTriangles[3 * j];

			PxReal at, au, av;
			// Ray-triangle test restricted to t in [0,1], i.e. to the segment itself.
			// backfaceCull=false, with a small epsilon for robustness.
			if (Gu::intersectRayTriangle(mSegmentStart, mDirection, mPoints[tri[0]], mPoints[tri[1]], mPoints[tri[2]], at, au, av, false, 1e-4f) && at >= 0.0f && at <= 1.0f)
			{
				mIntersects = true;
				return TraversalControl::eAbort;	// one hit is enough
			}
			return TraversalControl::eDontGoDeeper;
		}

		// Descend only if the segment's supporting ray overlaps the node bounds within
		// t in [0,1], or the segment starts inside the box.
		PxReal tnear, tfar;
		if (Gu::intersectRayAABB(node.mBV.minimum, node.mBV.maximum, mSegmentStart, mDirection, tnear, tfar) >= 0 && ((tnear >= 0.0f && tnear <= 1.0f) || (tfar >= 0.0f && tfar <= 1.0f) || node.mBV.contains(mSegmentStart)))
			return TraversalControl::eGoDeeper;
		return TraversalControl::eDontGoDeeper;
	}

private:
	PX_NOCOPY(LineSegmentTrimeshIntersectionTraversalController)
};
class ClosestDistanceToTrimeshTraversalController
{
private:
PxReal mClosestDistanceSquared;
const PxU32* mTriangles;
const PxVec3* mPoints;
const Gu::BVHNode* mNodes;
PxVec3 mQueryPoint;
PxVec3 mClosestPoint;
PxI32 mClosestTriId;
public:
PX_FORCE_INLINE ClosestDistanceToTrimeshTraversalController(){}
PX_FORCE_INLINE ClosestDistanceToTrimeshTraversalController(const PxU32* triangles, const PxVec3* points, Gu::BVHNode* nodes) :
mTriangles(triangles), mPoints(points), mNodes(nodes), mQueryPoint(0.0f), mClosestPoint(0.0f), mClosestTriId(-1)
{
initialize(triangles, points, nodes);
}
void initialize(const PxU32* triangles, const PxVec3* points, Gu::BVHNode* nodes)
{
mTriangles = triangles;
mPoints = points;
mNodes = nodes;
mQueryPoint = PxVec3(0.0f);
mClosestPoint = PxVec3(0.0f);
mClosestTriId = -1;
mClosestDistanceSquared = PX_MAX_F32;
}
PX_FORCE_INLINE void setQueryPoint(const PxVec3& queryPoint)
{
this->mQueryPoint = queryPoint;
mClosestDistanceSquared = FLT_MAX;
mClosestPoint = PxVec3(0.0f);
mClosestTriId = -1;
}
PX_FORCE_INLINE const PxVec3& getClosestPoint() const
{
return mClosestPoint;
}
PX_FORCE_INLINE PxReal distancePointBoxSquared(const PxBounds3& box, const PxVec3& point)
{
PxVec3 closestPt = box.minimum.maximum(box.maximum.minimum(point));
return (closestPt - point).magnitudeSquared();
}
PX_FORCE_INLINE Gu::TraversalControl::Enum analyze(const Gu::BVHNode& node, PxI32)
{
if (distancePointBoxSquared(node.mBV, mQueryPoint) >= mClosestDistanceSquared)
return Gu::TraversalControl::eDontGoDeeper;
if (node.isLeaf())
{
const PxI32 j = node.getPrimitiveIndex();
const PxU32* tri = &mTriangles[3 * j];
aos::FloatV t1, t2;
aos::Vec3V q = V3LoadU(mQueryPoint);
aos::Vec3V a = V3LoadU(mPoints[tri[0]]);
aos::Vec3V b = V3LoadU(mPoints[tri[1]]);
aos::Vec3V c = V3LoadU(mPoints[tri[2]]);
aos::Vec3V cp;
aos::FloatV d = Gu::distancePointTriangleSquared2UnitBox(q, a, b, c, t1, t2, cp);
PxReal d2;
FStore(d, &d2);
PxVec3 closest;
V3StoreU(cp, closest);
//const PxVec3 closest = closestPtPointTriangle2UnitBox(mQueryPoint, mPoints[tri[0]], mPoints[tri[1]], mPoints[tri[2]]);
//PxReal d2 = (closest - mQueryPoint).magnitudeSquared();
if (d2 < mClosestDistanceSquared)
{
mClosestDistanceSquared = d2;
mClosestTriId = j;
mClosestPoint = closest;
}
return Gu::TraversalControl::eDontGoDeeper;
}
const Gu::BVHNode& nodePos = mNodes[node.getPosIndex()];
const PxReal distSquaredPos = distancePointBoxSquared(nodePos.mBV, mQueryPoint);
const Gu::BVHNode& nodeNeg = mNodes[node.getNegIndex()];
const PxReal distSquaredNeg = distancePointBoxSquared(nodeNeg.mBV, mQueryPoint);
if (distSquaredPos < distSquaredNeg)
{
if (distSquaredPos < mClosestDistanceSquared)
return Gu::TraversalControl::eGoDeeper;
}
else
{
if (distSquaredNeg < mClosestDistanceSquared)
return Gu::TraversalControl::eGoDeeperNegFirst;
}
return Gu::TraversalControl::eDontGoDeeper;
}
PxI32 getClosestTriId() const { return mClosestTriId; }
//Warm-starts the query with a known upper bound (e.g. the closest triangle from the previous
//query) so the traversal can prune more aggressively from the very first node.
void setClosestStart(const PxReal closestDistanceSquared, PxI32 closestTriangle, const PxVec3& closestPoint)
{
	mClosestPoint = closestPoint;
	mClosestTriId = closestTriangle;
	mClosestDistanceSquared = closestDistanceSquared;
}
private:
PX_NOCOPY(ClosestDistanceToTrimeshTraversalController)
};
//Projects arbitrary query points onto the surface of a triangle mesh.
//Builds a BVH over the triangles once and reuses it for every projectPoint() call.
class PointOntoTriangleMeshProjector : public PxPointOntoTriangleMeshProjector, public PxUserAllocated
{
	PxArray<Gu::BVHNode> mNodes;							//BVH over the mesh triangles
	ClosestDistanceToTrimeshTraversalController mEvaluator;	//Reusable closest-point query state
public:
	PointOntoTriangleMeshProjector(const PxVec3* vertices, const PxU32* indices, PxU32 numTriangles)
	{
		buildTree(indices, numTriangles, vertices, mNodes);
		mEvaluator.initialize(indices, vertices, mNodes.begin());
	}
	//Returns the point on the mesh surface closest to 'point'
	virtual PxVec3 projectPoint(const PxVec3& point) PX_OVERRIDE
	{
		mEvaluator.setQueryPoint(point);
		Gu::traverseBVH(mNodes.begin(), mEvaluator);
		return mEvaluator.getClosestPoint();
	}
	//Returns the closest surface point and reports the triangle on which it lies
	virtual PxVec3 projectPoint(const PxVec3& point, PxU32& closetTriangleIndex) PX_OVERRIDE
	{
		mEvaluator.setQueryPoint(point);
		Gu::traverseBVH(mNodes.begin(), mEvaluator);
		closetTriangleIndex = mEvaluator.getClosestTriId();
		return mEvaluator.getClosestPoint();
	}
	//Self-destructs; instances are created via PX_NEW in PxCreatePointOntoTriangleMeshProjector
	virtual void release() PX_OVERRIDE
	{
		mNodes.reset();
		PX_FREE_THIS;
	}
};
//Factory for a projector that maps points onto the surface of the given triangle mesh.
//NOTE(review): the parameter name suggests a count of indices, but the value is forwarded
//unchanged to PointOntoTriangleMeshProjector, which treats it as a triangle count
//(it is passed straight into buildTree as the number of triangles) - confirm against the public header.
PxPointOntoTriangleMeshProjector* PxCreatePointOntoTriangleMeshProjector(const PxVec3* vertices, const PxU32* indices, PxU32 numTriangleIndices)
{
return PX_NEW(PointOntoTriangleMeshProjector)(vertices, indices, numTriangleIndices);
}
void windingNumbers(const PxVec3* vertices, const PxU32* indices, PxU32 numTriangleIndices, PxU32 width, PxU32 height, PxU32 depth,
PxReal* windingNumbers, PxVec3 min, PxVec3 max, PxVec3* sampleLocations)
{
const PxVec3 extents(max - min);
const PxVec3 delta(extents.x / width, extents.y / height, extents.z / depth);
const PxVec3 offset = min + PxVec3(0.5f * delta.x, 0.5f * delta.y, 0.5f * delta.z);
PxArray<Gu::BVHNode> tree;
buildTree(indices, numTriangleIndices / 3, vertices, tree);
PxHashMap<PxU32, Gu::ClusterApproximation> clusters;
Gu::precomputeClusterInformation(tree.begin(), indices, numTriangleIndices / 3, vertices, clusters);
for (PxU32 x = 0; x < width; ++x)
{
for (PxU32 y = 0; y < height; ++y)
{
for (PxU32 z = 0; z < depth; ++z)
{
PxVec3 queryPoint(x * delta.x + offset.x, y * delta.y + offset.y, z * delta.z + offset.z);
PxReal windingNumber = Gu::computeWindingNumber(tree.begin(), queryPoint, clusters, indices, vertices);
windingNumbers[z * width * height + y * width + x] = windingNumber; // > 0.5f ? PxU32(-1) : 0;
if (sampleLocations)
sampleLocations[z * width * height + y * width + x] = queryPoint;
}
}
}
}
//Interval of grid-slice indices [mStart, mEnd] along one axis, together with the already
//known inside/outside classification at both endpoints. Used by the recursive bisection in
//windingNumbersInsideCheck/computeSDFThreadJob to fill whole runs of samples at once.
struct Range
{
PxI32 mStart; //First slice index of the interval
PxI32 mEnd; //Last slice index of the interval
bool mInsideStart; //Inside/outside state known at mStart
bool mInsideEnd; //Inside/outside state known at mEnd
Range(PxI32 start, PxI32 end, bool insideStart, bool insideEnd) : mStart(start), mEnd(end), mInsideStart(insideStart), mInsideEnd(insideEnd) { }
};
//Shared work description for the SDF worker threads. Each thread claims batches of (y,z) rows
//by atomically advancing 'progress' and writes its results directly into 'sdf'.
struct SDFCalculationData
{
const PxVec3* vertices; //Mesh vertex buffer
const PxU32* indices; //Mesh triangle indices (3 per triangle)
PxU32 numTriangleIndices; //Length of 'indices'
PxU32 width; //Grid resolution in x
PxU32 height; //Grid resolution in y
PxU32 depth; //Grid resolution in z
PxReal* sdf; //Output SDF values, z-major layout; pre-initialized to 1.0f by the caller
PxVec3* sampleLocations; //Optional output of the sample positions (may be NULL)
GridQueryPointSampler* pointSampler; //Maps grid coordinates to world-space sample points
PxArray<Gu::BVHNode>* tree; //BVH over the mesh triangles
PxHashMap<PxU32, Gu::ClusterApproximation>* clusters; //Cluster data for fast winding numbers
PxI32 batchSize = 32; //Number of (y,z) rows a thread claims per atomic increment
PxI32 end; //Total number of (y,z) rows (= depth * height)
PxI32* progress; //Shared atomic cursor into the row range [0, end)
bool optimizeInsideOutsideCalculation; //Toggle to enable an additional optimization for faster inside/outside classification
bool signOnly; //If true, only the sign pass runs; distances are left untouched
};
//Classifies every cell-centered grid sample as inside or outside the mesh using winding numbers.
//Instead of evaluating each sample, the z-column is bisected recursively: if the inside state at
//an interval midpoint matches an endpoint AND the segment between them does not intersect the
//mesh, the whole sub-interval shares that state and is filled in one sweep.
void windingNumbersInsideCheck(const PxVec3* vertices, const PxU32* indices, PxU32 numTriangleIndices, PxU32 width, PxU32 height, PxU32 depth,
bool* insideResult, PxVec3 min, PxVec3 max, PxVec3* sampleLocations)
{
#if PX_DEBUG
//The bisection argument requires all geometry to lie inside the sampling box
PxBounds3 bounds(min, max);
for (PxU32 i = 0; i < numTriangleIndices; ++i)
PX_ASSERT(bounds.contains(vertices[indices[i]]));
#endif
const PxVec3 extents(max - min);
const PxVec3 delta(extents.x / width, extents.y / height, extents.z / depth);
//Note the -0.5f*delta.z: the column indices below run over [0, depth+2) and index i maps to
//output slice i-1, so the z offset is shifted back by one cell relative to x and y
const PxVec3 offset = min + PxVec3(0.5f * delta.x, 0.5f * delta.y, -0.5f * delta.z);
PxArray<Gu::BVHNode> tree;
buildTree(indices, numTriangleIndices / 3, vertices, tree);
PxHashMap<PxU32, Gu::ClusterApproximation> clusters;
Gu::precomputeClusterInformation(tree.begin(), indices, numTriangleIndices / 3, vertices, clusters);
//Reusable segment-vs-mesh intersection query (endpoints set per use via reset())
LineSegmentTrimeshIntersectionTraversalController intersector(indices, vertices, PxVec3(0.0f), PxVec3(0.0f));
PxArray<Range> stack;
for (PxU32 x = 0; x < width; ++x)
{
for (PxU32 y = 0; y < height; ++y)
{
//Both virtual boundary slices (index 0 and depth+1) lie outside the box, hence 'false'
stack.pushBack(Range(0, depth+2, false, false));
while (stack.size() > 0)
{
Range r = stack.popBack();
PxI32 center = (r.mStart + r.mEnd) / 2;
if (center == r.mStart)
{
//Interval collapsed to a single slice: write the known state (skip the virtual boundary slices)
if (r.mStart > 0 && r.mStart <= PxI32(depth))
{
insideResult[(r.mStart - 1) * width * height + y * width + x] = r.mInsideStart;
if (sampleLocations)
sampleLocations[(r.mStart - 1) * width * height + y * width + x] = PxVec3(x * delta.x + offset.x, y * delta.y + offset.y, r.mStart * delta.z + offset.z);
}
continue;
}
//Classify the midpoint sample via its winding number
PxVec3 queryPoint = PxVec3(x * delta.x + offset.x, y * delta.y + offset.y, center * delta.z + offset.z);
bool inside = Gu::computeWindingNumber(tree.begin(), queryPoint, clusters, indices, vertices) > 0.5f;
//Lower half [mStart, center]
if (inside != r.mInsideStart)
stack.pushBack(Range(r.mStart, center, r.mInsideStart, inside));
else
{
//Same state at both ends: if the connecting segment misses the mesh, the whole run shares it
PxVec3 p = PxVec3(x * delta.x + offset.x, y * delta.y + offset.y, r.mStart * delta.z + offset.z);
intersector.reset(p, queryPoint);
Gu::traverseBVH(tree.begin(), intersector);
if (!intersector.intersectionDetected())
{
PxI32 e = PxMin(center, PxI32(depth) + 1);
for (PxI32 z = PxMax(1, r.mStart); z < e; ++z)
{
insideResult[(z - 1) * width * height + y * width + x] = inside;
if (sampleLocations)
sampleLocations[(z - 1) * width * height + y * width + x] = queryPoint;
}
}
else
stack.pushBack(Range(r.mStart, center, r.mInsideStart, inside));
}
//Upper half [center, mEnd] - same logic mirrored
if (inside != r.mInsideEnd)
stack.pushBack(Range(center, r.mEnd, inside, r.mInsideEnd));
else
{
PxVec3 p = PxVec3(x * delta.x + offset.x, y * delta.y + offset.y, r.mEnd * delta.z + offset.z);
intersector.reset(queryPoint, p);
Gu::traverseBVH(tree.begin(), intersector);
if (!intersector.intersectionDetected())
{
PxI32 e = PxMin(r.mEnd, PxI32(depth) + 1);
for (PxI32 z = PxMax(1, center); z < e; ++z)
{
insideResult[(z - 1) * width * height + y * width + x] = inside;
if (sampleLocations)
sampleLocations[(z - 1) * width * height + y * width + x] = queryPoint;
}
}
else
stack.pushBack(Range(center, r.mEnd, inside, r.mInsideEnd));
}
}
}
}
}
//Decomposes a linear index into 2D grid coordinates, x being the fastest-varying axis.
void idToXY(PxU32 id, PxU32 sizeX, PxU32& xi, PxU32& yi)
{
	yi = id / sizeX;
	xi = id - yi * sizeX;
}
//Worker-thread entry point for SDF computation. Threads claim batches of (y,z) rows through an
//atomic cursor; for each row they optionally compute the inside/outside sign (bisection along x,
//like windingNumbersInsideCheck) and then the unsigned closest distance per sample. The caller
//pre-initializes all sdf values to 1.0f so signs can be applied multiplicatively.
void* computeSDFThreadJob(void* data)
{
SDFCalculationData& d = *reinterpret_cast<SDFCalculationData*>(data);
//Triangle found by the previous sample's query; used to warm-start the next closest-point query
PxI32 lastTriangle = -1;
PxArray<Range> stack;
LineSegmentTrimeshIntersectionTraversalController intersector(d.indices, d.vertices, PxVec3(0.0f), PxVec3(0.0f));
//Claim the first batch of rows; PxAtomicAdd returns the post-add value
PxI32 start = physx::PxAtomicAdd(d.progress, d.batchSize) - d.batchSize;
while (start < d.end)
{
PxI32 end = PxMin(d.end, start + d.batchSize);
PxU32 yStart, zStart;
idToXY(start, d.height, yStart, zStart);
for (PxI32 id = start; id < end; ++id)
{
//Row id maps to grid row (y, z)
PxU32 y, z;
idToXY(id, d.height, y, z);
//NOTE(review): tracks the first y of the current batch so the sign-reuse shortcut below only
//reads rows computed by this thread; when y wraps to 0 (new z slice) the guard is relaxed - confirm intent
if (y < yStart)
yStart = 0;
if (d.optimizeInsideOutsideCalculation)
{
//Sign pass: bisect the x-column; interval endpoints 0 and width+1 are virtual outside samples
stack.pushBack(Range(0, d.width + 2, false, false));
while (stack.size() > 0)
{
Range r = stack.popBack();
PxI32 center = (r.mStart + r.mEnd) / 2;
if (center == r.mStart)
{
//Single-slice interval: flip the sign of inside samples (sdf holds +1.0f at this point)
if (r.mStart > 0 && r.mStart <= PxI32(d.width))
{
if (r.mInsideStart)
d.sdf[z * d.width * d.height + y * d.width + (r.mStart - 1)] *= -1.0f;
}
continue;
}
PxVec3 queryPoint = d.pointSampler->getPoint(center - 1, y, z);
bool inside = false;
bool computeWinding = true;
//Shortcut: if the neighbor row below (same batch) is farther from the surface than one cell,
//this sample must have the same sign - skip the winding-number evaluation
if (id > start && y > yStart)
{
PxReal s = d.sdf[z * d.width * d.height + (y - 1) * d.width + (center - 1)];
if (PxAbs(s) > d.pointSampler->getActiveCellSize().y)
{
inside = s < 0.0f;
computeWinding = false;
}
}
if (computeWinding)
inside = Gu::computeWindingNumber(d.tree->begin(), queryPoint, *d.clusters, d.indices, d.vertices) > 0.5f;
//Lower half [mStart, center]
if (inside != r.mInsideStart)
stack.pushBack(Range(r.mStart, center, r.mInsideStart, inside));
else
{
//Same state at both ends: if the segment between them misses the mesh, fill the whole run
PxVec3 p = d.pointSampler->getPoint(r.mStart - 1, y, z);
intersector.reset(p, queryPoint);
Gu::traverseBVH(d.tree->begin(), intersector);
if (!intersector.intersectionDetected())
{
PxI32 e = PxMin(center, PxI32(d.width) + 1);
for (PxI32 x = PxMax(1, r.mStart); x < e; ++x)
{
if (inside)
d.sdf[z * d.width * d.height + y * d.width + (x - 1)] *= -1.0f;
}
}
else
stack.pushBack(Range(r.mStart, center, r.mInsideStart, inside));
}
//Upper half [center, mEnd] - mirrored logic
if (inside != r.mInsideEnd)
stack.pushBack(Range(center, r.mEnd, inside, r.mInsideEnd));
else
{
PxVec3 p = d.pointSampler->getPoint(r.mEnd - 1, y, z);
intersector.reset(queryPoint, p);
Gu::traverseBVH(d.tree->begin(), intersector);
if (!intersector.intersectionDetected())
{
PxI32 e = PxMin(r.mEnd, PxI32(d.width) + 1);
for (PxI32 x = PxMax(1, center); x < e; ++x)
{
if (inside)
d.sdf[z * d.width * d.height + y * d.width + (x - 1)] *= -1.0f;
}
}
else
stack.pushBack(Range(center, r.mEnd, inside, r.mInsideEnd));
}
}
}
if (!d.signOnly)
{
//Distance pass: closest distance to the mesh per sample of this row
for (PxU32 x = 0; x < d.width; ++x)
{
const PxU32 index = z * d.width * d.height + y * d.width + x;
PxVec3 queryPoint = d.pointSampler->getPoint(x, y, z);
ClosestDistanceToTrimeshTraversalController cd(d.indices, d.vertices, d.tree->begin());
cd.setQueryPoint(queryPoint);
if (lastTriangle != -1)
{
//Warm-start the query with a lower-bound distance based on the triangle found by the previous query.
//This helps to cull the tree traversal more effectively in the closest point query.
PxU32 i0 = d.indices[3 * lastTriangle];
PxU32 i1 = d.indices[3 * lastTriangle + 1];
PxU32 i2 = d.indices[3 * lastTriangle + 2];
//const PxVec3 closest = Gu::closestPtPointTriangle2UnitBox(queryPoint, d.vertices[i0], d.vertices[i1], d.vertices[i2]);
//PxReal d2 = (closest - queryPoint).magnitudeSquared();
aos::FloatV t1, t2;
aos::Vec3V q = aos::V3LoadU(queryPoint);
aos::Vec3V a = aos::V3LoadU(d.vertices[i0]);
aos::Vec3V b = aos::V3LoadU(d.vertices[i1]);
aos::Vec3V c = aos::V3LoadU(d.vertices[i2]);
aos::Vec3V cp;
aos::FloatV dist2 = Gu::distancePointTriangleSquared2UnitBox(q, a, b, c, t1, t2, cp);
PxReal d2;
aos::FStore(dist2, &d2);
PxVec3 closest;
aos::V3StoreU(cp, closest);
cd.setClosestStart(d2, lastTriangle, closest);
}
Gu::traverseBVH(d.tree->begin(), cd);
PxVec3 closestPoint = cd.getClosestPoint();
PxReal closestDistance = (closestPoint - queryPoint).magnitude();
lastTriangle = cd.getClosestTriId();
PxReal sign = 1.f;
if (!d.optimizeInsideOutsideCalculation)
{
//Fallback sign computation per sample when the optimized sign pass was disabled
PxReal windingNumber = Gu::computeWindingNumber(d.tree->begin(), queryPoint, *d.clusters, d.indices, d.vertices);
sign = windingNumber > 0.5f ? -1.f : 1.f;
}
//sdf currently holds +/-1 (sign), so this multiplication yields the signed distance
d.sdf[index] *= closestDistance * sign;
if (d.sampleLocations)
d.sampleLocations[index] = queryPoint;
}
}
}
//Claim the next batch of rows
start = physx::PxAtomicAdd(d.progress, d.batchSize) - d.batchSize;
}
return NULL;
}
//Plain 3-component signed integer tuple used to address SDF grid cells
struct PxI32x3
{
PxI32x3(PxI32 x_, PxI32 y_, PxI32 z_) : x(x_), y(y_), z(z_)
{}
PxI32 x;
PxI32 y;
PxI32 z;
};
//Applies per pixel operations similar to the one uses by the fast marching methods to build SDFs out of binary image bitmaps
//This allows to fill in correct distance values in regions where meshes have holes
struct PixelProcessor
{
PxVec3 mCellSize; //World-space size of one grid cell per axis
PxI32 mWidth; //Grid resolution in x
PxI32 mHeight; //Grid resolution in y
PxI32 mDepth; //Grid resolution in z
PixelProcessor(PxVec3 cellSize, PxI32 width, PxI32 height, PxI32 depth) :
mCellSize(cellSize), mWidth(width), mHeight(height), mDepth(depth)
{
}
//Estimates distance values near at mesh holes by estimating the location of the mesh surface. This can be done by analyzing
//the sign change of the imperfect SDF. The signs are computed using winding numbers which are immune to meshes with holes.
//Returns true (and the corrected value in newValue) if the cell's distance had to be shrunk; does not modify 'sdf'.
bool init(PxI32x3 p, const PxReal* sdf, PxReal& newValue) const
{
PxReal initialValue = sdf[idx3D(p.x, p.y, p.z, mWidth, mHeight)];
newValue = PxAbs(initialValue);
//Scan the (clamped) 3x3x3 neighborhood for cells on the other side of the surface
for (PxI32 z = PxMax(0, p.z - 1); z <= PxMin(mDepth - 1, p.z + 1); ++z)
for (PxI32 y = PxMax(0, p.y - 1); y <= PxMin(mHeight - 1, p.y + 1); ++y)
for (PxI32 x = PxMax(0, p.x - 1); x <= PxMin(mWidth - 1, p.x + 1); ++x)
{
if (x == p.x && y == p.y && z == p.z)
continue;
PxReal value = sdf[idx3D(x, y, z, mWidth, mHeight)];
if (PxSign(initialValue) != PxSign(value))
{
//Distance to this neighbor (axis-aligned or diagonal step)
PxReal distance = 0;
if (x != p.x)
distance += mCellSize.x*mCellSize.x;
if (y != p.y)
distance += mCellSize.y*mCellSize.y;
if (z != p.z)
distance += mCellSize.z*mCellSize.z;
distance = PxSqrt(distance);
PxReal delta = PxAbs(value - initialValue);
//If the stored values jump by more than the cell distance, they are inconsistent:
//rescale so the zero crossing fits between the two cells (0.99 keeps it strictly inside)
if (0.99f * delta > distance)
{
PxReal scaling = distance / delta;
PxReal v = 0.99f * scaling * initialValue;
newValue = PxMin(newValue, PxAbs(v));
}
}
}
//Restore the original sign; only the magnitude was adjusted
if (initialValue < 0)
newValue = -newValue;
if (newValue !=initialValue)
return true;
return false;
}
//Processes a pixel in a 3D sdf by applying the rule from the fast marching method. Only works on pixels with the same sign.
//Writes the relaxed value into 'sdf' and returns true if the cell changed.
bool process(PxI32x3 p, PxReal* sdf, PxReal& newValue) const
{
PxReal initialValue = sdf[idx3D(p.x, p.y, p.z, mWidth, mHeight)];
if (initialValue == 0.0f)
return false;
PxReal sign = PxSign(initialValue);
newValue = PxAbs(initialValue);
for (PxI32 z = PxMax(0, p.z - 1); z <= PxMin(mDepth - 1, p.z + 1); ++z)
for (PxI32 y = PxMax(0, p.y - 1); y <= PxMin(mHeight - 1, p.y + 1); ++y)
for (PxI32 x = PxMax(0, p.x - 1); x <= PxMin(mWidth - 1, p.x + 1); ++x)
{
if (x == p.x && y == p.y && z == p.z)
continue;
PxReal value = sdf[idx3D(x, y, z, mWidth, mHeight)];
//Eikonal relaxation: a cell may not be farther from the surface than any
//same-signed neighbor plus the distance to that neighbor
if (sign == PxSign(value))
{
PxReal distance = 0;
if (x != p.x)
distance += mCellSize.x*mCellSize.x;
if (y != p.y)
distance += mCellSize.y*mCellSize.y;
if (z != p.z)
distance += mCellSize.z*mCellSize.z;
distance = PxSqrt(distance);
PxReal absValue = PxAbs(value);
//1.01 tolerance avoids endless ping-pong updates from float rounding
if(absValue + 1.01f*distance < newValue)
newValue = absValue + distance;
}
}
newValue = sign * newValue;
if (newValue != initialValue)
{
sdf[idx3D(p.x, p.y, p.z, mWidth, mHeight)] = newValue;
return true;
}
return false;
}
};
//Allows to store the new value of a SDF pixel to apply the change later. This avoids the need of double buffering the SDF data.
struct Mutation
{
PxI32x3 mIndex; //Grid cell coordinates of the affected pixel
PxReal mNewValue; //Value to write into the SDF at mIndex
Mutation(const PxI32x3& index, PxReal newValue) : mIndex(index), mNewValue(newValue)
{
}
};
//Writes back the deferred SDF changes recorded in mutations[start, end).
void applyMutations(PxArray<Mutation>& mutations, PxU32 start, PxU32 end, PxReal* sdfs, PxU32 width, PxU32 height)
{
	for (PxU32 i = start; i < end; ++i)
	{
		const Mutation& m = mutations[i];
		sdfs[idx3D(m.mIndex.x, m.mIndex.y, m.mIndex.z, width, height)] = m.mNewValue;
	}
}
//Approximates the solution of an Eikonal equation on a dense grid
void fixSdfForNonClosedGeometry(PxU32 width, PxU32 height, PxU32 depth,
PxReal* sdf, const PxVec3& cellSize)
{
PxArray<Mutation> mutations;
PixelProcessor processor(cellSize, width, height, depth);
for (PxU32 z = 0; z < depth; ++z)
for (PxU32 y = 0; y < height; ++y)
for (PxU32 x = 0; x < width; ++x)
{
//Process only cells where a sign change occurs
PxReal newValue;
if (processor.init(PxI32x3(x, y, z), sdf, newValue))
mutations.pushBack(Mutation(PxI32x3(x, y, z), newValue));
}
//printf("numMutations: %i\n", mutations.size());
applyMutations(mutations, 0, mutations.size(), sdf, width, height);
PxU32 maxMutationLoops = 1000;
PxU32 counter = 0;
while (mutations.size() > 0 && counter < maxMutationLoops)
{
PxU32 size = mutations.size();
for (PxU32 i = 0; i < size; ++i)
{
PxI32x3 p = mutations[i].mIndex;
//Process neighbors of item on stack
for (PxI32 z = PxMax(0, p.z - 1); z <= PxMin(PxI32(depth) - 1, p.z + 1); ++z)
for (PxI32 y = PxMax(0, p.y - 1); y <= PxMin(PxI32(height) - 1, p.y + 1); ++y)
for (PxI32 x = PxMax(0, p.x - 1); x <= PxMin(PxI32(width) - 1, p.x + 1); ++x)
{
if (x == p.x && y == p.y && z == p.z)
continue;
PxReal newValue;
if (processor.process(PxI32x3(x, y, z), sdf, newValue))
mutations.pushBack(Mutation(PxI32x3(x, y, z), newValue));
}
}
mutations.removeRange(0, size);
++counter;
}
//For safety reasons: Check all cells again
for (PxU32 z = 0; z < depth; ++z)
for (PxU32 y = 0; y < height; ++y)
for (PxU32 x = 0; x < width; ++x)
{
//Look at all neighbors
PxReal newValue;
if (processor.init(PxI32x3(x, y, z), sdf, newValue))
mutations.pushBack(Mutation(PxI32x3(x, y, z), newValue));
}
counter = 0;
while (mutations.size() > 0 && counter < maxMutationLoops)
{
PxU32 size = mutations.size();
for (PxU32 i = 0; i < size; ++i)
{
PxI32x3 p = mutations[i].mIndex;
//Process neighbors of item on stack
for (PxI32 z = PxMax(0, p.z - 1); z <= PxMin(PxI32(depth) - 1, p.z + 1); ++z)
for (PxI32 y = PxMax(0, p.y - 1); y <= PxMin(PxI32(height) - 1, p.y + 1); ++y)
for (PxI32 x = PxMax(0, p.x - 1); x <= PxMin(PxI32(width) - 1, p.x + 1); ++x)
{
if (x == p.x && y == p.y && z == p.z)
continue;
PxReal newValue;
if (processor.process(PxI32x3(x, y, z), sdf, newValue))
mutations.pushBack(Mutation(PxI32x3(x, y, z), newValue));
}
}
mutations.removeRange(0, size);
++counter;
}
}
//Computes a dense signed distance field on 'numThreads' worker threads using a prebuilt BVH and
//cluster approximations. The sdf buffer is pre-initialized to 1.0f so the workers can apply
//sign and distance multiplicatively; non-watertight meshes get a distance-repair post-pass.
void SDFUsingWindingNumbers(PxArray<Gu::BVHNode>& tree, PxHashMap<PxU32, Gu::ClusterApproximation>& clusters, const PxVec3* vertices, const PxU32* indices, PxU32 numTriangleIndices, PxU32 width, PxU32 height, PxU32 depth,
PxReal* sdf, GridQueryPointSampler& sampler, PxVec3* sampleLocations, PxU32 numThreads, bool isWatertight, bool allVerticesInsideSamplingBox)
{
//The fast inside/outside bisection is only valid when the mesh is closed and fully inside the box
bool optimizeInsideOutsideCalculation = allVerticesInsideSamplingBox && isWatertight;
numThreads = PxMax(numThreads, 1u);
//Shared atomic cursor the workers use to claim row batches
PxI32 progress = 0;
PxArray<PxThread*> threads;
PxArray<SDFCalculationData> perThreadData;
for (PxU32 i = 0; i < numThreads; ++i)
{
perThreadData.pushBack(SDFCalculationData());
SDFCalculationData& d = perThreadData[i];
d.vertices = vertices;
d.indices = indices;
d.numTriangleIndices = numTriangleIndices;
d.width = width;
d.height = height;
d.depth = depth;
d.sdf = sdf;
d.sampleLocations = sampleLocations;
d.optimizeInsideOutsideCalculation = optimizeInsideOutsideCalculation;
d.pointSampler = &sampler;
d.progress = &progress;
d.tree = &tree;
d.clusters = &clusters;
d.end = depth * height;
d.signOnly = false;
}
//Pre-initialize every sample to +1 so the workers can multiply sign and distance into it
PxU32 l = width * height * depth;
for (PxU32 i = 0; i < l; ++i)
sdf[i] = 1.0f;
for (PxU32 i = 0; i < numThreads; ++i)
{
//Single-threaded case runs inline to avoid thread creation overhead
if (perThreadData.size() == 1)
computeSDFThreadJob(&perThreadData[i]);
else
{
threads.pushBack(PX_NEW(PxThread)(computeSDFThreadJob, &perThreadData[i], "thread"));
threads[i]->start();
}
}
for (PxU32 i = 0; i < threads.size(); ++i)
{
threads[i]->waitForQuit();
}
for (PxU32 i = 0; i < threads.size(); ++i)
{
//NOTE(review): explicit destructor call + PX_FREE instead of PX_DELETE; presumably this is the
//intended teardown matching the PX_NEW placement allocation - confirm against the foundation macros
threads[i]->~PxThreadT();
PX_FREE(threads[i]);
}
//Distances computed for meshes with holes need the Eikonal repair pass
if (!isWatertight)
fixSdfForNonClosedGeometry(width, height, depth, sdf, sampler.getActiveCellSize());
}
//Helper class to extract surface triangles from a tetmesh.
//Stores the three vertex indices in ascending order so that triangles with the same vertex set
//compare equal; mFlipped records whether the reordering reversed the original winding
//(an odd number of swaps).
struct SortedTriangle
{
public:
	PxI32 mA;
	PxI32 mB;
	PxI32 mC;
	bool mFlipped;
	PX_FORCE_INLINE SortedTriangle(PxI32 a, PxI32 b, PxI32 c) : mA(a), mB(b), mC(c), mFlipped(false)
	{
		//Three-element bubble sort; every swap toggles the winding parity
		if (mA > mB) { PxSwap(mA, mB); mFlipped = !mFlipped; }
		if (mB > mC) { PxSwap(mB, mC); mFlipped = !mFlipped; }
		if (mA > mB) { PxSwap(mA, mB); mFlipped = !mFlipped; }
	}
};
//Hash/equality functor for SortedTriangle keys in a PxHashMap (duplicate-triangle detection).
struct TriangleHash
{
//XOR of the three (already sorted) vertex indices.
//NOTE(review): XOR is a weak mix (distinct triangles easily collide), but it is order-independent
//and only used for bucketing; equality below disambiguates. Changing the hash would also change
//hash-map iteration order and thereby the output triangle order in analyzeAndFixMesh.
PX_FORCE_INLINE std::size_t operator()(const SortedTriangle& k) const
{
return k.mA ^ k.mB ^ k.mC;
}
//Exact comparison of the sorted index triples (winding is intentionally ignored)
PX_FORCE_INLINE bool equal(const SortedTriangle& first, const SortedTriangle& second) const
{
return first.mA == second.mA && first.mB == second.mB && first.mC == second.mC;
}
};
//Signed volume of a triangle mesh (or a subset of its triangles, when triangleSubset is given):
//sum of the signed volumes of the tetrahedra spanned by the origin and each triangle.
//Positive for consistently outward-facing windings.
PxReal signedVolume(const PxVec3* points, const PxU32* triangles, PxU32 numTriangles, const PxU32* triangleSubset = NULL, PxU32 setLength = 0)
{
	const PxU32 count = triangleSubset ? setLength : numTriangles;
	PxReal sixTimesVolume = 0;
	for (PxU32 k = 0; k < count; ++k)
	{
		const PxU32 triIndex = triangleSubset ? triangleSubset[k] : k;
		const PxU32* tri = &triangles[3 * triIndex];
		const PxVec3& a = points[tri[0]];
		const PxVec3& b = points[tri[1]];
		const PxVec3& c = points[tri[2]];
		//Scalar triple product = 6x the signed tetrahedron volume
		sixTimesVolume += a.dot(b.cross(c));
	}
	return sixTimesVolume * (1.0f / 6.0f);
}
//Analyzes the input mesh and writes a repaired index buffer into 'repairedIndices':
//duplicate vertices are welded, duplicate triangles removed, the winding made consistent and
//(for watertight single-component meshes) oriented so the signed volume is positive.
//On unrecoverable problems a warning is issued and repairedIndices may hold only a partial fix.
void analyzeAndFixMesh(const PxVec3* vertices, const PxU32* indicesOrig, PxU32 numTriangleIndices, PxArray<PxU32>& repairedIndices)
{
const PxU32* indices = indicesOrig;
//Vertex count = highest referenced index + 1
PxI32 numVertices = -1;
for (PxU32 i = 0; i < numTriangleIndices; ++i)
numVertices = PxMax(numVertices, PxI32(indices[i]));
++numVertices;
//Check for duplicate vertices
PxArray<PxI32> map;
MeshAnalyzer::mapDuplicatePoints<PxVec3, PxReal>(vertices, PxU32(numVertices), map, 0.0f);
bool hasDuplicateVertices = false;
for (PxU32 i = 0; i < map.size(); ++i)
{
if (map[i] != PxI32(i))
{
hasDuplicateVertices = true;
break;
}
}
//Weld: remap all indices onto the canonical vertex of each duplicate group
if (hasDuplicateVertices)
{
repairedIndices.resize(numTriangleIndices);
for (PxU32 i = 0; i < numTriangleIndices; ++i)
repairedIndices[i] = map[indices[i]];
indices = repairedIndices.begin();
}
//Check for duplicate triangles
PxHashMap<SortedTriangle, PxI32, TriangleHash> tris;
bool hasDuplicateTriangles = false;
for (PxU32 i = 0; i < numTriangleIndices; i += 3)
{
SortedTriangle tri(indices[i], indices[i + 1], indices[i + 2]);
if (const PxPair<const SortedTriangle, PxI32>* ptr = tris.find(tri))
{
//Count occurrences; any count > 1 marks the mesh as containing duplicates
tris[tri] = ptr->second + 1;
hasDuplicateTriangles = true;
}
else
tris.insert(tri, 1);
}
if (hasDuplicateTriangles)
{
//Rebuild the index buffer keeping one instance per triangle, restoring the original winding
repairedIndices.clear();
for (PxHashMap<SortedTriangle, PxI32, TriangleHash>::Iterator iter = tris.getIterator(); !iter.done(); ++iter)
{
repairedIndices.pushBack(iter->first.mA);
if (iter->first.mFlipped)
{
repairedIndices.pushBack(iter->first.mC);
repairedIndices.pushBack(iter->first.mB);
}
else
{
repairedIndices.pushBack(iter->first.mB);
repairedIndices.pushBack(iter->first.mC);
}
}
}
else
{
if (!hasDuplicateVertices) //repairedIndices is already initialized if hasDuplicateVertices is true
{
repairedIndices.resize(numTriangleIndices);
for (PxU32 i = 0; i < numTriangleIndices; ++i)
repairedIndices[i] = indices[i];
}
}
//Try to orient all triangles consistently; 'edges' tracks unmatched edges (-1 = matched twice)
PxHashMap<PxU64, PxI32> edges;
PxArray<bool> flipTriangle;
PxArray<PxArray<PxU32>> connectedTriangleGroups;
Triangle* triangles = reinterpret_cast<Triangle*>(repairedIndices.begin());
bool success = MeshAnalyzer::buildConsistentTriangleOrientationMap(triangles, repairedIndices.size() / 3, flipTriangle, edges, connectedTriangleGroups);
//Watertight = every edge is shared by exactly two triangles
bool meshIsWatertight = true;
for (PxHashMap<PxU64, PxI32>::Iterator iter = edges.getIterator(); !iter.done(); ++iter)
{
if (iter->second != -1)
{
meshIsWatertight = false;
break;
}
}
if (success)
{
//Only re-orient when duplicates were removed and the mesh is a single watertight component;
//then apply the computed flips and make the overall winding face outward (positive volume)
if (hasDuplicateTriangles && meshIsWatertight && connectedTriangleGroups.size() == 1)
{
for (PxU32 i = 0; i < flipTriangle.size(); ++i)
{
Triangle& t = triangles[i];
if (flipTriangle[i])
PxSwap(t[0], t[1]);
}
if (signedVolume(vertices, repairedIndices.begin(), repairedIndices.size() / 3) < 0.0f)
{
//Negative volume means all normals point inward: flip every triangle
PxU32 numTriangles = repairedIndices.size() / 3;
for (PxU32 j = 0; j < numTriangles; ++j)
{
PxU32* tri = &repairedIndices[j * 3];
PxSwap(tri[1], tri[2]);
}
}
}
}
else
{
//Here it is not possible to guarantee that the mesh fixing can succeed
PxGetFoundation().error(PxErrorCode::eDEBUG_WARNING, PX_FL, "SDF creation: Mesh is not suitable for SDF (non-manifold, not watertight, duplicated triangles, ...) and automatic repair failed. The computed SDF might not work as expected. If collisions are not good, try to improve the mesh structure e.g., by applying remeshing.");
//connectedTriangleGroups won't have any elements, so return
return;
}
if (!meshIsWatertight)
{
PxGetFoundation().error(PxErrorCode::eDEBUG_WARNING, PX_FL, "SDF creation: Input mesh is not watertight. The SDF will try to close the holes.");
}
}
//Top-level dense SDF builder: repairs the input mesh, then either delegates to a user-supplied
//PxSDFBuilder (e.g. a GPU implementation) or runs the CPU winding-number pipeline.
void SDFUsingWindingNumbers(const PxVec3* vertices, const PxU32* indicesOrig, PxU32 numTriangleIndices, PxU32 width, PxU32 height, PxU32 depth,
PxReal* sdf, PxVec3 minExtents, PxVec3 maxExtents, PxVec3* sampleLocations, bool cellCenteredSamples, PxU32 numThreads, PxSDFBuilder* sdfBuilder)
{
PxArray<PxU32> repairedIndices;
//Analyze the mesh to catch and fix some special cases
//There are meshes where every triangle is present once with cw and once with ccw orientation. Try to filter out only one set
analyzeAndFixMesh(vertices, indicesOrig, numTriangleIndices, repairedIndices);
//Use the repaired index buffer if the analysis produced one, otherwise fall back to the input
const PxU32* indices = repairedIndices.size() > 0 ? repairedIndices.begin() : indicesOrig;
if (repairedIndices.size() > 0)
numTriangleIndices = repairedIndices.size();
if (sdfBuilder)
{
//Custom builder path: it needs the vertex count (= highest referenced index + 1)
PxI32 numVertices = -1;
for (PxU32 i = 0; i < numTriangleIndices; ++i)
numVertices = PxMax(numVertices, PxI32(indices[i]));
++numVertices;
sdfBuilder->buildSDF(vertices, PxU32(numVertices), indices, numTriangleIndices, width, height, depth, minExtents, maxExtents, cellCenteredSamples, sdf);
}
else
{
//CPU path: BVH + cluster approximations for fast winding numbers
PxArray<Gu::BVHNode> tree;
buildTree(indices, numTriangleIndices / 3, vertices, tree);
PxHashMap<PxU32, Gu::ClusterApproximation> clusters;
Gu::precomputeClusterInformation(tree.begin(), indices, numTriangleIndices / 3, vertices, clusters);
const PxVec3 extents(maxExtents - minExtents);
GridQueryPointSampler sampler(minExtents, PxVec3(extents.x / width, extents.y / height, extents.z / depth), cellCenteredSamples);
//Watertightness and containment decide whether the fast inside/outside optimization is valid
bool isWatertight = MeshAnalyzer::checkMeshWatertightness(reinterpret_cast<const Triangle*>(indices), numTriangleIndices / 3);
bool allSamplesInsideBox = true;
PxBounds3 box(minExtents, maxExtents);
for (PxU32 i = 0; i < numTriangleIndices; ++i)
{
PxVec3 v = vertices[indices[i]];
if (!box.contains(v))
{
allSamplesInsideBox = false;
break;
}
}
SDFUsingWindingNumbers(tree, clusters, vertices, indices, numTriangleIndices, width, height, depth, sdf, sampler, sampleLocations, numThreads, isWatertight, allSamplesInsideBox);
}
#if EXTENDED_DEBUG
//Optional cross-check of the CPU result against the custom builder's output
bool debug = false;
if (debug && sdfBuilder)
{
PX_UNUSED(sdfBuilder);
PxArray<PxReal> sdf2;
sdf2.resize(width * height * depth);
PxI32 numVertices = -1;
for (PxU32 i = 0; i < numTriangleIndices; ++i)
numVertices = PxMax(numVertices, PxI32(indices[i]));
++numVertices;
sdfBuilder->buildSDF(vertices, PxU32(numVertices), indices, numTriangleIndices, width, height, depth, minExtents, maxExtents, cellCenteredSamples, sdf2.begin());
for (PxU32 i = 0; i < sdf2.size(); ++i)
{
PxReal diff = sdf[i] - sdf2[i];
//PxReal diffOfAbs = PxAbs(sdf[i]) - PxAbs(sdf2[i]);
if(PxAbs(diff) > 1e-3f)
PxGetFoundation().error(physx::PxErrorCode::eDEBUG_WARNING, PX_FL, "SDFs don't match %f %f", PxF64(sdf[i]), PxF64(sdf2[i]));
}
}
#endif
}
//Repacks a flat array of sparse SDF subgrids into a 3D-texture-friendly layout.
//The subgrids are arranged in an (numSubgridsX x numSubgridsY x numSubgridsZ) block grid chosen
//close to a cube; 'sdfFineStartSlots' entries are rewritten in place from linear slot ids to
//encoded (x,y,z) block coordinates (0xFFFFFFFF = cell has no subgrid and keeps that marker).
void convertSparseSDFTo3DTextureLayout(PxU32 width, PxU32 height, PxU32 depth, PxU32 cellsPerSubgrid,
PxU32* sdfFineStartSlots, const PxReal* sdfFineSubgridsIn, PxU32 sparseSDFNumEntries, PxArray<PxReal>& subgrids3DTexFormat,
PxU32& numSubgridsX, PxU32& numSubgridsY, PxU32& numSubgridsZ)
{
//Each subgrid stores (cellsPerSubgrid+1)^3 samples (corners included)
PxU32 valuesPerSubgrid = (cellsPerSubgrid + 1)*(cellsPerSubgrid + 1)*(cellsPerSubgrid + 1);
PX_ASSERT(sparseSDFNumEntries % valuesPerSubgrid == 0);
PxU32 numSubgrids = sparseSDFNumEntries / valuesPerSubgrid;
//Choose a near-cubical block arrangement with edge length ceil(cbrt(numSubgrids))
PxReal cubicRoot = PxPow(PxReal(numSubgrids), 1.0f / 3.0f);
PxU32 up = PxMax(1u, PxU32(PxCeil(cubicRoot)));
PxU32 debug = numSubgrids;
//Arrange numSubgrids in a 3d layout
PxU32 n = numSubgrids;
numSubgridsX = PxMin(up, n);
n = (n + up - 1) / up;
numSubgridsY = PxMin(up, n);
n = (n + up - 1) / up;
numSubgridsZ = PxMin(up, n);
//The chosen layout must provide at least one block per subgrid
PxU32 debug2 = numSubgridsX * numSubgridsY * numSubgridsZ;
PX_ASSERT(debug2 >= debug);
PX_UNUSED(debug);
PX_UNUSED(debug2);
PxU32 size = valuesPerSubgrid * numSubgridsX * numSubgridsY * numSubgridsZ;
//Placeholder marks texels never written; asserted below to catch double writes
PxReal placeholder = 1234567;
subgrids3DTexFormat.resize(size, placeholder);
//Number of coarse cells per axis
PxU32 w = width / cellsPerSubgrid;
PxU32 h = height / cellsPerSubgrid;
PxU32 d = depth / cellsPerSubgrid;
PxU32 l = (w)*(h)*(d);
for (PxU32 i = 0; i < l; ++i)
{
PxU32 startSlot = sdfFineStartSlots[i];
if (startSlot != 0xFFFFFFFF)
{
//Source samples of this subgrid in the flat input array
PxU32 baseIndex = startSlot * (cellsPerSubgrid + 1) * (cellsPerSubgrid + 1) * (cellsPerSubgrid + 1);
const PxReal* sdfFine = &sdfFineSubgridsIn[baseIndex];
//Destination block coordinates in the 3D texture; store them encoded in the slot table
PxU32 startSlotX, startSlotY, startSlotZ;
idToXYZ(startSlot, numSubgridsX, numSubgridsY, startSlotX, startSlotY, startSlotZ);
sdfFineStartSlots[i] = encodeTriple(startSlotX, startSlotY, startSlotZ);
//Copy the (cellsPerSubgrid+1)^3 samples into the block's texel region
for (PxU32 zLocal = 0; zLocal <= cellsPerSubgrid; ++zLocal)
{
for (PxU32 yLocal = 0; yLocal <= cellsPerSubgrid; ++yLocal)
{
for (PxU32 xLocal = 0; xLocal <= cellsPerSubgrid; ++xLocal)
{
PxReal sdfValue = sdfFine[idx3D(xLocal, yLocal, zLocal, cellsPerSubgrid+1, cellsPerSubgrid+1)];
PxU32 index = idx3D(xLocal + startSlotX * (cellsPerSubgrid + 1), yLocal + startSlotY * (cellsPerSubgrid + 1), zLocal + startSlotZ * (cellsPerSubgrid + 1),
numSubgridsX * (cellsPerSubgrid + 1), numSubgridsY * (cellsPerSubgrid + 1));
PX_ASSERT(subgrids3DTexFormat[index] == placeholder);
subgrids3DTexFormat[index] = sdfValue;
PX_ASSERT(PxIsFinite(sdfValue));
}
}
}
}
}
}
//Closed interval [min, max]; used to test whether a subgrid's SDF value range crosses the narrow band.
struct Interval
{
	PxReal min;
	PxReal max;
	//Default-constructed interval is empty (min > max), so folding values in via PxMin/PxMax works
	PX_CUDA_CALLABLE Interval() : min(FLT_MAX), max(-FLT_MAX)
	{}
	PX_CUDA_CALLABLE Interval(PxReal min_, PxReal max_) : min(min_), max(max_)
	{}
	//Returns true if the two closed intervals share at least one point.
	//Const-qualified (fix) so it can be called on const intervals; callers are unaffected.
	PX_FORCE_INLINE PX_CUDA_CALLABLE bool overlaps(const Interval& i) const
	{
		return !(min > i.max || i.min > max);
	}
};
//Builds a sparse (two-level) SDF: a coarse grid of block-corner samples plus fine subgrids that
//are only emitted where the SDF crosses the narrow band AND the coarse interpolation is not
//already accurate. Also returns the dense SDF (computed at grid-corner resolution) and the
//min/max values over all emitted subgrids (for later quantization).
void SDFUsingWindingNumbersSparse(const PxVec3* vertices, const PxU32* indices, PxU32 numTriangleIndices, PxU32 width, PxU32 height, PxU32 depth,
const PxVec3& minExtents, const PxVec3& maxExtents, PxReal narrowBandThickness, PxU32 cellsPerSubgrid,
PxArray<PxReal>& sdfCoarse, PxArray<PxU32>& sdfFineStartSlots, PxArray<PxReal>& subgridData, PxArray<PxReal>& denseSdf,
PxReal& subgridsMinSdfValue, PxReal& subgridsMaxSdfValue, PxU32 numThreads, PxSDFBuilder* sdfBuilder)
{
//The resolution must tile exactly into subgrid blocks
PX_ASSERT(width % cellsPerSubgrid == 0);
PX_ASSERT(height % cellsPerSubgrid == 0);
PX_ASSERT(depth % cellsPerSubgrid == 0);
const PxVec3 extents(maxExtents - minExtents);
const PxVec3 delta(extents.x / width, extents.y / height, extents.z / depth);
//Number of subgrid blocks per axis
const PxU32 w = width / cellsPerSubgrid;
const PxU32 h = height / cellsPerSubgrid;
const PxU32 d = depth / cellsPerSubgrid;
//Dense SDF is sampled at cell corners, hence the +1 per axis and the box grown by one cell
denseSdf.resize((width + 1) * (height + 1) * (depth + 1));
SDFUsingWindingNumbers(vertices, indices, numTriangleIndices, width + 1, height + 1, depth + 1, denseSdf.begin(), minExtents, maxExtents + delta, NULL, false, numThreads, sdfBuilder);
sdfCoarse.clear();
sdfFineStartSlots.clear();
subgridData.clear();
sdfCoarse.reserve((w + 1) * (h + 1) * (d + 1));
sdfFineStartSlots.reserve(w * h * d);
//Initially no block owns a subgrid (0xFFFFFFFF = empty marker)
for (PxU32 zBlock = 0; zBlock < d; ++zBlock)
for (PxU32 yBlock = 0; yBlock < h; ++yBlock)
for (PxU32 xBlock = 0; xBlock < w; ++xBlock)
{
sdfFineStartSlots.pushBack(0xFFFFFFFF);
}
//Coarse grid = dense SDF sampled at the block corners
for (PxU32 zBlock = 0; zBlock <= d; ++zBlock)
for (PxU32 yBlock = 0; yBlock <= h; ++yBlock)
for (PxU32 xBlock = 0; xBlock <= w; ++xBlock)
{
PxU32 x = xBlock * cellsPerSubgrid;
PxU32 y = yBlock * cellsPerSubgrid;
PxU32 z = zBlock * cellsPerSubgrid;
const PxU32 index = idx3D(x, y, z, width+1, height+1);
PX_ASSERT(index < denseSdf.size());
PxReal sdfValue = denseSdf[index];
sdfCoarse.pushBack(sdfValue);
}
#if DEBUG
//Sanity check: coarse samples must match the dense samples they were taken from
for (PxU32 zBlock = 0; zBlock <= d; ++zBlock)
for (PxU32 yBlock = 0; yBlock <= h; ++yBlock)
for (PxU32 xBlock = 0; xBlock <= w; ++xBlock)
{
PxU32 x = xBlock * cellsPerSubgrid;
PxU32 y = yBlock * cellsPerSubgrid;
PxU32 z = zBlock * cellsPerSubgrid;
const PxU32 index = idx3D(x, y, z, width+1, height+1);
const PxU32 indexCoarse = idx3D(xBlock, yBlock, zBlock, w+1, h+1);
PX_ASSERT(sdfCoarse[indexCoarse] == denseSdf[index]);
PX_UNUSED(indexCoarse);
PX_UNUSED(index);
}
#endif
Interval narrowBandInterval(-narrowBandThickness, narrowBandThickness);
//Interpolator over the coarse samples, used to measure the coarse approximation error
DenseSDF coarseEval(w + 1, h + 1, d + 1, sdfCoarse.begin());
PxReal s = 1.0f / cellsPerSubgrid;
const PxReal errorThreshold = 1e-6f * extents.magnitude();
PxU32 subgridIndexer = 0;
subgridsMaxSdfValue = -FLT_MAX;
subgridsMinSdfValue = FLT_MAX;
for (PxU32 zBlock = 0; zBlock < d; ++zBlock)
{
for (PxU32 yBlock = 0; yBlock < h; ++yBlock)
{
for (PxU32 xBlock = 0; xBlock < w; ++xBlock)
{
bool subgridRequired = false;
//Gather this block's value range and its worst deviation from the coarse interpolation
Interval inverval;
PxReal maxAbsError = 0.0f;
for (PxU32 zLocal = 0; zLocal <= cellsPerSubgrid; ++zLocal)
{
for (PxU32 yLocal = 0; yLocal <= cellsPerSubgrid; ++yLocal)
{
for (PxU32 xLocal = 0; xLocal <= cellsPerSubgrid; ++xLocal)
{
PxU32 x = xBlock * cellsPerSubgrid + xLocal;
PxU32 y = yBlock * cellsPerSubgrid + yLocal;
PxU32 z = zBlock * cellsPerSubgrid + zLocal;
const PxU32 index = idx3D(x, y, z, width+1, height+1);
PxReal sdfValue = denseSdf[index];
inverval.max = PxMax(inverval.max, sdfValue);
inverval.min = PxMin(inverval.min, sdfValue);
maxAbsError = PxMax(maxAbsError, PxAbs(sdfValue - coarseEval.sampleSDFDirect(PxVec3(xBlock + xLocal * s, yBlock + yLocal * s, zBlock + zLocal * s))));
}
}
}
//A subgrid is only needed where the surface's narrow band passes through this block
subgridRequired = narrowBandInterval.overlaps(inverval);
if (maxAbsError < errorThreshold)
subgridRequired = false; //No need for a subgrid if the coarse SDF is already almost exact
if (subgridRequired)
{
subgridsMaxSdfValue = PxMax(subgridsMaxSdfValue, inverval.max);
subgridsMinSdfValue = PxMin(subgridsMinSdfValue, inverval.min);
//Emit the block's (cellsPerSubgrid+1)^3 samples and record its slot id
for (PxU32 zLocal = 0; zLocal <= cellsPerSubgrid; ++zLocal)
{
for (PxU32 yLocal = 0; yLocal <= cellsPerSubgrid; ++yLocal)
{
for (PxU32 xLocal = 0; xLocal <= cellsPerSubgrid; ++xLocal)
{
PxU32 x = xBlock * cellsPerSubgrid + xLocal;
PxU32 y = yBlock * cellsPerSubgrid + yLocal;
PxU32 z = zBlock * cellsPerSubgrid + zLocal;
const PxU32 index = z * (width + 1) * (height + 1) + y * (width + 1) + x;
PxReal sdfValue = denseSdf[index];
subgridData.pushBack(sdfValue);
}
}
}
sdfFineStartSlots[idx3D(xBlock, yBlock, zBlock, w, h)] = subgridIndexer;
++subgridIndexer;
}
}
}
}
}
// Decodes one sample of a sparse SDF at the integer grid vertex (xx, yy, zz).
// Out-of-range coordinates return 1.0f (a positive value counts as "outside").
// If the subgrid containing the vertex stores fine data, the sample is read from
// the packed 3D subgrid texture block; otherwise the coarse background SDF is
// interpolated at the corresponding fractional position.
PX_INLINE PxReal decodeSparse2(const SDF& sdf, PxI32 xx, PxI32 yy, PxI32 zz)
{
	if (xx < 0 || yy < 0 || zz < 0 || xx > PxI32(sdf.mDims.x) || yy > PxI32(sdf.mDims.y) || zz > PxI32(sdf.mDims.z))
		return 1.0f; //Return a value >0 that counts as outside
	// number of subgrid blocks per axis
	const PxU32 nbX = sdf.mDims.x / sdf.mSubgridSize;
	const PxU32 nbY = sdf.mDims.y / sdf.mSubgridSize;
	const PxU32 nbZ = sdf.mDims.z / sdf.mSubgridSize;
	// split each coordinate into a subgrid index (base) and a local offset inside the subgrid
	PxU32 xBase = xx / sdf.mSubgridSize;
	PxU32 yBase = yy / sdf.mSubgridSize;
	PxU32 zBase = zz / sdf.mSubgridSize;
	PxU32 x = xx % sdf.mSubgridSize;
	PxU32 y = yy % sdf.mSubgridSize;
	PxU32 z = zz % sdf.mSubgridSize;
	// vertices on the upper grid boundary belong to the last subgrid's far face
	if (xBase == nbX)
	{
		--xBase;
		x = sdf.mSubgridSize;
	}
	if (yBase == nbY)
	{
		--yBase;
		y = sdf.mSubgridSize;
	}
	if (zBase == nbZ)
	{
		--zBase;
		z = sdf.mSubgridSize;
	}
	// 0xFFFFFFFF marks a subgrid without fine data
	PxU32 startId = sdf.mSubgridStartSlots[zBase * (nbX) * (nbY)+yBase * (nbX)+xBase];
	if (startId != 0xFFFFFFFFu)
	{
		// fine data available: decode the packed block coordinates inside the 3D texture
		SDF::decodeTriple(startId, xBase, yBase, zBase);
		/*if (xBase >= mSdfSubgrids3DTexBlockDim.x || yBase >= mSdfSubgrids3DTexBlockDim.y || zBase >= mSdfSubgrids3DTexBlockDim.z)
		{
			PxGetFoundation().error(::physx::PxErrorCode::eINTERNAL_ERROR, PX_FL, "Out of bounds subgrid index\n");
			//printf("%i %i %i %i\n", PxI32(startId), PxI32(xBase), PxI32(yBase), PxI32(zBase));
		}*/
		// convert block coordinates to texel coordinates: each block stores (mSubgridSize+1)^3 samples
		xBase *= (sdf.mSubgridSize + 1);
		yBase *= (sdf.mSubgridSize + 1);
		zBase *= (sdf.mSubgridSize + 1);
		const PxU32 w = sdf.mSdfSubgrids3DTexBlockDim.x * (sdf.mSubgridSize + 1);
		const PxU32 h = sdf.mSdfSubgrids3DTexBlockDim.y * (sdf.mSubgridSize + 1);
		const PxU32 index = idx3D(xBase + x, yBase + y, zBase + z, w, h);
		//if (mBytesPerSparsePixel * index >= mNumSubgridSdfs)
		//	PxGetFoundation().error(::physx::PxErrorCode::eINTERNAL_ERROR, PX_FL, "Out of bounds sdf subgrid access\n");
		// dequantize the stored sample into the [mSubgridsMinSdfValue, mSubgridsMaxSdfValue] range
		return SDF::decodeSample(sdf.mSubgridSdf, index,
			sdf.mBytesPerSparsePixel, sdf.mSubgridsMinSdfValue, sdf.mSubgridsMaxSdfValue);
	}
	else
	{
		// no fine data: interpolate the coarse background SDF at the fractional position
		DenseSDF coarseEval(nbX + 1, nbY + 1, nbZ + 1, sdf.mSdf);
		PxReal s = 1.0f / sdf.mSubgridSize;
		PxReal result = coarseEval.sampleSDFDirect(PxVec3(xBase + x * s, yBase + y * s, zBase + z * s));
		return result;
	}
}
// Samples the sparse SDF at the integer grid vertex (xx, yy, zz); coordinates
// outside the grid yield a positive (outside) value. Thin wrapper over decodeSparse2.
PxReal SDF::decodeSparse(PxI32 xx, PxI32 yy, PxI32 zz) const
{
	return decodeSparse2(*this, xx, yy, zz);
}
// Packs three signed cell coordinates into a single 64 bit hash key, 21 bits per
// axis. The bias shifts the supported coordinate range into non-negative values.
PX_FORCE_INLINE PxU64 key(PxI32 xId, PxI32 yId, PxI32 zId)
{
	const PxI32 bias = 1 << 19;
	const PxU64 xBits = PxU64(xId + bias);
	const PxU64 yBits = PxU64(yId + bias);
	const PxU64 zBits = PxU64(zId + bias);
	return (zBits << 42) | (yBits << 21) | xBits;
}
// offsets[dim][n] is the (x, y, z) offset of the n-th neighbor cell that shares the
// edge along principal axis 'dim' with the current cell; together with the current
// cell these four cells supply the quad corners emitted by createTriangles.
const PxI32 offsets[3][3][3] = { { {0,-1,0}, {0,-1,-1}, {0,0,-1} },
								{ {0,0,-1}, {-1,0,-1}, {-1,0,0} } ,
								{ {-1,0,0}, {-1,-1,0}, {0,-1,0} } };
// projections[dim] holds the two axis indices spanning the plane orthogonal to axis 'dim'.
const PxI32 projections[3][2] = { {1, 2}, {2, 0}, {0, 1} };
// 2D cross product of (middle - start) x (end - middle), evaluated in the plane
// orthogonal to the principal direction (axis pair taken from the projections table).
// The sign tells on which side the polyline start->middle->end turns.
PX_FORCE_INLINE PxReal dirSign(PxI32 principalDirection, const PxVec3& start, const PxVec3& middle, const PxVec3& end)
{
	const PxI32 u = projections[principalDirection][0];
	const PxI32 v = projections[principalDirection][1];
	const PxReal du0 = middle[u] - start[u];
	const PxReal dv0 = middle[v] - start[v];
	const PxReal du1 = end[u] - middle[u];
	const PxReal dv1 = end[v] - middle[v];
	return du0 * dv1 - dv0 * du1;
}
// Walks the four corners of quad (a, b, c, d) and returns the index (0..3) of the
// corner with the most concave turn, or -1 if all turns are strictly convex.
// Ties resolve to the last corner tested, matching the cyclic order b, c, d, a.
PX_FORCE_INLINE PxI32 indexOfMostConcaveCorner(PxI32 principalDirection, const PxVec3& a, const PxVec3& b, const PxVec3& c, const PxVec3& d)
{
	const PxVec3* quad[4] = { &a, &b, &c, &d };
	PxReal best = 0;
	PxI32 winner = -1;
	for (PxI32 i = 0; i < 4; ++i)
	{
		// turn at the middle corner of the triple (quad[i], quad[i+1], quad[i+2])
		const PxReal turn = dirSign(principalDirection, *quad[i], *quad[(i + 1) & 3], *quad[(i + 2) & 3]);
		if (turn <= best)
		{
			best = turn;
			winner = (i + 1) & 3;	// index of the middle corner
		}
	}
	return winner;
}
// Given the eight SDF corner samples of cell (x, y, z), computes a representative
// surface point inside the cell as the average of all edge/isosurface crossings.
// Returns true and writes 'point' (in world space) if the surface crosses the cell,
// false otherwise.
bool generatePointInCell(const Gu::SDF& sdf, PxI32 x, PxI32 y, PxI32 z, PxVec3& point, PxReal corners[2][2][2])
{
	const PxReal threshold = 0.0f;
	// classify the eight corners against the surface
	PxU32 positiveCounter = 0;
	PxU32 negativeCounter = 0;
	for (PxI32 xx = 0; xx <= 1; ++xx) for (PxI32 yy = 0; yy <= 1; ++yy) for (PxI32 zz = 0; zz <= 1; ++zz)
	{
		PxReal v = corners[xx][yy][zz];
		if (v > 0)
			++positiveCounter;
		if (v < 0)
			++negativeCounter;
	}
	// world-space extents of the cell
	PxBounds3 box;
	box.minimum = sdf.mMeshLower + PxVec3(x * sdf.mSpacing, y * sdf.mSpacing, z * sdf.mSpacing);
	box.maximum = box.minimum + PxVec3(sdf.mSpacing);
	if (positiveCounter == 8 || negativeCounter == 8)
	{
		//Nothing to do because surface does not cross the current cell
	}
	else
	{
		//If point is not completely inside or outside, then find a point inside the cube that divides it into 8 cuboids
		PxU32 counter = 0;
		PxVec3 sum(0.0f);
		// edges parallel to z: interpolate the crossing and accumulate (a, b, t)
		for (PxI32 a = 0; a <= 1; ++a) for (PxI32 b = 0; b <= 1; ++b)
		{
			PxReal p = corners[a][b][0];
			PxReal q = corners[a][b][1];
			if ((p <= threshold && q >= threshold) || (q <= threshold && p >= threshold))
			{
				// guard against p == q (both exactly on the surface): use the edge midpoint
				PxReal t = (q != p) ? PxClamp((threshold - p) / (q - p), 0.0f, 1.0f) : 0.5f;
				sum += PxVec3(PxReal(a), PxReal(b), t);
				++counter;
			}
		}
		// edges parallel to y
		for (PxI32 a = 0; a <= 1; ++a) for (PxI32 b = 0; b <= 1; ++b)
		{
			PxReal p = corners[b][0][a];
			PxReal q = corners[b][1][a];
			if ((p <= threshold && q >= threshold) || (q <= threshold && p >= threshold))
			{
				PxReal t = (q != p) ? PxClamp((threshold - p) / (q - p), 0.0f, 1.0f) : 0.5f;
				sum += PxVec3(PxReal(b), t, PxReal(a));
				++counter;
			}
		}
		// edges parallel to x
		for (PxI32 a = 0; a <= 1; ++a) for (PxI32 b = 0; b <= 1; ++b)
		{
			PxReal p = corners[0][a][b];
			PxReal q = corners[1][a][b];
			if ((p <= threshold && q >= threshold) || (q <= threshold && p >= threshold))
			{
				PxReal t = (q != p) ? PxClamp((threshold - p) / (q - p), 0.0f, 1.0f) : 0.5f;
				sum += PxVec3(t, PxReal(a), PxReal(b));
				++counter;
			}
		}
		if (counter > 0)
		{
			// average of all edge crossings, mapped from cell-local to world space
			point = box.minimum + sum * (sdf.mSpacing / counter);
			return true;
		}
	}
	return false;
}
// Sparse-SDF variant: fetches the eight corner samples surrounding cell (x, y, z)
// via decodeSparse and defers to the corner-based overload.
PX_FORCE_INLINE bool generatePointInCell(const Gu::SDF& sdf, PxI32 x, PxI32 y, PxI32 z, PxVec3& point)
{
	PxReal samples[2][2][2];
	for (PxI32 i = 0; i <= 1; ++i)
		for (PxI32 j = 0; j <= 1; ++j)
			for (PxI32 k = 0; k <= 1; ++k)
				samples[i][j][k] = sdf.decodeSparse(x + i, y + j, z + k);
	return generatePointInCell(sdf, x, y, z, point, samples);
}
// Same as generatePointInCell, but reads the eight corner samples from a prefetched
// (subgridSize+1)^3 cache (see populateSubgridCache) instead of decoding the sparse
// SDF again. (x, y, z) are local to subgrid (xBase, yBase, zBase).
PX_FORCE_INLINE bool generatePointInCellUsingCache(const Gu::SDF& sdf, PxI32 xBase, PxI32 yBase, PxI32 zBase, PxI32 x, PxI32 y, PxI32 z, PxVec3& point, const PxArray<PxReal>& cache)
{
	const PxU32 stride = sdf.mSubgridSize + 1;
	PxReal samples[2][2][2];
	for (PxI32 i = 0; i <= 1; ++i)
		for (PxI32 j = 0; j <= 1; ++j)
			for (PxI32 k = 0; k <= 1; ++k)
				samples[i][j][k] = cache[idx3D(x + i, y + j, z + k, stride, stride)];
	return generatePointInCell(sdf, xBase * sdf.mSubgridSize + x, yBase * sdf.mSubgridSize + y, zBase * sdf.mSubgridSize + z, point, samples);
}
// Samples the dense SDF at the integer grid vertex (x, y, z). Out-of-range lookups
// count as "outside" and return a positive value, matching decodeSparse2.
PxReal SDF::decodeDense(PxI32 x, PxI32 y, PxI32 z) const
{
	if (x < 0 || y < 0 || z < 0 || x >= PxI32(mDims.x) || y >= PxI32(mDims.y) || z >= PxI32(mDims.z))
		return 1.0f; //Return a value >0 that counts as outside (float literal: avoids implicit double->float conversion)
	return mSdf[idx3D(x, y, z, mDims.x, mDims.y)];
}
// Dense-SDF variant: gathers the eight corner samples via decodeDense and forwards
// to the corner-based point generator.
PX_FORCE_INLINE bool generatePointInCellDense(const Gu::SDF& sdf, PxI32 x, PxI32 y, PxI32 z, PxVec3& point)
{
	PxReal samples[2][2][2];
	for (PxI32 i = 0; i <= 1; ++i)
		for (PxI32 j = 0; j <= 1; ++j)
			for (PxI32 k = 0; k <= 1; ++k)
				samples[i][j][k] = sdf.decodeDense(x + i, y + j, z + k);
	return generatePointInCell(sdf, x, y, z, point, samples);
}
// Returns true if subgrid (i, j, k) can be skipped during isosurface extraction:
// the subgrid stores no fine data and all eight coarse corner samples lie on the
// same side of the surface, so the isosurface cannot cross it.
PX_FORCE_INLINE bool canSkipSubgrid(const Gu::SDF& sdf, PxI32 i, PxI32 j, PxI32 k)
{
	const PxReal t = 0.1f * sdf.mSpacing;	// tolerance proportional to the cell size
	const PxI32 nbX = sdf.mDims.x / sdf.mSubgridSize;
	const PxI32 nbY = sdf.mDims.y / sdf.mSubgridSize;
	const PxI32 nbZ = sdf.mDims.z / sdf.mSubgridSize;
	// subgrids outside the grid (the boundary padding layer) are never skipped
	if (i < 0 || j < 0 || k < 0 || i >= nbX || j >= nbY || k >= nbZ)
		return false;
	// only subgrids without fine data (empty start slot) are candidates for skipping
	if (sdf.mSubgridStartSlots[k * (nbX) * (nbY)+j * (nbX)+i] == 0xFFFFFFFFu)
	{
		PxU32 positiveCounter = 0;
		PxU32 negativeCounter = 0;
		for (PxI32 xx = 0; xx <= 1; ++xx) for (PxI32 yy = 0; yy <= 1; ++yy) for (PxI32 zz = 0; zz <= 1; ++zz)
		{
			// sample the eight corners of the subgrid region
			PxReal v = decodeSparse2(sdf, (i + xx)* sdf.mSubgridSize, (j + yy) * sdf.mSubgridSize, (k + zz) * sdf.mSubgridSize);
			if (v > t)
				++positiveCounter;
			if (v < t)	// NOTE(review): compares against +t, not -t, so values in (0, t) count as "negative" - looks asymmetric; confirm this is intended
				++negativeCounter;
		}
		// all corners on one side -> no surface crossing inside this subgrid
		if (positiveCounter == 8 || negativeCounter == 8)
			return true;
	}
	return false;
}
// Hash map from a packed cell key (see key()) to an isosurface vertex index.
// Wrapped in a struct deriving from PxUserAllocated so instances can be created
// per worker thread via PX_NEW.
struct Map : public PxHashMap<PxU64, PxU32>, public PxUserAllocated
{
	Map()
	{
	}
};
// Maps a packed cell coordinate (see key()) to the index of the isosurface vertex
// generated in that cell. Each worker thread inserts into its own Map so writes need
// no synchronization; lookups search all per-thread maps.
struct CellToPoint
{
	PxArray<Map*> cellToPoint;	// one hash map per worker thread (owned, released in the destructor)
	CellToPoint(PxU32 numThreads)
	{
		cellToPoint.resize(numThreads);
		for (PxU32 i = 0; i < cellToPoint.size(); ++i)
			cellToPoint[i] = PX_NEW(Map);
	}
	~CellToPoint()
	{
		for (PxU32 i = 0; i < cellToPoint.size(); ++i)
		{
			PX_DELETE(cellToPoint[i]);
		}
	}
	// Returns the entry for cell (xId, yId, zId) or NULL; searches every thread's map.
	const PxPair<const PxU64, PxU32>* find(PxI32 xId, PxI32 yId, PxI32 zId) const
	{
		PxU64 k = key(xId, yId, zId);
		for (PxU32 i = 0; i < cellToPoint.size(); ++i)
		{
			const PxPair<const PxU64, PxU32>* f = cellToPoint[i]->find(k);
			if (f)
				return f;
		}
		return NULL;
	}
	// Records vertex 'value' for the given cell; only touches the calling thread's map.
	void insert(PxI32 threadId, PxI32 xId, PxI32 yId, PxI32 zId, PxU32 value)
	{
		cellToPoint[threadId]->insert(key(xId, yId, zId), value);
	}
};
// For cell (xId, yId, zId) with SDF sample d0 and axis-neighbor samples ds[0..2],
// emits two triangles (a quad) across every edge where the SDF changes sign.
// The quad corners are the surface points of the four cells surrounding the crossed
// edge (the current cell plus the three neighbors from the offsets table); the quad
// is split along the diagonal through its most concave corner and wound according
// to the direction of the sign change.
PX_INLINE void createTriangles(PxI32 xId, PxI32 yId, PxI32 zId, PxReal d0, PxReal ds[3],
	const CellToPoint& cellToPoint, const PxArray<PxVec3>& points, PxArray<PxU32>& triangleIndices)
{
	bool flipTriangleOrientation = false;
	const PxReal threshold = 0.0f;
	// early out if no axis shows a sign change
	PxI32 num = 0;
	for (PxI32 dim = 0; dim < 3; dim++)
	{
		PxReal d = ds[dim];
		if ((d0 <= threshold && d >= threshold) || (d <= threshold && d0 >= threshold))
			num++;
	}
	if (num == 0)
		return;
	PxI32 buffer[4];
	// the current cell must have produced a surface point
	const PxPair<const PxU64, PxU32>* f = cellToPoint.find(xId, yId, zId);
	if (!f)
		return;
	buffer[0] = f->second;
	PxVec3 v0 = points[buffer[0]];
	for (PxI32 dim = 0; dim < 3; dim++)
	{
		PxReal d = ds[dim];
		bool b1 = d0 <= threshold && d >= threshold;	// sign change in + axis direction
		bool b2 = d <= threshold && d0 >= threshold;	// sign change in - axis direction
		if (b1 || b2)
		{
			bool flip = flipTriangleOrientation == b1;	// winding depends on the crossing direction
			// collect the surface points of the three neighbor cells around the crossed edge
			bool skip = false;
			for (PxI32 ii = 0; ii < 3; ++ii)
			{
				f = cellToPoint.find(xId + offsets[dim][ii][0], yId + offsets[dim][ii][1], zId + offsets[dim][ii][2]);
				if (f)
					buffer[ii + 1] = f->second;
				else
					skip = true;	// incomplete quad: a neighbor generated no point
			}
			if (skip)
				continue;
			// choose the quad diagonal passing through the most concave corner
			PxI32 shift = PxMax(0, indexOfMostConcaveCorner(dim, v0, points[buffer[1]], points[buffer[2]], points[buffer[3]])) % 2;
			//Split the quad into two triangles
			for (PxI32 ii = 0; ii < 2; ++ii)
			{
				triangleIndices.pushBack(buffer[shift]);
				if (flip)
				{
					for (PxI32 jj = 2; jj >= 1; --jj)
						triangleIndices.pushBack(buffer[(ii + jj + shift) % 4]);
				}
				else
				{
					for (PxI32 jj = 1; jj < 3; ++jj)
						triangleIndices.pushBack(buffer[(ii + jj + shift) % 4]);
				}
			}
		}
	}
}
// Decodes all (subgridSize+1)^3 samples of subgrid (i, j, k) into the cache so the
// per-cell loops can read corner values without repeatedly hitting the sparse
// decode path.
PX_INLINE void populateSubgridCache(const Gu::SDF& sdf, PxArray<PxReal>& sdfCache, PxI32 i, PxI32 j, PxI32 k)
{
	const PxU32 stride = sdf.mSubgridSize + 1;
	const PxI32 x0 = i * PxI32(sdf.mSubgridSize);
	const PxI32 y0 = j * PxI32(sdf.mSubgridSize);
	const PxI32 z0 = k * PxI32(sdf.mSubgridSize);
	for (PxU32 z = 0; z < stride; ++z)
		for (PxU32 y = 0; y < stride; ++y)
			for (PxU32 x = 0; x < stride; ++x)
				sdfCache[idx3D(x, y, z, stride, stride)] = decodeSparse2(sdf, x0 + PxI32(x), y0 + PxI32(y), z0 + PxI32(z));
}
// Per-thread scratch data for the parallel isosurface extraction.
struct IsosurfaceThreadData
{
	const Gu::SDF& sdf;								// SDF being triangulated (shared, read-only)
	PxArray<PxVec3> isosurfaceVertices;				// vertices generated by this thread (phase 1)
	const PxArray<PxVec3>& allIsosurfaceVertices;	// combined vertex array of all threads (read in phase 2)
	PxArray<PxU32> isosurfaceTriangleIndices;		// triangle indices generated by this thread (phase 2)
	PxArray<PxReal> sdfCache;						// (subgridSize+1)^3 scratch samples of the current subgrid
	CellToPoint& cellToPoint;						// shared cell -> vertex-index lookup
	PxU32 startIndex;								// first linear subgrid index processed by this thread
	PxU32 endIndex;									// one past the last linear subgrid index
	PxU32 threadIndex;								// selects this thread's map inside cellToPoint
	PxI32 nbX;										// subgrid counts per axis, including boundary padding
	PxI32 nbY;
	PxI32 nbZ;
	IsosurfaceThreadData(const Gu::SDF& sdf_, CellToPoint& cellToPoint_, const PxArray<PxVec3>& allIsosurfaceVertices_) :
		sdf(sdf_), allIsosurfaceVertices(allIsosurfaceVertices_), cellToPoint(cellToPoint_)
	{ }
};
// Phase 1 worker: for every subgrid in [startIndex, endIndex), generates one surface
// point per cell crossed by the isosurface and registers it in the shared cell map.
void* computeIsosurfaceVerticesThreadJob(void* data)
{
	IsosurfaceThreadData & d = *reinterpret_cast<IsosurfaceThreadData*>(data);
	for (PxU32 indexer = d.startIndex; indexer < d.endIndex; ++indexer)
	{
		PxU32 ii, jj, kk;
		idToXYZ(indexer, d.nbX, d.nbY, ii, jj, kk);
		// shift by one: the iteration range includes a boundary layer of subgrids at index -1
		PxI32 i = PxI32(ii) - 1;
		PxI32 j = PxI32(jj) - 1;
		PxI32 k = PxI32(kk) - 1;
		if (canSkipSubgrid(d.sdf, i, j, k))
			continue;
		// prefetch all samples of this subgrid once
		populateSubgridCache(d.sdf, d.sdfCache, i, j, k);
		//Process the subgrid
		for (PxU32 z = 0; z < d.sdf.mSubgridSize; ++z)
		{
			for (PxU32 y = 0; y < d.sdf.mSubgridSize; ++y)
			{
				for (PxU32 x = 0; x < d.sdf.mSubgridSize; ++x)
				{
					PxVec3 p;
					if (generatePointInCellUsingCache(d.sdf, i, j, k, x, y, z, p, d.sdfCache))
					{
						// global cell coordinates of the generated vertex
						PxU32 xId = i * d.sdf.mSubgridSize + x;
						PxU32 yId = j * d.sdf.mSubgridSize + y;
						PxU32 zId = k * d.sdf.mSubgridSize + z;
						d.cellToPoint.insert(d.threadIndex, xId, yId, zId, d.isosurfaceVertices.size());
						d.isosurfaceVertices.pushBack(p);
					}
				}
			}
		}
	}
	return NULL;
}
// Phase 2 worker: for every subgrid in [startIndex, endIndex), connects the phase 1
// vertices (read from the combined array via cellToPoint) into triangles.
void* computeIsosurfaceTrianglesThreadJob(void* data)
{
	IsosurfaceThreadData & d = *reinterpret_cast<IsosurfaceThreadData*>(data);
	const PxU32 s = d.sdf.mSubgridSize + 1;	// cache stride per axis
	for (PxU32 indexer = d.startIndex; indexer < d.endIndex; ++indexer)
	{
		PxU32 ii, jj, kk;
		idToXYZ(indexer, d.nbX, d.nbY, ii, jj, kk);
		// shift by one: the iteration range includes a boundary layer of subgrids at index -1
		PxI32 i = PxI32(ii) - 1;
		PxI32 j = PxI32(jj) - 1;
		PxI32 k = PxI32(kk) - 1;
		if (canSkipSubgrid(d.sdf, i, j, k))
			continue;
		populateSubgridCache(d.sdf, d.sdfCache, i, j, k);
		PxReal ds[3];
		//Process the subgrid
		for (PxU32 z = 0; z < d.sdf.mSubgridSize; ++z)
		{
			for (PxU32 y = 0; y < d.sdf.mSubgridSize; ++y)
			{
				for (PxU32 x = 0; x < d.sdf.mSubgridSize; ++x)
				{
					// sample at the cell plus its three axis neighbors
					PxReal d0 = d.sdfCache[idx3D(x, y, z, s, s)];
					ds[0] = d.sdfCache[idx3D(x + 1, y, z, s, s)];
					ds[1] = d.sdfCache[idx3D(x, y + 1, z, s, s)];
					ds[2] = d.sdfCache[idx3D(x, y, z + 1, s, s)];
					createTriangles(x + i * d.sdf.mSubgridSize, y + j * d.sdf.mSubgridSize, z + k * d.sdf.mSubgridSize, d0, ds,
						d.cellToPoint, d.allIsosurfaceVertices, d.isosurfaceTriangleIndices);
				}
			}
		}
	}
	return NULL;
}
// Single-threaded isosurface extraction. Pass 1 generates one vertex per cell
// crossed by the surface, pass 2 connects the vertices into triangles. Dense SDFs
// (mSubgridSize == 0) and sparse SDFs with subgrids take separate code paths.
void extractIsosurfaceFromSDFSerial(const Gu::SDF& sdf, PxArray<PxVec3>& isosurfaceVertices, PxArray<PxU32>& isosurfaceTriangleIndices)
{
	isosurfaceVertices.clear();
	isosurfaceTriangleIndices.clear();
	// number of subgrids per axis (equals the cell counts for dense SDFs)
	const PxI32 nbX = sdf.mDims.x / PxMax(1u, sdf.mSubgridSize);
	const PxI32 nbY = sdf.mDims.y / PxMax(1u, sdf.mSubgridSize);
	const PxI32 nbZ = sdf.mDims.z / PxMax(1u, sdf.mSubgridSize);
	PxU32 sizeEstimate = PxU32(PxSqrt(PxReal(nbX*nbY * nbZ)));
	CellToPoint cellToPoint(1);	// single thread -> a single map
	isosurfaceVertices.reserve(sizeEstimate);
	isosurfaceTriangleIndices.reserve(sizeEstimate);
	// scratch buffer holding all samples of the subgrid currently processed
	PxArray<PxReal> sdfCache;
	sdfCache.resize((sdf.mSubgridSize + 1) * (sdf.mSubgridSize + 1) * (sdf.mSubgridSize + 1));
	// pass 1: vertex generation (loops start at -1 to close the surface at the grid boundary)
	if (sdf.mSubgridSize == 0)
	{
		//Dense SDF
		for (PxI32 k = -1; k <= nbZ; ++k)
			for (PxI32 j = -1; j <= nbY; ++j)
				for (PxI32 i = -1; i <= nbX; ++i)
				{
					PxVec3 p;
					if (generatePointInCellDense(sdf, i, j, k, p))
					{
						cellToPoint.insert(0, i, j, k, isosurfaceVertices.size());
						isosurfaceVertices.pushBack(p);
					}
				}
	}
	else
	{
		for (PxI32 k = -1; k <= nbZ; ++k)
		{
			for (PxI32 j = -1; j <= nbY; ++j)
			{
				for (PxI32 i = -1; i <= nbX; ++i)
				{
					if (canSkipSubgrid(sdf, i, j, k))
						continue;
					populateSubgridCache(sdf, sdfCache, i, j, k);
					//Process the subgrid
					for (PxU32 z = 0; z < sdf.mSubgridSize; ++z)
					{
						for (PxU32 y = 0; y < sdf.mSubgridSize; ++y)
						{
							for (PxU32 x = 0; x < sdf.mSubgridSize; ++x)
							{
								PxVec3 p;
								if (generatePointInCellUsingCache(sdf, i, j, k, x, y, z, p, sdfCache))
								{
									// global cell coordinates of the generated vertex
									PxU32 xId = i * sdf.mSubgridSize + x;
									PxU32 yId = j * sdf.mSubgridSize + y;
									PxU32 zId = k * sdf.mSubgridSize + z;
									cellToPoint.insert(0, xId, yId, zId, isosurfaceVertices.size());
									isosurfaceVertices.pushBack(p);
								}
							}
						}
					}
				}
			}
		}
	}
	// pass 2: triangle generation from the vertices registered in cellToPoint
	if (sdf.mSubgridSize == 0)
	{
		for (PxI32 k = -1; k <= nbZ; ++k)
			for (PxI32 j = -1; j <= nbY; ++j)
				for (PxI32 i = -1; i <= nbX; ++i)
				{
					PxReal d0 = sdf.decodeDense(i, j, k);
					PxReal ds[3];
					ds[0] = sdf.decodeDense(i + 1, j, k);
					ds[1] = sdf.decodeDense(i, j + 1, k);
					ds[2] = sdf.decodeDense(i, j, k + 1);
					createTriangles(i, j, k, d0, ds, cellToPoint, isosurfaceVertices, isosurfaceTriangleIndices);
				}
	}
	else
	{
		const PxU32 s = sdf.mSubgridSize + 1;	// cache stride per axis
		for (PxI32 k = -1; k <= nbZ; ++k)
		{
			for (PxI32 j = -1; j <= nbY; ++j)
			{
				for (PxI32 i = -1; i <= nbX; ++i)
				{
					if (canSkipSubgrid(sdf, i, j, k))
						continue;
					populateSubgridCache(sdf, sdfCache, i, j, k);
					PxReal ds[3];
					//Process the subgrid
					for (PxU32 z = 0; z < sdf.mSubgridSize; ++z)
					{
						for (PxU32 y = 0; y < sdf.mSubgridSize; ++y)
						{
							for (PxU32 x = 0; x < sdf.mSubgridSize; ++x)
							{
								PxReal d0 = sdfCache[idx3D(x, y, z, s, s)];
								ds[0] = sdfCache[idx3D(x + 1, y, z, s, s)];
								ds[1] = sdfCache[idx3D(x, y + 1, z, s, s)];
								ds[2] = sdfCache[idx3D(x, y, z + 1, s, s)];
								createTriangles(x + i * sdf.mSubgridSize, y + j * sdf.mSubgridSize, z + k * sdf.mSubgridSize, d0, ds,
									cellToPoint, isosurfaceVertices, isosurfaceTriangleIndices);
							}
						}
					}
				}
			}
		}
	}
}
// Extracts the zero isosurface of a sparse SDF as a triangle mesh, parallelized over
// numThreads workers. Phase 1 generates one vertex per surface-crossing cell (each
// thread filling its own vertex array and hash map), phase 2 connects the merged
// vertices into triangles. Dense SDFs (mSubgridSize == 0) use the serial fallback.
void extractIsosurfaceFromSDF(const Gu::SDF& sdf, PxArray<PxVec3>& isosurfaceVertices, PxArray<PxU32>& isosurfaceTriangleIndices, PxU32 numThreads)
{
	if (sdf.mSubgridSize == 0)
	{
		//Handle dense SDFs using the serial fallback
		extractIsosurfaceFromSDFSerial(sdf, isosurfaceVertices, isosurfaceTriangleIndices);
		return;
	}
	// start from empty outputs, consistent with the serial path (vertex indices are
	// computed relative to the beginning of the output array)
	isosurfaceVertices.clear();
	isosurfaceTriangleIndices.clear();
	numThreads = PxMax(1u, numThreads);
	PxArray<PxThread*> threads;
	PxArray<IsosurfaceThreadData> perThreadData;
	CellToPoint cellToPoint(numThreads);
	const PxI32 nbX = sdf.mDims.x / PxMax(1u, sdf.mSubgridSize);
	const PxI32 nbY = sdf.mDims.y / PxMax(1u, sdf.mSubgridSize);
	const PxI32 nbZ = sdf.mDims.z / PxMax(1u, sdf.mSubgridSize);
	// +2 per axis: one boundary layer of subgrids on each side closes the surface at the borders
	PxU32 l = (nbX + 2) * (nbY + 2) * (nbZ + 2);
	PxU32 range = l / numThreads;
	for (PxU32 i = 0; i < numThreads; ++i)
	{
		perThreadData.pushBack(IsosurfaceThreadData(sdf, cellToPoint, isosurfaceVertices));
		IsosurfaceThreadData& d = perThreadData[i];
		d.startIndex = i * range;
		d.endIndex = (i + 1) * range;
		if (i == numThreads - 1)
			d.endIndex = l;	// the last thread picks up the remainder
		d.nbX = nbX + 2;
		d.nbY = nbY + 2;
		d.nbZ = nbZ + 2;
		d.sdfCache.resize((sdf.mSubgridSize + 1) * (sdf.mSubgridSize + 1) * (sdf.mSubgridSize + 1));
		d.threadIndex = i;
	}
	// phase 1: generate isosurface vertices (run inline when there is only one worker)
	for (PxU32 i = 0; i < numThreads; ++i)
	{
		if (perThreadData.size() == 1)
			computeIsosurfaceVerticesThreadJob(&perThreadData[i]);
		else
		{
			threads.pushBack(PX_NEW(PxThread)(computeIsosurfaceVerticesThreadJob, &perThreadData[i], "thread"));
			threads[i]->start();
		}
	}
	for (PxU32 i = 0; i < threads.size(); ++i)
	{
		threads[i]->waitForQuit();
	}
	for (PxU32 i = 0; i < threads.size(); ++i)
	{
		threads[i]->~PxThreadT();
		PX_FREE(threads[i]);
	}
	//Collect vertices: shift every thread's map entries by the number of vertices
	//emitted by the preceding threads so they index the combined array
	PxU32 sum = 0;
	for (PxU32 i = 0; i < perThreadData.size(); ++i)
	{
		IsosurfaceThreadData& d = perThreadData[i];
		if (sum > 0)
		{
			for (PxHashMap<PxU64, PxU32>::Iterator iter = cellToPoint.cellToPoint[i]->getIterator(); !iter.done(); ++iter)
				iter->second += sum;
		}
		sum += d.isosurfaceVertices.size();
	}
	isosurfaceVertices.reserve(sum);
	for (PxU32 i = 0; i < perThreadData.size(); ++i)
	{
		IsosurfaceThreadData& d = perThreadData[i];
		for (PxU32 j = 0; j < d.isosurfaceVertices.size(); ++j)
			isosurfaceVertices.pushBack(d.isosurfaceVertices[j]);
		// Release memory that is not needed anymore. Fix: the per-thread VERTEX array is
		// what has been consumed here; the triangle array is still empty and gets filled
		// in phase 2 (previously this reset d.isosurfaceTriangleIndices).
		d.isosurfaceVertices.reset();
	}
	threads.clear();
	// phase 2: generate triangles (workers read the combined vertices via allIsosurfaceVertices)
	for (PxU32 i = 0; i < numThreads; ++i)
	{
		if (perThreadData.size() == 1)
			computeIsosurfaceTrianglesThreadJob(&perThreadData[i]);
		else
		{
			threads.pushBack(PX_NEW(PxThread)(computeIsosurfaceTrianglesThreadJob, &perThreadData[i], "thread"));
			threads[i]->start();
		}
	}
	for (PxU32 i = 0; i < threads.size(); ++i)
	{
		threads[i]->waitForQuit();
	}
	for (PxU32 i = 0; i < threads.size(); ++i)
	{
		threads[i]->~PxThreadT();
		PX_FREE(threads[i]);
	}
	//Collect triangles
	sum = 0;
	for (PxU32 i = 0; i < perThreadData.size(); ++i)
		sum += perThreadData[i].isosurfaceTriangleIndices.size();
	// Fix: reserve, not resize - resize(sum) would leave 'sum' zero-initialized entries
	// in front of the pushBack'ed indices, corrupting the triangle list (the vertex
	// collection above correctly uses reserve).
	isosurfaceTriangleIndices.reserve(sum);
	for (PxU32 i = 0; i < perThreadData.size(); ++i)
	{
		IsosurfaceThreadData& d = perThreadData[i];
		for (PxU32 j = 0; j < d.isosurfaceTriangleIndices.size(); ++j)
			isosurfaceTriangleIndices.pushBack(d.isosurfaceTriangleIndices[j]);
	}
}
}
}
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxBitMap.h"
#include "GuExtendedBucketPruner.h"
#include "GuAABBTree.h"
#include "GuAABBTreeQuery.h"
#include "GuQuery.h"
#include "GuCallbackAdapter.h"
#include "GuSqInternal.h"
using namespace physx;
using namespace Gu;
#define EXT_NB_OBJECTS_PER_NODE 4
// PT: TODO: this is copied from SqBounds.h, should be either moved to Gu and shared or passed as a user parameter
#define SQ_PRUNER_EPSILON 0.005f
#define SQ_PRUNER_INFLATION (1.0f + SQ_PRUNER_EPSILON) // pruner test shape inflation (not narrow phase shape)
// Constructs the extended bucket pruner: an optional companion pruner for individual
// objects plus a "tree of trees" (main tree over merged trees). The pruning pool is
// externally owned and only referenced here.
ExtendedBucketPruner::ExtendedBucketPruner(PxU64 contextID, CompanionPrunerType type, const PruningPool* pool) :
	mCompanion			(createCompanionPruner(contextID, type, pool)),
	mPruningPool		(pool),
	mMainTree			(NULL),
	mMergedTrees		(NULL),
	mCurrentTreeIndex	(0),
	mTreesDirty			(false)
{
	// preallocated size for bounds, trees
	mCurrentTreeCapacity = 32;
	mBounds.init(mCurrentTreeCapacity);
	mMergedTrees = PX_ALLOCATE(MergedTree, mCurrentTreeCapacity, "AABB trees");
	mExtendedBucketPrunerMap.reserve(mCurrentTreeCapacity);
	// create empty main tree
	mMainTree = PX_NEW(AABBTree);
	// create empty merge trees
	for (PxU32 i = 0; i < mCurrentTreeCapacity; i++)
	{
		mMergedTrees[i].mTimeStamp = 0;
		mMergedTrees[i].mTree = PX_NEW(AABBTree);
	}
}
//////////////////////////////////////////////////////////////////////////
ExtendedBucketPruner::~ExtendedBucketPruner()
{
	// destroy the main tree, then every preallocated merged tree, then the bounds
	// storage, the merged-tree array itself and finally the companion pruner
	PX_DELETE(mMainTree);
	for (PxU32 treeIndex = 0; treeIndex < mCurrentTreeCapacity; treeIndex++)
	{
		AABBTree* mergedTree = mMergedTrees[treeIndex].mTree;
		PX_DELETE(mergedTree);
	}
	mBounds.release();
	PX_FREE(mMergedTrees);
	PX_DELETE(mCompanion);
}
//////////////////////////////////////////////////////////////////////////
// release all objects in bucket pruner
void ExtendedBucketPruner::release()
{
if(mCompanion)
mCompanion->release();
mMainTreeUpdateMap.release();
mMergeTreeUpdateMap.release();
// release all objecs from the map
mExtendedBucketPrunerMap.clear();
// release all merged trees
for (PxU32 i = 0; i < mCurrentTreeCapacity; i++)
{
mMergedTrees[i].mTimeStamp = 0;
mMergedTrees[i].mTree->release();
}
// reset current tree index
mCurrentTreeIndex = 0;
}
//////////////////////////////////////////////////////////////////////////
// Add a tree from a pruning structure
// 1. get new tree index
// 2. initialize merged tree, bounds
// 3. create update map for the merged tree
// 4. build new tree of trees from given trees bounds
// 5. add new objects into extended bucket pruner map
// 6. shift indices in the merged tree
void ExtendedBucketPruner::addTree(const AABBTreeMergeData& mergeData, PxU32 timeStamp)
{
	// check if we have to resize
	if(mCurrentTreeIndex == mCurrentTreeCapacity)
	{
		resize(mCurrentTreeCapacity*2);
	}
	// get current merge tree index
	const PxU32 mergeTreeIndex = mCurrentTreeIndex++;
	// get payloads/userdata pointers - the pointers start at mIndicesOffset, thats where all
	// objects were added before merge was called
	const PrunerPayload* data = &mPruningPool->getObjects()[mergeData.mIndicesOffset];
	// setup merged tree with the merge data and timestamp
	mMergedTrees[mergeTreeIndex].mTimeStamp = timeStamp;
	AABBTree& mergedTree = *mMergedTrees[mergeTreeIndex].mTree;
	mergedTree.initTree(mergeData);
	// set bounds (root node bounds of the incoming tree)
	mBounds.getBounds()[mergeTreeIndex] = mergeData.getRootNode().mBV;
	// update temporally update map for the current merge tree, map is used to setup the base extended bucket pruner map
	// (mMergeTreeUpdateMap is scratch state, rebuilt for every incoming tree)
	mMergeTreeUpdateMap.initMap(mergeData.mNbIndices, mergedTree);
	// create new base tree of trees
	buildMainAABBTree();
	// Add each object into extended bucket pruner hash map
	for (PxU32 i = 0; i < mergeData.mNbIndices; i++)
	{
		ExtendedBucketPrunerData mapData;
		mapData.mMergeIndex = mergeTreeIndex;
		mapData.mTimeStamp = timeStamp;
		PX_ASSERT(mMergeTreeUpdateMap[i] < mergedTree.getNbNodes());
		// get node information from the merge tree update map
		mapData.mSubTreeNode = mMergeTreeUpdateMap[i];
		mExtendedBucketPrunerMap.insert(data[i], mapData);
	}
	// merged tree indices needs to be shifted now, we cannot shift it in init - the update map
	// could not be constructed otherwise, as the indices wont start from 0. The indices
	// needs to be shifted by offset from the pruning pool, where the new objects were added into the pruning pool.
	mergedTree.shiftIndices(mergeData.mIndicesOffset);
#if PX_DEBUG
	checkValidity();
#endif // PX_DEBUG
}
//////////////////////////////////////////////////////////////////////////
// Builds the new main AABB tree with given current active merged trees and its bounds
void ExtendedBucketPruner::buildMainAABBTree()
{
// create the AABB tree from given merged trees bounds
NodeAllocator nodeAllocator;
bool status = mMainTree->build(AABBTreeBuildParams(EXT_NB_OBJECTS_PER_NODE, mCurrentTreeIndex, &mBounds), nodeAllocator);
PX_UNUSED(status);
PX_ASSERT(status);
// Init main tree update map for the new main tree
mMainTreeUpdateMap.initMap(mCurrentTreeIndex, *mMainTree);
}
//////////////////////////////////////////////////////////////////////////
// resize internal memory, buffers
void ExtendedBucketPruner::resize(PxU32 size)
{
	PX_ASSERT(size > mCurrentTreeCapacity);
	// grow the bounds storage first
	mBounds.resize(size, mCurrentTreeCapacity);
	// grow the merged-tree array: move the existing entries over, then create fresh
	// empty trees for the newly added slots
	MergedTree* grownTrees = PX_ALLOCATE(MergedTree, size, "AABB trees");
	PxMemCopy(grownTrees, mMergedTrees, sizeof(MergedTree)*mCurrentTreeCapacity);
	PX_FREE(mMergedTrees);
	mMergedTrees = grownTrees;
	for (PxU32 slot = mCurrentTreeCapacity; slot < size; slot++)
	{
		mMergedTrees[slot].mTimeStamp = 0;
		mMergedTrees[slot].mTree = PX_NEW(AABBTree);
	}
	mCurrentTreeCapacity = size;
}
//////////////////////////////////////////////////////////////////////////
// Update object
// Called when an object's bounds changed. If the object lives in one of the merged
// trees, the affected merged-tree node and the corresponding main-tree node are only
// marked for refit here; the actual refit is deferred to refitMarkedNodes(). Objects
// not found in the extended map are forwarded to the companion pruner.
bool ExtendedBucketPruner::updateObject(const PxBounds3& worldAABB, const PxTransform& transform, const PrunerPayload& object, PrunerHandle handle, const PoolIndex poolIndex)
{
	const ExtendedBucketPrunerMap::Entry* extendedPrunerEntry = mExtendedBucketPrunerMap.find(object);
	// if object is not in tree of trees, it is in bucket pruner core
	if(!extendedPrunerEntry)
	{
		if(mCompanion)
			mCompanion->updateObject(object, handle, worldAABB, transform, poolIndex);
	}
	else
	{
		const ExtendedBucketPrunerData& data = extendedPrunerEntry->second;
		PX_ASSERT(data.mMergeIndex < mCurrentTreeIndex);
		// update tree where objects belongs to
		AABBTree& tree = *mMergedTrees[data.mMergeIndex].mTree;
		PX_ASSERT(data.mSubTreeNode < tree.getNbNodes());
		// mark for refit node in merged tree
		tree.markNodeForRefit(data.mSubTreeNode);
		PX_ASSERT(mMainTreeUpdateMap[data.mMergeIndex] < mMainTree->getNbNodes());
		// mark for refit node in main aabb tree
		mMainTree->markNodeForRefit(mMainTreeUpdateMap[data.mMergeIndex]);
		// defer the actual refit until refitMarkedNodes()
		mTreesDirty = true;
	}
	return true;
}
//////////////////////////////////////////////////////////////////////////
// refit merged nodes
// 1. refit nodes in merged trees
// 2. check if after refit root node is valid - might happen edge case
// where all objects were released - the root node is then invalid
// in this edge case we need to compact the merged trees array
// and create new main AABB tree
// 3. If all merged trees bounds are valid - refit main tree
// 4. If bounds are invalid create new main AABB tree
void ExtendedBucketPruner::refitMarkedNodes(const PxBounds3* boxes)
{
	// if no tree needs update early exit
	if(!mTreesDirty)
		return;
	// refit trees and update bounds for main tree
	PxU32 nbValidTrees = 0;
	for (PxU32 i = mCurrentTreeIndex; i--; )
	{
		AABBTree& tree = *mMergedTrees[i].mTree;
		tree.refitMarkedNodes(boxes);
		const PxBounds3& bounds = tree.getNodes()[0].mBV;
		// check if bounds are valid, if all objects of the tree were released, the bounds
		// will be invalid, in that case we cannot use this tree anymore.
		if(bounds.isValid())
		{
			nbValidTrees++;
		}
		mBounds.getBounds()[i] = bounds;
	}
	if(nbValidTrees == mCurrentTreeIndex)
	{
		// no tree has been removed refit main tree
		mMainTree->refitMarkedNodes(mBounds.getBounds());
	}
	else
	{
		// edge case path, tree does not have a valid root node bounds - all objects from the tree were released
		// we might even fire perf warning
		// compact the tree array - no holes in the array, remember the swap position
		// swapMap[oldIndex] -> newIndex, used below to fix up the per-object merge indices
		PxU32* swapMap = PX_ALLOCATE(PxU32, (mCurrentTreeIndex + 1), "Swap Map");
		PxU32 writeIndex = 0;
		for (PxU32 i = 0; i < mCurrentTreeIndex; i++)
		{
			AABBTree& tree = *mMergedTrees[i].mTree;
			if(tree.getNodes()[0].mBV.isValid())
			{
				// we have to store the tree into an empty location
				if(i != writeIndex)
				{
					PX_ASSERT(writeIndex < i);
					// swap rather than overwrite so the (released) tree object at the
					// write position is kept allocated for reuse
					AABBTree* ptr = mMergedTrees[writeIndex].mTree;
					mMergedTrees[writeIndex] = mMergedTrees[i];
					mMergedTrees[i].mTree = ptr;
					mBounds.getBounds()[writeIndex] = mBounds.getBounds()[i];
				}
				// remember the swap location
				swapMap[i] = writeIndex;
				writeIndex++;
			}
			else
			{
				// tree is not valid, release it
				tree.release();
				mMergedTrees[i].mTimeStamp = 0;
			}
			// remember the swap
			// NOTE(review): writes the same sentinel slot (index mCurrentTreeIndex) on every
			// iteration, so only the last write survives - confirm this is intended
			swapMap[mCurrentTreeIndex] = i;
		}
		PX_ASSERT(writeIndex == nbValidTrees);
		// new merged trees size
		mCurrentTreeIndex = nbValidTrees;
		if(mCurrentTreeIndex)
		{
			// trees have changed, we need to rebuild the main tree
			buildMainAABBTree();
			// fixup the object entries, the merge index has changed
			for (ExtendedBucketPrunerMap::Iterator iter = mExtendedBucketPrunerMap.getIterator(); !iter.done(); ++iter)
			{
				ExtendedBucketPrunerData& data = iter->second;
				PX_ASSERT(swapMap[data.mMergeIndex] < nbValidTrees);
				data.mMergeIndex = swapMap[data.mMergeIndex];
			}
		}
		else
		{
			// if there is no tree release the main tree
			mMainTree->release();
		}
		PX_FREE(swapMap);
	}
#if PX_DEBUG
	checkValidity();
#endif
	// all pending refits have been processed
	mTreesDirty = false;
}
//////////////////////////////////////////////////////////////////////////
// remove object
// Removes 'object' from the extended pruner.
// The pruning pool has already moved the payload previously stored at 'swapObjectIndex'
// into the freed slot 'objectIndex', so the merged trees (and possibly the companion
// pruner) have to be remapped to the new slot - see swapIndex()/invalidateObject().
bool ExtendedBucketPruner::removeObject(const PrunerPayload& object, PrunerHandle handle, PxU32 objectIndex, const PrunerPayload& swapObject, PxU32 swapObjectIndex)
{
	ExtendedBucketPrunerMap::Entry dataEntry;
	// if object is not in tree of trees, it is in bucket pruner core
	if (!mExtendedBucketPrunerMap.erase(object, dataEntry))
	{
		// we need to call invalidateObjects, it might happen that the swapped object
		// does belong to the extended bucket pruner, in that case the objects index
		// needs to be swapped.
		// do not call additional bucket pruner swap, that does happen during remove
		swapIndex(objectIndex, swapObject, swapObjectIndex, false);
		return mCompanion ? mCompanion->removeObject(object, handle, objectIndex, swapObjectIndex) : true;
	}
	else
	{
		const ExtendedBucketPrunerData& data = dataEntry.second;
		// mark tree nodes where objects belongs to
		AABBTree& tree = *mMergedTrees[data.mMergeIndex].mTree;
		PX_ASSERT(data.mSubTreeNode < tree.getNbNodes());
		// mark the merged tree for refit
		tree.markNodeForRefit(data.mSubTreeNode);
		PX_ASSERT(mMainTreeUpdateMap[data.mMergeIndex] < mMainTree->getNbNodes());
		// mark the main tree for refit
		mMainTree->markNodeForRefit(mMainTreeUpdateMap[data.mMergeIndex]);
		// call invalidate object to swap the object indices in the merged trees
		invalidateObject(data, objectIndex, swapObject, swapObjectIndex);
		// trees changed - defer the refit/rebuild work until the next update
		mTreesDirty = true;
	}
#if PX_DEBUG
	checkValidity();
#endif // PX_DEBUG
	return true;
}
//////////////////////////////////////////////////////////////////////////
// invalidate object
// remove the objectIndex from the merged tree
void ExtendedBucketPruner::invalidateObject(const ExtendedBucketPrunerData& data, PxU32 objectIndex, const PrunerPayload& swapObject, PxU32 swapObjectIndex)
{
// get the merged tree
AABBTree& tree = *mMergedTrees[data.mMergeIndex].mTree;
PX_ASSERT(data.mSubTreeNode < tree.getNbNodes());
PX_ASSERT(tree.getNodes()[data.mSubTreeNode].isLeaf());
// get merged tree node
BVHNode& node0 = tree.getNodes()[data.mSubTreeNode];
const PxU32 nbPrims = node0.getNbRuntimePrimitives();
PX_ASSERT(nbPrims <= EXT_NB_OBJECTS_PER_NODE);
// retrieve the primitives pointer
PxU32* primitives = node0.getPrimitives(tree.getIndices());
PX_ASSERT(primitives);
// Look for desired pool index in the leaf
bool foundIt = false;
for (PxU32 i = 0; i < nbPrims; i++)
{
if (objectIndex == primitives[i])
{
foundIt = true;
const PxU32 last = nbPrims - 1;
node0.setNbRunTimePrimitives(last);
primitives[i] = INVALID_POOL_ID; // Mark primitive index as invalid in the node
// Swap within the leaf node. No need to update the mapping since they should all point
// to the same tree node anyway.
if (last != i)
PxSwap(primitives[i], primitives[last]);
break;
}
}
PX_ASSERT(foundIt);
PX_UNUSED(foundIt);
swapIndex(objectIndex, swapObject, swapObjectIndex);
}
// Swap object index
// if swapObject is in a merged tree its index needs to be swapped with objectIndex
// If 'swapObject' lives in a merged tree, the leaf entry holding 'swapObjectIndex'
// is redirected to 'objectIndex' (the slot the pool moved the object into).
// Otherwise the swap is optionally forwarded to the companion pruner.
void ExtendedBucketPruner::swapIndex(PxU32 objectIndex, const PrunerPayload& swapObject, PxU32 swapObjectIndex, bool corePrunerIncluded)
{
	PX_UNUSED(corePrunerIncluded);
	// Nothing to do when the pool did not actually move anything.
	if (objectIndex == swapObjectIndex)
		return;
	const ExtendedBucketPrunerMap::Entry* swapEntry = mExtendedBucketPrunerMap.find(swapObject);
	if (!swapEntry)
	{
		// Swapped object is not tracked here - forward to the companion pruner if requested.
		if (corePrunerIncluded && mCompanion)
			mCompanion->swapIndex(objectIndex, swapObjectIndex);
		return;
	}
	// The swapped object sits in a merged tree - patch its leaf entry.
	const ExtendedBucketPrunerData& swapData = swapEntry->second;
	AABBTree& swapTree = *mMergedTrees[swapData.mMergeIndex].mTree;
	// With multiple primitives per leaf, tree nodes may very well be the same for different
	// pool indices. However the pool indices may be the same when a swap has been skipped
	// in the pruning pool, in which case there is nothing to do.
	PX_ASSERT(swapData.mSubTreeNode < swapTree.getNbNodes());
	PX_ASSERT(swapTree.getNodes()[swapData.mSubTreeNode].isLeaf());
	BVHNode& leaf = swapTree.getNodes()[swapData.mSubTreeNode];
	const PxU32 primCount = leaf.getNbRuntimePrimitives();
	PX_ASSERT(primCount <= EXT_NB_OBJECTS_PER_NODE);
	PxU32* prims = leaf.getPrimitives(swapTree.getIndices());
	PX_ASSERT(prims);
	// Look for the old pool index in the leaf and point it at the new location.
	bool patched = false;
	for (PxU32 slot = 0; slot < primCount; slot++)
	{
		if (prims[slot] == swapObjectIndex)
		{
			prims[slot] = objectIndex;
			patched = true;
			break;
		}
	}
	PX_ASSERT(patched);
	PX_UNUSED(patched);
}
//////////////////////////////////////////////////////////////////////////
// Optimized removal of timestamped objects from the extended bucket pruner
// Removes all objects stamped with 'timeStamp'.
// The merged-trees array is timestamp ordered and never shifted, so the trees to remove
// always form a prefix of the array; surviving trees are compacted to the front and the
// main tree is rebuilt. Returns the number of removed objects (companion included).
PxU32 ExtendedBucketPruner::removeMarkedObjects(PxU32 timeStamp)
{
	// remove objects from the core bucket pruner
	PxU32 retVal = mCompanion ? mCompanion->removeMarkedObjects(timeStamp) : 0;
	// nothing to be removed
	if(!mCurrentTreeIndex)
		return retVal;
	// if last merged tree is the timeStamp to remove, we can clear all
	// this is safe as the merged trees array is time ordered, never shifted
	if(mMergedTrees[mCurrentTreeIndex - 1].mTimeStamp == timeStamp)
	{
		retVal += mExtendedBucketPrunerMap.size();
		cleanTrees();
		return retVal;
	}
	// get the highest index in the merged trees array, where timeStamp match
	// we release than all trees till the index
	PxU32 highestTreeIndex = 0xFFFFFFFF;
	for (PxU32 i = 0; i < mCurrentTreeIndex; i++)
	{
		if(mMergedTrees[i].mTimeStamp == timeStamp)
			highestTreeIndex = i;
		else
			break;	// timestamps are ordered, no later tree can match
	}
	// if no timestamp found early exit
	if(highestTreeIndex == 0xFFFFFFFF)
		return retVal;
	PX_ASSERT(highestTreeIndex < mCurrentTreeIndex);
	// get offset, where valid trees start
	const PxU32 mergeTreeOffset = highestTreeIndex + 1;
	// shrink the array to merged trees with a valid timeStamp
	mCurrentTreeIndex = mCurrentTreeIndex - mergeTreeOffset;
	// go over trees and swap released trees with valid trees from the back (valid trees are at the back)
	for (PxU32 i = 0; i < mCurrentTreeIndex; i++)
	{
		// store bounds, timestamp
		mBounds.getBounds()[i] = mMergedTrees[mergeTreeOffset + i].mTree->getNodes()[0].mBV;
		mMergedTrees[i].mTimeStamp = mMergedTrees[mergeTreeOffset + i].mTimeStamp;
		// release the tree with timestamp
		AABBTree* ptr = mMergedTrees[i].mTree;
		ptr->release();
		// store the valid tree
		mMergedTrees[i].mTree = mMergedTrees[mergeTreeOffset + i].mTree;
		// store the release tree at the offset (the released AABBTree object is parked
		// there so the array keeps owning one tree instance per slot)
		mMergedTrees[mergeTreeOffset + i].mTree = ptr;
		mMergedTrees[mergeTreeOffset + i].mTimeStamp = 0;
	}
	// release the rest of the trees with not valid timestamp
	for (PxU32 i = mCurrentTreeIndex; i <= highestTreeIndex; i++)
	{
		mMergedTrees[i].mTree->release();
		mMergedTrees[i].mTimeStamp = 0;
	}
	// build new main AABB tree with only trees with valid valid timeStamp
	buildMainAABBTree();
	// remove all unnecessary trees and map entries
	bool removeEntry = false;
	PxU32 numRemovedEntries = 0;
	ExtendedBucketPrunerMap::EraseIterator eraseIterator = mExtendedBucketPrunerMap.getEraseIterator();
	ExtendedBucketPrunerMap::Entry* entry = eraseIterator.eraseCurrentGetNext(removeEntry);
	while (entry)
	{
		ExtendedBucketPrunerData& data = entry->second;
		// data to be removed
		if (data.mTimeStamp == timeStamp)
		{
			removeEntry = true;
			numRemovedEntries++;
		}
		else
		{
			// update the merge index and main tree node index
			PX_ASSERT(highestTreeIndex < data.mMergeIndex);
			data.mMergeIndex -= mergeTreeOffset;
			removeEntry = false;
		}
		entry = eraseIterator.eraseCurrentGetNext(removeEntry);
	}
#if PX_DEBUG
	checkValidity();
#endif // PX_DEBUG
	// return the number of removed objects
	return retVal + numRemovedEntries;
}
//////////////////////////////////////////////////////////////////////////
// clean all trees, all objects have been released
void ExtendedBucketPruner::cleanTrees()
{
for (PxU32 i = 0; i < mCurrentTreeIndex; i++)
{
mMergedTrees[i].mTree->release();
mMergedTrees[i].mTimeStamp = 0;
}
mExtendedBucketPrunerMap.clear();
mCurrentTreeIndex = 0;
mMainTree->release();
}
//////////////////////////////////////////////////////////////////////////
// shift origin
void ExtendedBucketPruner::shiftOrigin(const PxVec3& shift)
{
mMainTree->shiftOrigin(shift);
for(PxU32 i=0; i<mCurrentTreeIndex; i++)
mMergedTrees[i].mTree->shiftOrigin(shift);
if(mCompanion)
mCompanion->shiftOrigin(shift);
}
//////////////////////////////////////////////////////////////////////////
// Queries implementation
//////////////////////////////////////////////////////////////////////////
// Raycast/sweeps callback for main AABB tree
// Raycast/sweep callback used while traversing the main AABB tree: each "primitive"
// of the main tree is a merged sub-tree, which is then raycast in turn with the
// user's callback adapted through RaycastCallbackAdapter.
// tInflate selects the inflated (sweep) variant of AABBTreeRaycast.
template<const bool tInflate>
struct MainTreeRaycastPrunerCallback
{
	// All references/pointers are borrowed; they must outlive the query.
	MainTreeRaycastPrunerCallback(const PxVec3& origin, const PxVec3& unitDir, const PxVec3& extent, PrunerRaycastCallback& prunerCallback, const PruningPool* pool, const MergedTree* mergedTrees)
		: mOrigin(origin), mUnitDir(unitDir), mExtent(extent), mPrunerCallback(prunerCallback), mPruningPool(pool), mMergedTrees(mergedTrees)
	{
	}
	// Called per main-tree primitive; 'primIndex' indexes into mMergedTrees.
	// 'distance' may be shrunk by hits. Returns false to abort the traversal.
	bool invoke(PxReal& distance, PxU32 primIndex)
	{
		const AABBTree* aabbTree = mMergedTrees[primIndex].mTree;
		// raycast the merged tree
		RaycastCallbackAdapter pcb(mPrunerCallback, *mPruningPool);
		return AABBTreeRaycast<tInflate, true, AABBTree, BVHNode, RaycastCallbackAdapter>()(mPruningPool->getCurrentAABBTreeBounds(), *aabbTree, mOrigin, mUnitDir, distance, mExtent, pcb);
	}
	PX_NOCOPY(MainTreeRaycastPrunerCallback)
private:
	const PxVec3& mOrigin;
	const PxVec3& mUnitDir;
	const PxVec3& mExtent;
	PrunerRaycastCallback& mPrunerCallback;
	const PruningPool* mPruningPool;
	const MergedTree* mMergedTrees;
};
//////////////////////////////////////////////////////////////////////////
// raycast against the extended bucket pruner
bool ExtendedBucketPruner::raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& prunerCallback) const
{
bool again = mCompanion ? mCompanion->raycast(origin, unitDir, inOutDistance, prunerCallback) : true;
if(again && mExtendedBucketPrunerMap.size())
{
const PxVec3 extent(0.0f);
// main tree callback
MainTreeRaycastPrunerCallback<false> pcb(origin, unitDir, extent, prunerCallback, mPruningPool, mMergedTrees);
// traverse the main tree
again = AABBTreeRaycast<false, true, AABBTree, BVHNode, MainTreeRaycastPrunerCallback<false>>()(mBounds, *mMainTree, origin, unitDir, inOutDistance, extent, pcb);
}
return again;
}
//////////////////////////////////////////////////////////////////////////
// overlap main tree callback
// Overlap callback used while traversing the main AABB tree: each "primitive"
// of the main tree is a merged sub-tree, which is then overlap-tested with the
// user's callback adapted through OverlapCallbackAdapter.
// 'Test' is the shape-vs-AABB test functor (OBB, AABB, capsule, sphere).
template<typename Test>
struct MainTreeOverlapPrunerCallback
{
	// All references/pointers are borrowed; they must outlive the query.
	MainTreeOverlapPrunerCallback(const Test& test, PrunerOverlapCallback& prunerCallback, const PruningPool* pool, const MergedTree* mergedTrees)
		: mTest(test), mPrunerCallback(prunerCallback), mPruningPool(pool), mMergedTrees(mergedTrees)
	{
	}
	// Called per main-tree primitive; 'primIndex' indexes into mMergedTrees.
	// Returns false to abort the traversal.
	bool invoke(PxU32 primIndex)
	{
		const AABBTree* aabbTree = mMergedTrees[primIndex].mTree;
		// overlap the merged tree
		OverlapCallbackAdapter pcb(mPrunerCallback, *mPruningPool);
		return AABBTreeOverlap<true, Test, AABBTree, BVHNode, OverlapCallbackAdapter>()(mPruningPool->getCurrentAABBTreeBounds(), *aabbTree, mTest, pcb);
	}
	PX_NOCOPY(MainTreeOverlapPrunerCallback)
private:
	const Test& mTest;
	PrunerOverlapCallback& mPrunerCallback;
	const PruningPool* mPruningPool;
	const MergedTree* mMergedTrees;
};
//////////////////////////////////////////////////////////////////////////
// overlap implementation
// Overlap query against the extended bucket pruner: companion pruner first, then the
// main tree of merged trees, dispatching on the query volume's geometry type.
// Returns false when the traversal was aborted early by a callback.
bool ExtendedBucketPruner::overlap(const ShapeData& queryVolume, PrunerOverlapCallback& prunerCallback) const
{
	bool again = mCompanion ? mCompanion->overlap(queryVolume, prunerCallback) : true;
	if(again && mExtendedBucketPrunerMap.size())
	{
		switch (queryVolume.getType())
		{
		case PxGeometryType::eBOX:
		{
			if (queryVolume.isOBB())
			{
				const DefaultOBBAABBTest test(queryVolume);
				MainTreeOverlapPrunerCallback<OBBAABBTest> pcb(test, prunerCallback, mPruningPool, mMergedTrees);
				again = AABBTreeOverlap<true, OBBAABBTest, AABBTree, BVHNode, MainTreeOverlapPrunerCallback<OBBAABBTest>>()(mBounds, *mMainTree, test, pcb);
			}
			else
			{
				// axis-aligned box: the cheaper AABB-vs-AABB test is enough
				const DefaultAABBAABBTest test(queryVolume);
				MainTreeOverlapPrunerCallback<AABBAABBTest> pcb(test, prunerCallback, mPruningPool, mMergedTrees);
				again = AABBTreeOverlap<true, AABBAABBTest, AABBTree, BVHNode, MainTreeOverlapPrunerCallback<AABBAABBTest>>()(mBounds, *mMainTree, test, pcb);
			}
		}
		break;
		case PxGeometryType::eCAPSULE:
		{
			const DefaultCapsuleAABBTest test(queryVolume, SQ_PRUNER_INFLATION);
			MainTreeOverlapPrunerCallback<CapsuleAABBTest> pcb(test, prunerCallback, mPruningPool, mMergedTrees);
			again = AABBTreeOverlap<true, CapsuleAABBTest, AABBTree, BVHNode, MainTreeOverlapPrunerCallback<CapsuleAABBTest>>()(mBounds, *mMainTree, test, pcb);
		}
		break;
		case PxGeometryType::eSPHERE:
		{
			const DefaultSphereAABBTest test(queryVolume);
			MainTreeOverlapPrunerCallback<SphereAABBTest> pcb(test, prunerCallback, mPruningPool, mMergedTrees);
			again = AABBTreeOverlap<true, SphereAABBTest, AABBTree, BVHNode, MainTreeOverlapPrunerCallback<SphereAABBTest>>()(mBounds, *mMainTree, test, pcb);
		}
		break;
		case PxGeometryType::eCONVEXMESH:
		{
			// convex meshes are culled with the same OBB test as oriented boxes
			const DefaultOBBAABBTest test(queryVolume);
			MainTreeOverlapPrunerCallback<OBBAABBTest> pcb(test, prunerCallback, mPruningPool, mMergedTrees);
			again = AABBTreeOverlap<true, OBBAABBTest, AABBTree, BVHNode, MainTreeOverlapPrunerCallback<OBBAABBTest>>()(mBounds, *mMainTree, test, pcb);
		}
		break;
		default:
			PX_ALWAYS_ASSERT_MESSAGE("unsupported overlap query volume geometry type");
		}
	}
	return again;
}
//////////////////////////////////////////////////////////////////////////
// sweep implementation
bool ExtendedBucketPruner::sweep(const ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& prunerCallback) const
{
bool again = mCompanion ? mCompanion->sweep(queryVolume, unitDir, inOutDistance, prunerCallback) : true;
if(again && mExtendedBucketPrunerMap.size())
{
const PxBounds3& aabb = queryVolume.getPrunerInflatedWorldAABB();
const PxVec3 extents = aabb.getExtents();
const PxVec3 center = aabb.getCenter();
MainTreeRaycastPrunerCallback<true> pcb(center, unitDir, extents, prunerCallback, mPruningPool, mMergedTrees);
again = AABBTreeRaycast<true, true, AABBTree, BVHNode, MainTreeRaycastPrunerCallback<true>>()(mBounds, *mMainTree, center, unitDir, inOutDistance, extents, pcb);
}
return again;
}
//////////////////////////////////////////////////////////////////////////
// Returns the combined bounds of the companion pruner and the main tree root.
void ExtendedBucketPruner::getGlobalBounds(PxBounds3& bounds) const
{
	// Seed with the companion bounds, or an empty box when there is no companion.
	if (mCompanion)
		mCompanion->getGlobalBounds(bounds);
	else
		bounds.setEmpty();

	const bool hasMainTreeData = mExtendedBucketPrunerMap.size() && mMainTree && mMainTree->getNodes();
	if (hasMainTreeData)
		bounds.include(mMainTree->getNodes()->mBV);
}
//////////////////////////////////////////////////////////////////////////
// Debug rendering: draws the main tree, each merged tree and the companion pruner.
void ExtendedBucketPruner::visualize(PxRenderOutput& out, PxU32 color) const
{
	visualizeTree(out, color, mMainTree);

	const PxU32 nbTrees = mCurrentTreeIndex;
	for (PxU32 treeIndex = 0; treeIndex < nbTrees; treeIndex++)
		visualizeTree(out, color, mMergedTrees[treeIndex].mTree);

	if (mCompanion)
		mCompanion->visualize(out, color);
}
//////////////////////////////////////////////////////////////////////////
#if PX_DEBUG
// extended bucket pruner validity check
// Debug-only consistency check. Verifies that:
// - every merged tree appears exactly once as a main-tree leaf primitive,
// - the cached per-tree bounds equal each merged tree's root bounds,
// - every pool object in a merged-tree leaf has a matching map entry with the
//   correct merge index and sub-tree node,
// - trees beyond mCurrentTreeIndex are empty, and all map entries are in range.
bool ExtendedBucketPruner::checkValidity()
{
	PxBitMap testBitmap;
	testBitmap.resizeAndClear(mCurrentTreeIndex);
	for (PxU32 i = 0; i < mMainTree->getNbNodes(); i++)
	{
		const BVHNode& node = mMainTree->getNodes()[i];
		if(node.isLeaf())
		{
			const PxU32 nbPrims = node.getNbRuntimePrimitives();
			PX_ASSERT(nbPrims <= EXT_NB_OBJECTS_PER_NODE);
			const PxU32* primitives = node.getPrimitives(mMainTree->getIndices());
			for (PxU32 j = 0; j < nbPrims; j++)
			{
				const PxU32 index = primitives[j];
				// check if index is correct
				PX_ASSERT(index < mCurrentTreeIndex);
				// mark the index in the test bitmap, must be once set only, all merged trees must be in the main tree
				PX_ASSERT(testBitmap.test(index) == PxIntFalse);
				testBitmap.set(index);
			}
		}
	}
	PxBitMap mergeTreeTestBitmap;
	mergeTreeTestBitmap.resizeAndClear(mPruningPool->getNbActiveObjects());
	for (PxU32 i = 0; i < mCurrentTreeIndex; i++)
	{
		// check if bounds are the same as the merged tree root bounds
		PX_ASSERT(mBounds.getBounds()[i].maximum.x == mMergedTrees[i].mTree->getNodes()[0].mBV.maximum.x);
		PX_ASSERT(mBounds.getBounds()[i].maximum.y == mMergedTrees[i].mTree->getNodes()[0].mBV.maximum.y);
		PX_ASSERT(mBounds.getBounds()[i].maximum.z == mMergedTrees[i].mTree->getNodes()[0].mBV.maximum.z);
		PX_ASSERT(mBounds.getBounds()[i].minimum.x == mMergedTrees[i].mTree->getNodes()[0].mBV.minimum.x);
		PX_ASSERT(mBounds.getBounds()[i].minimum.y == mMergedTrees[i].mTree->getNodes()[0].mBV.minimum.y);
		PX_ASSERT(mBounds.getBounds()[i].minimum.z == mMergedTrees[i].mTree->getNodes()[0].mBV.minimum.z);
		// check each tree
		const AABBTree& mergedTree = *mMergedTrees[i].mTree;
		for (PxU32 j = 0; j < mergedTree.getNbNodes(); j++)
		{
			const BVHNode& node = mergedTree.getNodes()[j];
			if (node.isLeaf())
			{
				const PxU32 nbPrims = node.getNbRuntimePrimitives();
				PX_ASSERT(nbPrims <= EXT_NB_OBJECTS_PER_NODE);
				const PxU32* primitives = node.getPrimitives(mergedTree.getIndices());
				for (PxU32 k = 0; k < nbPrims; k++)
				{
					const PxU32 index = primitives[k];
					// check if index is correct
					PX_ASSERT(index < mPruningPool->getNbActiveObjects());
					// mark the index in the test bitmap, must be once set only - each pool object
					// may live in at most one merged-tree leaf
					PX_ASSERT(mergeTreeTestBitmap.test(index) == PxIntFalse);
					mergeTreeTestBitmap.set(index);
					const PrunerPayload& payload = mPruningPool->getObjects()[index];
					const ExtendedBucketPrunerMap::Entry* extendedPrunerSwapEntry = mExtendedBucketPrunerMap.find(payload);
					PX_ASSERT(extendedPrunerSwapEntry);
					const ExtendedBucketPrunerData& data = extendedPrunerSwapEntry->second;
					PX_ASSERT(data.mMergeIndex == i);
					PX_ASSERT(data.mSubTreeNode == j);
				}
			}
		}
	}
	// unused slots must hold released (empty) trees
	for (PxU32 i = mCurrentTreeIndex; i < mCurrentTreeCapacity; i++)
	{
		PX_ASSERT(mMergedTrees[i].mTree->getIndices() == NULL);
		PX_ASSERT(mMergedTrees[i].mTree->getNodes() == NULL);
	}
	// every map entry must reference a valid tree/node
	for (ExtendedBucketPrunerMap::Iterator iter = mExtendedBucketPrunerMap.getIterator(); !iter.done(); ++iter)
	{
		const ExtendedBucketPrunerData& data = iter->second;
		PX_ASSERT(mMainTreeUpdateMap[data.mMergeIndex] < mMainTree->getNbNodes());
		PX_ASSERT(data.mMergeIndex < mCurrentTreeIndex);
		PX_ASSERT(data.mSubTreeNode < mMergedTrees[data.mMergeIndex].mTree->getNbNodes());
	}
	return true;
}
#endif
| 30,845 | C++ | 34.577855 | 192 | 0.70462 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuBounds.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuBounds.h"
#include "geometry/PxBoxGeometry.h"
#include "geometry/PxSphereGeometry.h"
#include "geometry/PxPlaneGeometry.h"
#include "geometry/PxConvexMeshGeometry.h"
#include "geometry/PxTetrahedronMeshGeometry.h"
#include "geometry/PxTriangleMeshGeometry.h"
#include "geometry/PxHeightFieldGeometry.h"
#include "geometry/PxCustomGeometry.h"
#include "GuInternal.h"
#include "CmUtils.h"
#include "GuConvexMesh.h"
#include "GuConvexMeshData.h"
#include "GuTriangleMesh.h"
#include "GuTetrahedronMesh.h"
#include "GuHeightFieldData.h"
#include "GuHeightField.h"
#include "GuConvexUtilsInternal.h"
#include "GuBoxConversion.h"
using namespace physx;
using namespace Gu;
using namespace aos;
// Compute global box for current node. The box is stored in mBV.
// Computes the union of the AABBs referenced by the 'primitives' index list.
// 'nbPrims' must be nonzero.
void Gu::computeGlobalBox(PxBounds3& bounds, PxU32 nbPrims, const PxBounds3* PX_RESTRICT boxes, const PxU32* PX_RESTRICT primitives)
{
	PX_ASSERT(boxes);
	PX_ASSERT(primitives);
	PX_ASSERT(nbPrims);
	// Seed the running min/max with the first referenced box, then merge the rest.
	Vec4V mergedMinV = V4LoadU(&boxes[primitives[0]].minimum.x);
	Vec4V mergedMaxV = V4LoadU(&boxes[primitives[0]].maximum.x);
	for (PxU32 i = 1; i < nbPrims; i++)
	{
		const PxBounds3& currentBox = boxes[primitives[i]];
		mergedMinV = V4Min(mergedMinV, V4LoadU(&currentBox.minimum.x));
		mergedMaxV = V4Max(mergedMaxV, V4LoadU(&currentBox.maximum.x));
	}
	StoreBounds(bounds, mergedMinV, mergedMaxV);
}
// Computes the AABB of a vertex array. Empty input produces an empty box.
void Gu::computeBoundsAroundVertices(PxBounds3& bounds, PxU32 nbVerts, const PxVec3* PX_RESTRICT verts)
{
	if (!nbVerts)
	{
		bounds.setEmpty();
		return;
	}
	// The last vertex cannot be read with a 16-byte V4LoadU without the risk of
	// touching memory past the buffer, so it is fetched with V3LoadU instead and
	// reused to seed the running min/max (which also spares an explicit empty init).
	const PxU32 nbSafeLoads = nbVerts - 1;
	const Vec4V seedV = Vec4V_From_Vec3V(V3LoadU(&verts[nbSafeLoads].x));
	Vec4V minV = seedV;
	Vec4V maxV = seedV;
	// The first N-1 vertices are safe for fast 16-byte unaligned loads.
	for (PxU32 i = 0; i < nbSafeLoads; i++)
	{
		const Vec4V vertexV = V4LoadU(&verts[i].x);
		minV = V4Min(minV, vertexV);
		maxV = V4Max(maxV, vertexV);
	}
	StoreBounds(bounds, minV, maxV);
}
void Gu::computeLocalBoundsAndGeomEpsilon(const PxVec3* vertices, PxU32 nbVerties, PxBounds3& localBounds, PxReal& geomEpsilon)
{
computeBoundsAroundVertices(localBounds, nbVerties, vertices);
// Derive a good geometric epsilon from local bounds. We must do this before bounds extrusion for heightfields.
//
// From Charles Bloom:
// "Epsilon must be big enough so that the consistency condition abs(D(Hit))
// <= Epsilon is satisfied for all queries. You want the smallest epsilon
// you can have that meets that constraint. Normal floats have a 24 bit
// mantissa. When you do any float addition, you may have round-off error
// that makes the result off by roughly 2^-24 * result. Our result is
// scaled by the position values. If our world is strictly required to be
// in a box of world size W (each coordinate in -W to W), then the maximum
// error is 2^-24 * W. Thus Epsilon must be at least >= 2^-24 * W. If
// you're doing coordinate transforms, that may scale your error up by some
// amount, so you'll need a bigger epsilon. In general something like
// 2^-22*W is reasonable. If you allow scaled transforms, it needs to be
// something like 2^-22*W*MAX_SCALE."
// PT: TODO: runtime checkings for this
PxReal eps = 0.0f;
for (PxU32 i = 0; i < 3; i++)
eps = PxMax(eps, PxMax(PxAbs(localBounds.maximum[i]), PxAbs(localBounds.minimum[i])));
eps *= powf(2.0f, -22.0f);
geomEpsilon = eps;
}
// Transforms center/extents bounds by (rot, pos) without testing for empty bounds:
// the center is rotated and translated, the extents become the projection of the
// rotated box onto the world axes.
static PX_FORCE_INLINE void transformNoEmptyTest(PxVec3p& c, PxVec3p& ext, const PxMat33& rot, const PxVec3& pos, const CenterExtentsPadded& bounds)
{
	c = rot.transform(bounds.mCenter) + pos;
	ext = Cm::basisExtent(rot.column0, rot.column1, rot.column2, bounds.mExtents);
}
// PT: this one may have duplicates in GuBV4_BoxSweep_Internal.h & GuBV4_Raycast.cpp
// SIMD 3x3 matrix * vector: res = col0*p.x + col1*p.y + col2*p.z.
// The padded matrix type makes the 16-byte column loads safe.
static PX_FORCE_INLINE Vec4V multiply3x3V(const Vec4V p, const PxMat33Padded& mat_Padded)
{
	const Vec4V xTermV = V4Scale(V4LoadU(&mat_Padded.column0.x), V4GetX(p));
	const Vec4V yTermV = V4Scale(V4LoadU(&mat_Padded.column1.x), V4GetY(p));
	const Vec4V zTermV = V4Scale(V4LoadU(&mat_Padded.column2.x), V4GetZ(p));
	// same accumulation order as before to keep float results bit-identical
	return V4Add(V4Add(xTermV, yTermV), zTermV);
}
// SIMD variant of transformNoEmptyTest: transforms center/extents bounds by
// (rot, pos) and writes the world-space center and extents to 'c' and 'ext'.
static PX_FORCE_INLINE void transformNoEmptyTestV(PxVec3p& c, PxVec3p& ext, const PxMat33Padded& rot, const PxVec3& pos, const CenterExtentsPadded& bounds)
{
	// Safe 16-byte load: mExtents directly follows mCenter in the class layout.
	const Vec4V localCenterV = V4LoadU(&bounds.mCenter.x);
	// 'pos' can come straight from user memory, so only the 12-byte V3LoadU is safe here.
	// (An unsafe V4LoadU of 'pos' would save a few instructions - see original note.)
	const Vec4V translationV = Vec4V_From_Vec3V(V3LoadU(&pos.x));
	const Vec4V worldCenterV = V4Add(multiply3x3V(localCenterV, rot), translationV);
	V4StoreU(worldCenterV, &c.x);

	// Scale each (padded) basis vector by the matching local extent component...
	const Vec4V localExtentsV = V4LoadU(&bounds.mExtents.x);	// safe: bounds are padded
	const Vec4V scaled0V = V4Scale(V4LoadU(&rot.column0.x), V4GetX(localExtentsV));
	const Vec4V scaled1V = V4Scale(V4LoadU(&rot.column1.x), V4GetY(localExtentsV));
	const Vec4V scaled2V = V4Scale(V4LoadU(&rot.column2.x), V4GetZ(localExtentsV));
	// ...and sum the absolute values: that is the max distance along each world axis.
	Vec4V worldExtentsV = V4Add(V4Abs(scaled0V), V4Abs(scaled1V));
	worldExtentsV = V4Add(worldExtentsV, V4Abs(scaled2V));
	V4StoreU(worldExtentsV, &ext.x);
}
// Returns nonzero iff any component of 'scale' differs bitwise from 1.0f.
static PX_FORCE_INLINE PxU32 isNonIdentity(const PxVec3& scale)
{
#define IEEE_1_0 0x3f800000 //!< integer representation of 1.0
	// Compare the raw float bits against 1.0f: the OR of the three differences
	// is zero only when all components are exactly 1.0f.
	const PxU32* scaleBits = reinterpret_cast<const PxU32*>(&scale.x);
	return (scaleBits[0] - IEEE_1_0)|(scaleBits[1] - IEEE_1_0)|(scaleBits[2] - IEEE_1_0);
}
// PT: please don't inline this one - 300+ lines of rarely used code
// Folds the mesh scale into the rotation matrix (rot = rot * scaleMatrix).
// PT: please don't inline this one - 300+ lines of rarely used code
static void computeScaledMatrix(PxMat33Padded& rot, const PxMeshScale& scale)
{
	rot = rot * Cm::toMat33(scale);
}
// Transforms center/extents bounds by a rigid transform plus mesh scale.
// The scale matrix is only folded in when the scale is not identity (bitwise check),
// keeping the common unscaled path cheap.
static PX_FORCE_INLINE void transformNoEmptyTest(PxVec3p& c, PxVec3p& ext, const PxTransform& transform, const PxMeshScale& scale, const CenterExtentsPadded& bounds)
{
	PxMat33Padded rot(transform.q);
	if(isNonIdentity(scale.scale))
		computeScaledMatrix(rot, scale);
	transformNoEmptyTestV(c, ext, rot, transform.p, bounds);
}
// Variant taking an explicit rotation matrix: folds the mesh scale into the matrix
// only when the scale is not identity, then forwards to the scalar transform path.
static PX_FORCE_INLINE void transformNoEmptyTest(PxVec3p& c, PxVec3p& ext, const PxVec3& pos, const PxMat33Padded& rot, const PxMeshScale& scale, const CenterExtentsPadded& bounds)
{
	if(scale.isIdentity())
		transformNoEmptyTest(c, ext, rot, pos, bounds);
	else
		transformNoEmptyTest(c, ext, rot * Cm::toMat33(scale), pos, bounds);
}
// Transforms a mesh's local-space center/extents bounds into world space
// (applying pose and mesh scale), returning the world center and extents.
static void computeMeshBounds(const PxTransform& pose, const CenterExtentsPadded* PX_RESTRICT localSpaceBounds, const PxMeshScale& meshScale, PxVec3p& origin, PxVec3p& extent)
{
	transformNoEmptyTest(origin, extent, pose, meshScale, *localSpaceBounds);
}
// Computes bounds for a plane shape. A plane is infinite, so the box normally covers
// the whole world; when the plane normal is (nearly) axis-aligned we can clamp the
// half-space side of the box instead.
static void computePlaneBounds(PxBounds3& bounds, const PxTransform& pose, float contactOffset, float inflation)
{
	// PT: we use PX_MAX_BOUNDS_EXTENTS to be compatible with PxBounds3::setMaximal,
	// and to make sure that the value doesn't collide with the BP's sentinels.
	const PxF32 worldExtent = PX_MAX_BOUNDS_EXTENTS;
	PxVec3 minPt(-worldExtent, -worldExtent, -worldExtent);
	PxVec3 maxPt(worldExtent, worldExtent, worldExtent);

	const PxVec3 planeNormal = pose.q.getBasisVector0();
	const PxPlane plane(pose.p, planeNormal);

	const float absNx = PxAbs(planeNormal.x);
	const float absNy = PxAbs(planeNormal.y);
	const float absNz = PxAbs(planeNormal.z);
	const float epsilon = 1e-6f;
	const float almostOne = 1.0f - epsilon;

	// Clamp along whichever axis the normal is (nearly) aligned with.
	if(absNx > almostOne && absNy < epsilon && absNz < epsilon)
	{
		if(planeNormal.x > 0.0f)
			maxPt.x = -plane.d + contactOffset;
		else
			minPt.x = plane.d - contactOffset;
	}
	else if(absNx < epsilon && absNy > almostOne && absNz < epsilon)
	{
		if(planeNormal.y > 0.0f)
			maxPt.y = -plane.d + contactOffset;
		else
			minPt.y = plane.d - contactOffset;
	}
	else if(absNx < epsilon && absNy < epsilon && absNz > almostOne)
	{
		if(planeNormal.z > 0.0f)
			maxPt.z = -plane.d + contactOffset;
		else
			minPt.z = plane.d - contactOffset;
	}

	// PT: compute the min/max form directly - round-tripping through center/extents
	// with PX_MAX_BOUNDS_EXTENTS-sized values destroys accuracy.
	// PT: inflation of near-infinite bounds is dubious (DE10595) but kept for compatibility
	// with existing unit tests.
	if(inflation != 1.0f)
	{
		const PxVec3 center = (maxPt + minPt)*0.5f;
		const PxVec3 halfExtents = (maxPt - minPt)*0.5f*inflation;
		minPt = center - halfExtents;
		maxPt = center + halfExtents;
	}
	bounds.minimum = minPt;
	bounds.maximum = maxPt;
}
// Builds min/max bounds of (extents + contactOffset) * inflation around 'origin'.
static PX_FORCE_INLINE void inflateBounds(PxBounds3& bounds, const PxVec3p& origin, const PxVec3p& extents, float contactOffset, float inflation)
{
	Vec4V inflatedExtentsV = V4Add(V4LoadU(&extents.x), V4Load(contactOffset));
	inflatedExtentsV = V4Scale(inflatedExtentsV, FLoad(inflation));

	const Vec4V centerV = V4LoadU(&origin.x);
	const Vec4V minV = V4Sub(centerV, inflatedExtentsV);
	const Vec4V maxV = V4Add(centerV, inflatedExtentsV);
	StoreBounds(bounds, minV, maxV);
}
// Computes the world-axis extents of an oriented box: scales the (padded) basis
// vectors by the half-extents, sums their absolute values per component, adds the
// contact offset and applies the inflation factor.
static PX_FORCE_INLINE Vec4V basisExtentV(const PxMat33Padded& basis, const PxVec3& extent, float offset, float inflation)
{
	const Vec4V axis0V = V4Scale(V4LoadU(&basis.column0.x), FLoad(extent.x));
	const Vec4V axis1V = V4Scale(V4LoadU(&basis.column1.x), FLoad(extent.y));
	const Vec4V axis2V = V4Scale(V4LoadU(&basis.column2.x), FLoad(extent.z));
	// sum of abs() = max distance of the rotated box along each world axis
	Vec4V resultV = V4Add(V4Abs(axis0V), V4Abs(axis1V));
	resultV = V4Add(resultV, V4Abs(axis2V));
	resultV = V4Add(resultV, V4Load(offset));
	return V4Scale(resultV, FLoad(inflation));
}
// Transforms a mesh's local-space bounds to world space, then inflates them by the
// contact offset and inflation factor into a min/max box.
static PX_FORCE_INLINE void computeMeshBounds(PxBounds3& bounds, float contactOffset, float inflation, const PxTransform& pose, const CenterExtentsPadded* PX_RESTRICT localSpaceBounds, const PxMeshScale& scale)
{
	PxVec3p worldCenter, worldExtents;
	computeMeshBounds(pose, localSpaceBounds, scale, worldCenter, worldExtents);
	::inflateBounds(bounds, worldCenter, worldExtents, contactOffset, inflation);
}
// Computes tight world-space bounds by transforming each vertex individually
// (scale folded into the rotation matrix), instead of transforming a precomputed
// local box. Slower but produces smaller bounds for rotated/scaled meshes.
// Contact offset is added and the inflation factor applied around the box center.
void Gu::computeTightBounds(PxBounds3& bounds, PxU32 nb, const PxVec3* PX_RESTRICT v, const PxTransform& pose, const PxMeshScale& scale, float contactOffset, float inflation)
{
	if(!nb)
	{
		bounds.setEmpty();
		return;
	}

	PxMat33Padded rot(pose.q);
	if(isNonIdentity(scale.scale))
		computeScaledMatrix(rot, scale);

	// PT: we can safely V4LoadU the first N-1 vertices. We must V3LoadU the last vertex, to make sure we don't read
	// invalid memory. Since we have to special-case that last vertex anyway, we reuse that code to also initialize
	// the minV/maxV values (bypassing the need for a 'setEmpty()' initialization).

	PxU32 nbSafe = nb-1;

	// PT: read last (unsafe) vertex using V3LoadU, initialize minV/maxV
	const Vec4V lastVertexV = multiply3x3V(Vec4V_From_Vec3V(V3LoadU(&v[nbSafe].x)), rot);
	Vec4V minV = lastVertexV;
	Vec4V maxV = lastVertexV;

	// PT: read N-1 first (safe) vertices using V4LoadU
	while(nbSafe--)
	{
		const Vec4V vertexV = multiply3x3V(V4LoadU(&v->x), rot);
		v++;

		minV = V4Min(minV, vertexV);
		maxV = V4Max(maxV, vertexV);
	}

	// grow by the contact offset in local (rotated) space...
	const Vec4V offsetV = V4Load(contactOffset);
	minV = V4Sub(minV, offsetV);
	maxV = V4Add(maxV, offsetV);

	// ...then translate to world space (translation applied after min/max since it is order-independent)
	const Vec4V posV = Vec4V_From_Vec3V(V3LoadU(&pose.p.x));
	maxV = V4Add(maxV, posV);
	minV = V4Add(minV, posV);

	// Inflation: scale the half-extents around the box center
	{
		const Vec4V centerV = V4Scale(V4Add(maxV, minV), FLoad(0.5f));
		const Vec4V extentsV = V4Scale(V4Sub(maxV, minV), FLoad(0.5f*inflation));
		maxV = V4Add(centerV, extentsV);
		minV = V4Sub(centerV, extentsV);
	}

	StoreBounds(bounds, minV, maxV);
}
// Computes the world-space AABB of 'geometry' at 'pose'. 'contactOffset' is
// added to the extents and the result is scaled by 'inflation' about the box
// center. Unknown geometry types assert and report an internal error, leaving
// 'bounds' untouched.
void Gu::computeBounds(PxBounds3& bounds, const PxGeometry& geometry, const PxTransform& pose, float contactOffset, float inflation)
{
	// Box, Convex, Mesh and HeightField will compute local bounds and pose to world space.
	// Sphere, Capsule & Plane will compute world space bounds directly.
	switch(geometry.getType())
	{
		case PxGeometryType::eSPHERE:
		{
			const PxSphereGeometry& shape = static_cast<const PxSphereGeometry&>(geometry);
			// A sphere's AABB is a cube centered on the pose position.
			const PxVec3 extents((shape.radius+contactOffset)*inflation);
			bounds.minimum = pose.p - extents;
			bounds.maximum = pose.p + extents;
		}
		break;
		case PxGeometryType::ePLANE:
		{
			computePlaneBounds(bounds, pose, contactOffset, inflation);
		}
		break;
		case PxGeometryType::eCAPSULE:
		{
			computeCapsuleBounds(bounds, static_cast<const PxCapsuleGeometry&>(geometry), pose, contactOffset, inflation);
		}
		break;
		case PxGeometryType::eBOX:
		{
			const PxBoxGeometry& shape = static_cast<const PxBoxGeometry&>(geometry);
			const PxVec3p origin(pose.p);
			const PxMat33Padded basis(pose.q);
			// World-space half-extents of the rotated box, already offset/inflated.
			const Vec4V extentsV = basisExtentV(basis, shape.halfExtents, contactOffset, inflation);
			const Vec4V originV = V4LoadU(&origin.x);
			const Vec4V minV = V4Sub(originV, extentsV);
			const Vec4V maxV = V4Add(originV, extentsV);
			StoreBounds(bounds, minV, maxV);
		}
		break;
		case PxGeometryType::eCONVEXMESH:
		{
			const PxConvexMeshGeometry& shape = static_cast<const PxConvexMeshGeometry&>(geometry);
			const Gu::ConvexHullData& hullData = static_cast<const Gu::ConvexMesh*>(shape.convexMesh)->getHull();
			// Tight bounds iterate over the hull vertices; the default path
			// transforms the precomputed local AABB (cheaper but looser).
			const bool useTightBounds = shape.meshFlags & PxConvexMeshGeometryFlag::eTIGHT_BOUNDS;
			if(useTightBounds)
				computeTightBounds(bounds, hullData.mNbHullVertices, hullData.getHullVertices(), pose, shape.scale, contactOffset, inflation);
			else
				computeMeshBounds(bounds, contactOffset, inflation, pose, &hullData.getPaddedBounds(), shape.scale);
		}
		break;
		case PxGeometryType::eTRIANGLEMESH:
		{
			const PxTriangleMeshGeometry& shape = static_cast<const PxTriangleMeshGeometry&>(geometry);
			const TriangleMesh* triangleMesh = static_cast<const TriangleMesh*>(shape.triangleMesh);
			// Same tight-vs-loose choice as for convex meshes above.
			const bool useTightBounds = shape.meshFlags & PxMeshGeometryFlag::eTIGHT_BOUNDS;
			if(useTightBounds)
				computeTightBounds(bounds, triangleMesh->getNbVerticesFast(), triangleMesh->getVerticesFast(), pose, shape.scale, contactOffset, inflation);
			else
				computeMeshBounds(bounds, contactOffset, inflation, pose, &triangleMesh->getPaddedBounds(), shape.scale);
		}
		break;
		case PxGeometryType::eHEIGHTFIELD:
		{
			const PxHeightFieldGeometry& shape = static_cast<const PxHeightFieldGeometry&>(geometry);
			// The heightfield's row/height/column scales are folded into a PxMeshScale.
			computeMeshBounds(bounds, contactOffset, inflation, pose, &static_cast<const Gu::HeightField*>(shape.heightField)->getData().getPaddedBounds(), PxMeshScale(PxVec3(shape.rowScale, shape.heightScale, shape.columnScale)));
		}
		break;
		case PxGeometryType::eTETRAHEDRONMESH:
		{
			const PxTetrahedronMeshGeometry& shape = static_cast<const PxTetrahedronMeshGeometry&>(geometry);
			computeMeshBounds(bounds, contactOffset, inflation, pose, &static_cast<const Gu::TetrahedronMesh*>(shape.tetrahedronMesh)->getPaddedBounds(), PxMeshScale());
		}
		break;
		case PxGeometryType::ePARTICLESYSTEM:
		{
			// implement!
			PX_ASSERT(0);
		}
		break;
		case PxGeometryType::eHAIRSYSTEM:
		{
			// jcarius: Hairsystem bounds only available on GPU
			bounds.setEmpty();
		}
		break;
		case PxGeometryType::eCUSTOM:
		{
			const PxCustomGeometry& shape = static_cast<const PxCustomGeometry&>(geometry);
			// Without callbacks the local bounds default to an empty (zero) box.
			PxVec3p centre(0), extents(0);
			if (shape.callbacks)
			{
				const PxBounds3 b = shape.callbacks->getLocalBounds(shape);
				centre = b.getCenter(); extents = b.getExtents();
			}
			// Same path as eBOX, using the user-provided local bounds as the box.
			const PxVec3p origin(pose.transform(centre));
			const PxMat33Padded basis(pose.q);
			const Vec4V extentsV = basisExtentV(basis, extents, contactOffset, inflation);
			const Vec4V originV = V4LoadU(&origin.x);
			const Vec4V minV = V4Sub(originV, extentsV);
			const Vec4V maxV = V4Add(originV, extentsV);
			StoreBounds(bounds, minV, maxV);
		}
		break;
		default:
		{
			PX_ASSERT(0);
			PxGetFoundation().error(PxErrorCode::eINTERNAL_ERROR, PX_FL, "Gu::computeBounds: Unknown shape type.");
		}
	}
}
// Half-extents of the tightest capsule-local box enclosing the capsule:
// X is the capsule axis (radius + halfHeight), Y/Z are just the radius.
// All three components are scaled by 'inflation'.
static PX_FORCE_INLINE void computeBoxExtentsAroundCapsule(PxVec3& extents, const PxCapsuleGeometry& capsuleGeom, float inflation)
{
	extents.x = (capsuleGeom.radius + capsuleGeom.halfHeight) * inflation;
	extents.y = capsuleGeom.radius * inflation;
	extents.z = capsuleGeom.radius * inflation;
}
// Inflation factor applied to scene-query pruner shapes (for overlap/sweep/GJK
// accuracy), distinct from any narrow-phase inflation.
static const PxReal SQ_PRUNER_INFLATION = 1.01f; // pruner test shape inflation (not narrow phase shape)
// Transforms a mesh's local-space center/extents bounds to world space:
// 'origin' receives the world-space center, 'extent' the world-space
// half-extents. No offset or inflation is applied here.
static void computeMeshBounds(const PxVec3& pos, const PxMat33Padded& rot, const CenterExtentsPadded* PX_RESTRICT localSpaceBounds, const PxMeshScale& meshScale, PxVec3p& origin, PxVec3p& extent)
{
	PxPrefetchLine(localSpaceBounds); // PT: this one helps reducing L2 misses in transformNoEmptyTest
	transformNoEmptyTest(origin, extent, pos, rot, meshScale, *localSpaceBounds);
}
// PT: warning: this writes 4 bytes after the end of 'bounds'. Calling code must ensure it is safe to do so.
// Computes min/max bounds as c -/+ (e + offset) * prunerInflation. The
// 4-wide unaligned SIMD stores are what cause the past-the-end write noted above.
static PX_FORCE_INLINE void computeMinMaxBounds(PxBounds3* PX_RESTRICT bounds, const PxVec3p& c, const PxVec3p& e, float prunerInflation, float offset)
{
	const Vec4V extentsV = V4Scale(V4Add(V4LoadU(&e.x), V4Load(offset)), FLoad(prunerInflation));
	const Vec4V centerV = V4LoadU(&c.x);
	const Vec4V minV = V4Sub(centerV, extentsV);
	const Vec4V maxV = V4Add(centerV, extentsV);
	V4StoreU(minV, &bounds->minimum.x);
	V4StoreU(maxV, &bounds->maximum.x);
}
// Builds the scene-query representation of a shape: an inflated world-space
// AABB (mPrunerInflatedAABB, used e.g. by the BucketPruner as cull box), an
// OBB (mGuBox / mPrunerBoxGeomExtents) and a narrow-phase copy of the
// geometry (sphere/capsule). Only sphere, capsule, box and convex geometries
// are supported; anything else asserts.
// NOTE: several writes below deliberately spill past member boundaries (SIMD
// stores); the member write order is therefore significant — do not reorder.
ShapeData::ShapeData(const PxGeometry& g, const PxTransform& t, PxReal inflation)
{
	using namespace physx::aos;
	// PT: this cast to matrix is already done in GeometryUnion::computeBounds (e.g. for boxes). So we do it first,
	// then we'll pass the matrix directly to computeBoundsShapeData, to avoid the double conversion.
	const bool isOBB = PxAbs(t.q.w) < 0.999999f;
	if(isOBB)
	{
		// PT: writes 4 bytes after 'rot' but it's safe since we then write 'center' just afterwards
		buildFrom(mGuBox, t.q);
	}
	else
	{
		mGuBox.rot = PxMat33(PxIdentity);
	}
	// PT: can't use V4Load here since there's no guarantee on 't.p'
	// PT: must store 'center' after 'rot' now
	mGuBox.center = t.p;
	// Compute AABB, used by the BucketPruner as cullBox
	switch(g.getType())
	{
		case PxGeometryType::eSPHERE:
		{
			const PxSphereGeometry& shape = static_cast<const PxSphereGeometry&>(g);
			// Sphere: zero extents, the radius goes in via the 'offset' parameter.
			computeMinMaxBounds(&mPrunerInflatedAABB, mGuBox.center, PxVec3(0.0f), SQ_PRUNER_INFLATION, shape.radius+inflation);
			//
			reinterpret_cast<Sphere&>(mGuSphere) = Sphere(t.p, shape.radius);
		}
		break;
		case PxGeometryType::eCAPSULE:
		{
			const PxCapsuleGeometry& shape = static_cast<const PxCapsuleGeometry&>(g);
			// The capsule axis is the box's X column; the radius is added via 'offset'.
			const PxVec3p extents = mGuBox.rot.column0.abs() * shape.halfHeight;
			computeMinMaxBounds(&mPrunerInflatedAABB, mGuBox.center, extents, SQ_PRUNER_INFLATION, shape.radius+inflation);
			//
			Capsule& dstWorldCapsule = reinterpret_cast<Capsule&>(mGuCapsule); // store a narrow phase version copy
			getCapsule(dstWorldCapsule, shape, t);
			mGuBox.extents.x = shape.halfHeight;
			// compute PxBoxGeometry pruner geom around input capsule geom; transform remains unchanged
			computeBoxExtentsAroundCapsule(mPrunerBoxGeomExtents, shape, SQ_PRUNER_INFLATION);
		}
		break;
		case PxGeometryType::eBOX:
		{
			const PxBoxGeometry& shape = static_cast<const PxBoxGeometry&>(g);
			// PT: cast is safe because 'rot' followed by other members
			Vec4V extentsV = basisExtentV(static_cast<const PxMat33Padded&>(mGuBox.rot), shape.halfExtents, inflation, SQ_PRUNER_INFLATION);
			// PT: c/e-to-m/M conversion
			const Vec4V centerV = V4LoadU(&mGuBox.center.x);
			const Vec4V minV = V4Sub(centerV, extentsV);
			const Vec4V maxV = V4Add(centerV, extentsV);
			V4StoreU(minV, &mPrunerInflatedAABB.minimum.x);
			V4StoreU(maxV, &mPrunerInflatedAABB.maximum.x); // PT: WARNING: writes past end of class
			//
			mGuBox.extents = shape.halfExtents; // PT: TODO: use SIMD
			mPrunerBoxGeomExtents = shape.halfExtents*SQ_PRUNER_INFLATION;
		}
		break;
		case PxGeometryType::eCONVEXMESH:
		{
			const PxConvexMeshGeometry& shape = static_cast<const PxConvexMeshGeometry&>(g);
			const ConvexMesh* cm = static_cast<const ConvexMesh*>(shape.convexMesh);
			const ConvexHullData* hullData = &cm->getHull();
			// PT: cast is safe since 'rot' is followed by other members of the box
			// AABB from the hull's local bounds transformed to world space.
			PxVec3p center, extents;
			computeMeshBounds(mGuBox.center, static_cast<const PxMat33Padded&>(mGuBox.rot), &hullData->getPaddedBounds(), shape.scale, center, extents);
			computeMinMaxBounds(&mPrunerInflatedAABB, center, extents, SQ_PRUNER_INFLATION, inflation);
			//
			// The pruner OBB replaces the plain pose-derived box for convexes.
			Box prunerBox;
			computeOBBAroundConvex(prunerBox, shape, cm, t);
			mGuBox.rot = prunerBox.rot; // PT: TODO: optimize this copy
			// AP: pruners are now responsible for growing the OBB by 1% for overlap/sweep/GJK accuracy
			mPrunerBoxGeomExtents = prunerBox.extents*SQ_PRUNER_INFLATION;
			mGuBox.center = prunerBox.center;
		}
		break;
		default:
			PX_ALWAYS_ASSERT_MESSAGE("PhysX internal error: Invalid shape in ShapeData contructor.");
	}
	// PT: WARNING: these writes must stay after the above code
	mIsOBB = PxU32(isOBB);
	mType = PxU16(g.getType());
}
| 23,441 | C++ | 36.932039 | 222 | 0.738194 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuMeshFactory.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "common/PxInsertionCallback.h"
#include "GuCooking.h"
#include "GuMeshFactory.h"
#include "GuTriangleMeshBV4.h"
#include "GuTriangleMeshRTree.h"
#include "GuTetrahedronMesh.h"
#include "GuConvexMesh.h"
#include "GuBVH.h"
#include "GuHeightField.h"
#if PX_SUPPORT_OMNI_PVD
# define OMNI_PVD_NOTIFY_ADD(OBJECT) notifyListenersAdd(OBJECT)
# define OMNI_PVD_NOTIFY_REMOVE(OBJECT) notifyListenersRemove(OBJECT)
#else
# define OMNI_PVD_NOTIFY_ADD(OBJECT)
# define OMNI_PVD_NOTIFY_REMOVE(OBJECT)
#endif
using namespace physx;
using namespace Gu;
using namespace Cm;
///////////////////////////////////////////////////////////////////////////////
PX_IMPLEMENT_OUTPUT_ERROR
///////////////////////////////////////////////////////////////////////////////
// PT: TODO: refactor all this with a dedicated container
// The string arguments name each tracking hash set for memory
// tracking/debugging purposes.
MeshFactory::MeshFactory() :
	mTriangleMeshes		("mesh factory triangle mesh hash"),
	mConvexMeshes		("mesh factory convex mesh hash"),
	mHeightFields		("mesh factory height field hash"),
	mBVHs				("BVH factory hash"),
	mFactoryListeners	("FactoryListeners")
{
}
MeshFactory::~MeshFactory()
{
	// Intentionally empty: leftover objects are drained explicitly in
	// MeshFactory::release() before the factory is deleted.
}
///////////////////////////////////////////////////////////////////////////////
// Drains the set: each release() is expected to unregister its object from
// the set, shrinking it until empty. Asserts the factory holds the last
// reference.
template<class T>
static void releaseObjects(PxCoalescedHashSet<T*>& objects)
{
	for(;;)
	{
		if(!objects.size())
			break;
		T* entry = objects.getEntries()[0];
		PX_ASSERT(RefCountable_getRefCount(*entry)==1);
		entry->release();
	}
}
// PT: needed because Gu::BVH is not a PxRefCounted object, although it derives from RefCountable
// Non-template overload for Gu::BVH (see comment above): uses the BVH's own
// getRefCount() instead of RefCountable_getRefCount.
static void releaseObjects(PxCoalescedHashSet<Gu::BVH*>& objects)
{
	for(;;)
	{
		if(!objects.size())
			break;
		Gu::BVH* bvh = objects.getEntries()[0];
		PX_ASSERT(bvh->getRefCount()==1);
		bvh->release();
	}
}
// Releases all still-registered objects, then self-destructs the factory.
void MeshFactory::release()
{
	// Release all objects in case the user didn't do it
	releaseObjects(mTriangleMeshes);
	releaseObjects(mTetrahedronMeshes);
	releaseObjects(mSoftBodyMeshes);
	releaseObjects(mConvexMeshes);
	releaseObjects(mHeightFields);
	releaseObjects(mBVHs);	// picks the non-template overload (Gu::BVH is not PxRefCounted)
	PX_DELETE_THIS;
}
// Inserts 'element' into 'hash', optionally guarded by 'mutex'. Null
// elements are silently ignored; a null mutex means "no locking".
template <typename T>
static void addToHash(PxCoalescedHashSet<T*>& hash, T* element, PxMutex* mutex)
{
	if(!element)
		return;
	if(mutex)
	{
		mutex->lock();
		hash.insert(element);
		mutex->unlock();
	}
	else
	{
		hash.insert(element);
	}
}
///////////////////////////////////////////////////////////////////////////////
// Reads 'nbIndices' 8-bit indices from the stream and widens each byte to the
// destination width (16- or 32-bit). Single bytes never need an endian swap.
static void read8BitIndices(PxInputStream& stream, void* tris, PxU32 nbIndices, const bool has16BitIndices)
{
	if(has16BitIndices)
	{
		PxU16* dst16 = reinterpret_cast<PxU16*>(tris);
		for(PxU32 i=0; i<nbIndices; i++)
		{
			PxU8 idx;
			stream.read(&idx, sizeof(PxU8));
			dst16[i] = idx;
		}
	}
	else
	{
		PxU32* dst32 = reinterpret_cast<PxU32*>(tris);
		for(PxU32 i=0; i<nbIndices; i++)
		{
			PxU8 idx;
			stream.read(&idx, sizeof(PxU8));
			dst32[i] = idx;
		}
	}
}
// Reads 'nbIndices' 16-bit indices, converting to the destination width and
// endian-swapping when 'mismatch' is set.
static void read16BitIndices(PxInputStream& stream, void* tris, PxU32 nbIndices, const bool has16BitIndices, const bool mismatch)
{
	if(has16BitIndices)
	{
		// Same width as destination: bulk-read, then swap in place if needed.
		PxU16* dst16 = reinterpret_cast<PxU16*>(tris);
		stream.read(dst16, nbIndices*sizeof(PxU16));
		if(mismatch)
		{
			for(PxU32 i=0; i<nbIndices; i++)
				flip(dst16[i]);
		}
	}
	else
	{
		// Widening: read one value at a time, swap, then store as 32-bit.
		PxU32* dst32 = reinterpret_cast<PxU32*>(tris);
		for(PxU32 i=0; i<nbIndices; i++)
		{
			PxU16 idx;
			stream.read(&idx, sizeof(PxU16));
			if(mismatch)
				flip(idx);
			dst32[i] = idx;
		}
	}
}
// Reads 'nbIndices' 32-bit indices, converting to the destination width and
// endian-swapping when 'mismatch' is set. Narrowing to 16 bits goes through
// PxTo16, which checks the value fits.
static void read32BitIndices(PxInputStream& stream, void* tris, PxU32 nbIndices, const bool has16BitIndices, const bool mismatch)
{
	if(has16BitIndices)
	{
		// Narrowing: read one value at a time, swap, then store as 16-bit.
		PxU16* dst16 = reinterpret_cast<PxU16*>(tris);
		for(PxU32 i=0; i<nbIndices; i++)
		{
			PxU32 idx;
			stream.read(&idx, sizeof(PxU32));
			if(mismatch)
				flip(idx);
			dst16[i] = PxTo16(idx);
		}
	}
	else
	{
		// Same width as destination: bulk-read, then swap in place if needed.
		PxU32* dst32 = reinterpret_cast<PxU32*>(tris);
		stream.read(dst32, nbIndices*sizeof(PxU32));
		if(mismatch)
		{
			for(PxU32 i=0; i<nbIndices; i++)
				flip(dst32[i]);
		}
	}
}
// Deserializes a cooked triangle mesh ('MESH' binary image) from 'stream'
// into a freshly allocated TriangleMeshData (RTree-based for BVH33, BV4-based
// for BVH34, depending on the serialized midphase ID). Returns NULL on
// header/version/midphase errors. 'mismatch' is true when the stream's
// endianness differs from the platform's; all multi-byte fields are swapped
// on the fly in that case. The read order below mirrors the cooking write
// order exactly — do not reorder.
static TriangleMeshData* loadMeshData(PxInputStream& stream)
{
	// Import header
	PxU32 version;
	bool mismatch;
	if(!readHeader('M', 'E', 'S', 'H', version, mismatch, stream))
		return NULL;
	PxU32 midphaseID = PxMeshMidPhase::eBVH33; // Default before version 14
	if(version>=14) // this refers to PX_MESH_VERSION
		midphaseID = readDword(mismatch, stream);
	// Check if old (incompatible) mesh format is loaded
	if (version <= 9) // this refers to PX_MESH_VERSION
	{
		outputError<PxErrorCode::eINTERNAL_ERROR>(__LINE__, "Loading triangle mesh failed: "
			"Deprecated mesh cooking format. Please recook your mesh in a new cooking format.");
		PX_ALWAYS_ASSERT_MESSAGE("Obsolete cooked mesh found. Mesh version has been updated, please recook your meshes.");
		return NULL;
	}
	// Import serialization flags
	const PxU32 serialFlags = readDword(mismatch, stream);
	// Import misc values
	if (version <= 12) // this refers to PX_MESH_VERSION
	{
		// convexEdgeThreshold was removed in 3.4.0
		readFloat(mismatch, stream);
	}
	TriangleMeshData* data;
	if(midphaseID==PxMeshMidPhase::eBVH33)
		data = PX_NEW(RTreeTriangleData);
	else if(midphaseID==PxMeshMidPhase::eBVH34)
		data = PX_NEW(BV4TriangleData);
	else return NULL;
	// Import mesh
	PxVec3* verts = data->allocateVertices(readDword(mismatch, stream));
	const PxU32 nbTris = readDword(mismatch, stream);
	// force32: no 8/16-bit flag present means indices were serialized as 32-bit.
	const bool force32 = (serialFlags & (IMSF_8BIT_INDICES|IMSF_16BIT_INDICES)) == 0;
	//ML: this will allocate CPU triangle indices and GPU triangle indices if we have GRB data built
	void* tris = data->allocateTriangles(nbTris, force32, serialFlags & IMSF_GRB_DATA);
	stream.read(verts, sizeof(PxVec3)*data->mNbVertices);
	if(mismatch)
	{
		for(PxU32 i=0;i<data->mNbVertices;i++)
		{
			flip(verts[i].x);
			flip(verts[i].y);
			flip(verts[i].z);
		}
	}
	//TODO: stop support for format conversion on load!!
	const PxU32 nbIndices = 3*data->mNbTriangles;
	if(serialFlags & IMSF_8BIT_INDICES)
		read8BitIndices(stream, tris, nbIndices, data->has16BitIndices());
	else if(serialFlags & IMSF_16BIT_INDICES)
		read16BitIndices(stream, tris, nbIndices, data->has16BitIndices(), mismatch);
	else
		read32BitIndices(stream, tris, nbIndices, data->has16BitIndices(), mismatch);
	// Optional per-triangle material indices.
	if(serialFlags & IMSF_MATERIALS)
	{
		PxU16* materials = data->allocateMaterials();
		stream.read(materials, sizeof(PxU16)*data->mNbTriangles);
		if(mismatch)
		{
			for(PxU32 i=0;i<data->mNbTriangles;i++)
				flip(materials[i]);
		}
	}
	// Optional cooked-to-original triangle remap table.
	if(serialFlags & IMSF_FACE_REMAP)
	{
		PxU32* remap = data->allocateFaceRemap();
		readIndices(readDword(mismatch, stream), data->mNbTriangles, remap, stream, mismatch);
	}
	// Optional per-edge adjacency data (3 entries per triangle).
	if(serialFlags & IMSF_ADJACENCIES)
	{
		PxU32* adj = data->allocateAdjacencies();
		stream.read(adj, sizeof(PxU32)*data->mNbTriangles*3);
		if(mismatch)
		{
			for(PxU32 i=0;i<data->mNbTriangles*3;i++)
				flip(adj[i]);
		}
	}
	// PT: TODO better
	// Midphase structure: RTree for BVH33, BV4 tree for BVH34.
	if(midphaseID==PxMeshMidPhase::eBVH33)
	{
		if(!static_cast<RTreeTriangleData*>(data)->mRTree.load(stream, version, mismatch))
		{
			outputError<PxErrorCode::eINTERNAL_ERROR>(__LINE__, "RTree binary image load error.");
			PX_DELETE(data);
			return NULL;
		}
	}
	else if(midphaseID==PxMeshMidPhase::eBVH34)
	{
		BV4TriangleData* bv4data = static_cast<BV4TriangleData*>(data);
		if(!bv4data->mBV4Tree.load(stream, mismatch))
		{
			outputError<PxErrorCode::eINTERNAL_ERROR>(__LINE__, "BV4 binary image load error.");
			PX_DELETE(data);
			return NULL;
		}
		// Hook the BV4 tree up to the in-memory vertex/index buffers.
		bv4data->mMeshInterface.setNbTriangles(nbTris);
		bv4data->mMeshInterface.setNbVertices(data->mNbVertices);
		if(data->has16BitIndices())
			bv4data->mMeshInterface.setPointers(NULL, reinterpret_cast<IndTri16*>(tris), verts);
		else
			bv4data->mMeshInterface.setPointers(reinterpret_cast<IndTri32*>(tris), NULL, verts);
		bv4data->mBV4Tree.mMeshInterface = &bv4data->mMeshInterface;
	}
	else PX_ASSERT(0);
	// Import local bounds
	data->mGeomEpsilon = readFloat(mismatch, stream);
	readFloatBuffer(&data->mAABB.minimum.x, 6, mismatch, stream);
	// Optional per-triangle extra data (raw bytes, no endian conversion needed).
	PxU32 nb = readDword(mismatch, stream);
	if(nb)
	{
		PX_ASSERT(nb==data->mNbTriangles);
		data->allocateExtraTrigData();
		// No need to convert those bytes
		stream.read(data->mExtraTrigData, nb*sizeof(PxU8));
	}
	// GPU (GRB) mirror data: indices, adjacency, remaps and the BV32 tree.
	if(serialFlags & IMSF_GRB_DATA)
	{
		PxU32 GRB_meshAdjVerticiesTotal = 0;
		if(version < 15)
			GRB_meshAdjVerticiesTotal = readDword(mismatch, stream);
		//read grb triangle indices
		PX_ASSERT(data->mGRB_primIndices);
		if(serialFlags & IMSF_8BIT_INDICES)
			read8BitIndices(stream, data->mGRB_primIndices, nbIndices, data->has16BitIndices());
		else if(serialFlags & IMSF_16BIT_INDICES)
			read16BitIndices(stream, data->mGRB_primIndices, nbIndices, data->has16BitIndices(), mismatch);
		else
			read32BitIndices(stream, data->mGRB_primIndices, nbIndices, data->has16BitIndices(), mismatch);
		data->mGRB_primAdjacencies = PX_ALLOCATE(PxU32, data->mNbTriangles*4, "mGRB_primAdjacencies");
		data->mGRB_faceRemap = PX_ALLOCATE(PxU32, data->mNbTriangles, "mGRB_faceRemap");
		if(serialFlags & IMSF_GRB_INV_REMAP)
			data->mGRB_faceRemapInverse = PX_ALLOCATE(PxU32, data->mNbTriangles, "mGRB_faceRemapInverse");
		stream.read(data->mGRB_primAdjacencies, sizeof(PxU32)*data->mNbTriangles*4);
		if (version < 15)
		{
			// Legacy per-vertex adjacency arrays: read and discarded.
			//stream.read(data->mGRB_vertValency, sizeof(PxU32)*data->mNbVertices);
			for (PxU32 i = 0; i < data->mNbVertices; ++i)
				readDword(mismatch, stream);
			//stream.read(data->mGRB_adjVertStart, sizeof(PxU32)*data->mNbVertices);
			for (PxU32 i = 0; i < data->mNbVertices; ++i)
				readDword(mismatch, stream);
			//stream.read(data->mGRB_adjVertices, sizeof(PxU32)*GRB_meshAdjVerticiesTotal);
			for (PxU32 i = 0; i < GRB_meshAdjVerticiesTotal; ++i)
				readDword(mismatch, stream);
		}
		stream.read(data->mGRB_faceRemap, sizeof(PxU32)*data->mNbTriangles);
		if(data->mGRB_faceRemapInverse)
			stream.read(data->mGRB_faceRemapInverse, sizeof(PxU32)*data->mNbTriangles);
		if(mismatch)
		{
			for(PxU32 i=0;i<data->mNbTriangles*4;i++)
				flip(reinterpret_cast<PxU32 *>(data->mGRB_primIndices)[i]);
			for(PxU32 i=0;i<data->mNbTriangles*4;i++)
				flip(reinterpret_cast<PxU32 *>(data->mGRB_primAdjacencies)[i]);
		}
		//read BV32
		data->mGRB_BV32Tree = PX_NEW(BV32Tree);
		if (!data->mGRB_BV32Tree->load(stream, mismatch))
		{
			outputError<PxErrorCode::eINTERNAL_ERROR>(__LINE__, "BV32 binary image load error.");
			PX_DELETE(data);
			return NULL;
		}
		if (serialFlags & IMSF_VERT_MAPPING)
		{
			//import vertex mapping data
			data->mNbTrianglesReferences = readDword(mismatch, stream);
			data->mAccumulatedTrianglesRef = PX_ALLOCATE(PxU32, data->mNbVertices, "mAccumulatedTrianglesRef");
			data->mTrianglesReferences = PX_ALLOCATE(PxU32, data->mNbTrianglesReferences, "mTrianglesReferences");
			stream.read(data->mAccumulatedTrianglesRef, data->mNbVertices * sizeof(PxU32));
			stream.read(data->mTrianglesReferences, data->mNbTrianglesReferences * sizeof(PxU32));
		}
	}
	// Optional signed-distance-field data (dense grid + sparse subgrids).
	if (serialFlags & IMSF_SDF)
	{
		// Import sdf
		SDF& sdfData = data->mSdfData;
		sdfData.mMeshLower.x = readFloat(mismatch, stream);
		sdfData.mMeshLower.y = readFloat(mismatch, stream);
		sdfData.mMeshLower.z = readFloat(mismatch, stream);
		sdfData.mSpacing = readFloat(mismatch, stream);
		sdfData.mDims.x = readDword(mismatch, stream);
		sdfData.mDims.y = readDword(mismatch, stream);
		sdfData.mDims.z = readDword(mismatch, stream);
		sdfData.mNumSdfs = readDword(mismatch, stream);
		sdfData.mNumSubgridSdfs = readDword(mismatch, stream);
		sdfData.mNumStartSlots = readDword(mismatch, stream);
		sdfData.mSubgridSize = readDword(mismatch, stream);
		sdfData.mSdfSubgrids3DTexBlockDim.x = readDword(mismatch, stream);
		sdfData.mSdfSubgrids3DTexBlockDim.y = readDword(mismatch, stream);
		sdfData.mSdfSubgrids3DTexBlockDim.z = readDword(mismatch, stream);
		sdfData.mSubgridsMinSdfValue = readFloat(mismatch, stream);
		sdfData.mSubgridsMaxSdfValue = readFloat(mismatch, stream);
		sdfData.mBytesPerSparsePixel = readDword(mismatch, stream);
		PxReal* sdf = sdfData.allocateSdfs(sdfData.mMeshLower, sdfData.mSpacing, sdfData.mDims.x, sdfData.mDims.y, sdfData.mDims.z,
			sdfData.mSubgridSize, sdfData.mSdfSubgrids3DTexBlockDim.x, sdfData.mSdfSubgrids3DTexBlockDim.y, sdfData.mSdfSubgrids3DTexBlockDim.z,
			sdfData.mSubgridsMinSdfValue, sdfData.mSubgridsMaxSdfValue, sdfData.mBytesPerSparsePixel);
		stream.read(sdf, sizeof(PxReal) * sdfData.mNumSdfs);
		readByteBuffer(sdfData.mSubgridSdf, sdfData.mNumSubgridSdfs, stream);
		readIntBuffer(sdfData.mSubgridStartSlots, sdfData.mNumStartSlots, mismatch, stream);
	}
	// Optional precomputed mass properties.
	if (serialFlags & IMSF_INERTIA)
	{
		// Import inertia
		stream.read(&data->mMass, sizeof(PxReal));
		readFloatBuffer(&data->mInertia(0, 0), 9, mismatch, stream);
		readFloatBuffer(&data->mLocalCenterOfMass.x, 3, mismatch, stream);
	}
	return data;
}
// Dispatches to the matching index reader based on the serialized index
// width encoded in 'serialFlags' (8-bit, 16-bit, or 32-bit by default).
static void readIndices(const PxU32 serialFlags, void* indices, const PxU32 nbIndices,
						const bool has16BitIndices, const bool mismatch, PxInputStream& stream)
{
	if(serialFlags & IMSF_8BIT_INDICES)
	{
		read8BitIndices(stream, indices, nbIndices, has16BitIndices);
		return;
	}
	if(serialFlags & IMSF_16BIT_INDICES)
	{
		read16BitIndices(stream, indices, nbIndices, has16BitIndices, mismatch);
		return;
	}
	read32BitIndices(stream, indices, nbIndices, has16BitIndices, mismatch);
}
// Registers a triangle mesh in the factory's tracking set; 'lock' guards the
// insertion with the tracking mutex. Also notifies OmniPVD listeners (no-op
// when PX_SUPPORT_OMNI_PVD is disabled).
void MeshFactory::addTriangleMesh(TriangleMesh* np, bool lock)
{
	addToHash(mTriangleMeshes, np, lock ? &mTrackingMutex : NULL);
	OMNI_PVD_NOTIFY_ADD(np);
}
// Instantiates the runtime triangle mesh matching the cooked midphase type
// and registers it with the factory. Returns NULL for unknown midphase types.
PxTriangleMesh* MeshFactory::createTriangleMesh(TriangleMeshData& data)
{
	TriangleMesh* np = NULL;
	switch(data.mType)
	{
		case PxMeshMidPhase::eBVH33:
			PX_NEW_SERIALIZED(np, RTreeTriangleMesh)(this, data);
			break;
		case PxMeshMidPhase::eBVH34:
			PX_NEW_SERIALIZED(np, BV4TriangleMesh)(this, data);
			break;
		default:
			return NULL;
	}
	if(np)
		addTriangleMesh(np);
	return np;
}
// data injected by cooking lib for runtime cooking
// 'data' must point to a TriangleMeshData produced by the cooking library;
// this is just a type-erased bridge to the typed overload above.
PxTriangleMesh* MeshFactory::createTriangleMesh(void* data)
{
	return createTriangleMesh(*reinterpret_cast<TriangleMeshData*>(data));
}
// Deserializes a cooked mesh stream into a temporary data object, builds the
// runtime mesh from it, then discards the intermediate representation.
// Returns NULL if the stream cannot be parsed.
PxTriangleMesh* MeshFactory::createTriangleMesh(PxInputStream& desc)
{
	TriangleMeshData* meshData = ::loadMeshData(desc);
	if(!meshData)
		return NULL;
	PxTriangleMesh* mesh = createTriangleMesh(*meshData);
	PX_DELETE(meshData);
	return mesh;
}
// Unregisters a triangle mesh from the factory's tracking set and notifies
// OmniPVD listeners. Returns true if the mesh was actually registered.
bool MeshFactory::removeTriangleMesh(PxTriangleMesh& m)
{
	TriangleMesh* mesh = static_cast<TriangleMesh*>(&m);
	OMNI_PVD_NOTIFY_REMOVE(mesh);
	PxMutex::ScopedLock lock(mTrackingMutex);
	return mTriangleMeshes.erase(mesh);
}
// Thread-safe count of currently registered triangle meshes.
PxU32 MeshFactory::getNbTriangleMeshes() const
{
	PxMutex::ScopedLock lock(mTrackingMutex);
	return mTriangleMeshes.size();
}
// Copies up to 'bufferSize' registered mesh pointers into 'userBuffer',
// starting at 'startIndex'; returns the number of pointers written.
PxU32 MeshFactory::getTriangleMeshes(PxTriangleMesh** userBuffer, PxU32 bufferSize, PxU32 startIndex) const
{
	PxMutex::ScopedLock lock(mTrackingMutex);
	return getArrayOfPointers(userBuffer, bufferSize, startIndex, mTriangleMeshes.getEntries(), mTriangleMeshes.size());
}
///////////////////////////////////////////////////////////////////////////////
// Deserializes a cooked tetrahedron mesh ('TEME' binary image) from 'stream'
// into a freshly allocated TetrahedronMeshData. Returns NULL if the header is
// invalid. 'mismatch' is true when the stream's endianness differs from the
// platform's; the vertex and index data are swapped on the fly in that case.
static TetrahedronMeshData* loadTetrahedronMeshData(PxInputStream& stream)
{
	// Import header
	PxU32 version;
	bool mismatch;
	if (!readHeader('T', 'E', 'M', 'E', version, mismatch, stream))
		return NULL;
	// Import serialization flags
	const PxU32 serialFlags = readDword(mismatch, stream);
	TetrahedronMeshData* data = PX_NEW(TetrahedronMeshData);
	// Import mesh
	const PxU32 nbVerts = readDword(mismatch, stream);
	PxVec3* verts = data->allocateVertices(nbVerts);
	//const PxU32 nbSurfaceTriangles = readDword(mismatch, stream);
	const PxU32 nbTetrahedrons = readDword(mismatch, stream);
	//ML: this will allocate CPU tetrahedron indices and GPU tetrahedron indices and other GPU data if we have GRB data built
	//void* tets = data->allocateTetrahedrons(nbTetrahedrons, serialFlags & IMSF_GRB_DATA);
	data->allocateTetrahedrons(nbTetrahedrons, 1);
	void* tets = data->mTetrahedrons;
	stream.read(verts, sizeof(PxVec3)*data->mNbVertices);
	//stream.read(restPoses, sizeof(PxMat33) * data->mNbTetrahedrons);
	if (mismatch)
	{
		for (PxU32 i = 0; i < data->mNbVertices; i++)
		{
			flip(verts[i].x);
			flip(verts[i].y);
			flip(verts[i].z);
		}
	}
	//TODO: stop support for format conversion on load!!
	// 4 indices per tetrahedron; width/endianness handled by the dispatcher.
	const PxU32 nbTetIndices = 4 * data->mNbTetrahedrons;
	readIndices(serialFlags, tets, nbTetIndices, data->has16BitIndices(), mismatch, stream);
	// Import local bounds
	data->mGeomEpsilon = readFloat(mismatch, stream);
	readFloatBuffer(&data->mAABB.minimum.x, 6, mismatch, stream);
	return data;
}
static bool loadSoftBodyMeshData(PxInputStream& stream, SoftBodyMeshData& data)
{
// Import header
PxU32 version;
bool mismatch;
if (!readHeader('S', 'O', 'M', 'E', version, mismatch, stream))
return false;
// Import serialization flags
const PxU32 serialFlags = readDword(mismatch, stream);
// Import mesh
const PxU32 nbVerts = readDword(mismatch, stream);
PxVec3* verts = data.mCollisionMesh.allocateVertices(nbVerts);
//const PxU32 nbSurfaceTriangles = readDword(mismatch, stream);
const PxU32 nbTetrahedrons= readDword(mismatch, stream);
//ML: this will allocate CPU tetrahedron indices and GPU tetrahedron indices and other GPU data if we have GRB data built
//void* tets = data.allocateTetrahedrons(nbTetrahedrons, serialFlags & IMSF_GRB_DATA);
data.mCollisionMesh.allocateTetrahedrons(nbTetrahedrons, 1);
if (serialFlags & IMSF_GRB_DATA)
data.mCollisionData.allocateCollisionData(nbTetrahedrons);
void* tets = data.mCollisionMesh.mTetrahedrons;
//void* surfaceTriangles = data.mCollisionData.allocateSurfaceTriangles(nbSurfaceTriangles);
//void* restPoses = data.mTetraRestPoses;
stream.read(verts, sizeof(PxVec3)*nbVerts);
//stream.read(restPoses, sizeof(PxMat33) * data.mNbTetrahedrons);
if (mismatch)
{
for (PxU32 i = 0; i< nbVerts; i++)
{
flip(verts[i].x);
flip(verts[i].y);
flip(verts[i].z);
}
}
//TODO: stop support for format conversion on load!!
const PxU32 nbTetIndices = 4 * nbTetrahedrons;
readIndices(serialFlags, tets, nbTetIndices, data.mCollisionMesh.has16BitIndices(), mismatch, stream);
//const PxU32 nbSurfaceTriangleIndices = 3 * nbSurfaceTriangles;
//readIndices(serialFlags, surfaceTriangles, nbSurfaceTriangleIndices, data.mCollisionMesh.has16BitIndices(), mismatch, stream);
////using IMSF_ADJACENCIES for tetMesh tetrahedron surface hint
//if (serialFlags & IMSF_ADJACENCIES)
//{
// PxU8* surfaceHints = reinterpret_cast<PxU8*>(data.mTetraSurfaceHint);
// stream.read(surfaceHints, sizeof(PxU8)*data.mNbTetrahedrons);
//}
if (serialFlags & IMSF_MATERIALS)
{
PxU16* materials = data.mCollisionMesh.allocateMaterials();
stream.read(materials, sizeof(PxU16)*nbTetrahedrons);
if (mismatch)
{
for (PxU32 i = 0; i < nbTetrahedrons; i++)
flip(materials[i]);
}
}
if (serialFlags & IMSF_FACE_REMAP)
{
PxU32* remap = data.mCollisionData.allocateFaceRemap(nbTetrahedrons);
readIndices(readDword(mismatch, stream), nbTetrahedrons, remap, stream, mismatch);
}
/*if (serialFlags & IMSF_ADJACENCIES)
{
PxU32* adj = data.allocateAdjacencies();
stream.read(adj, sizeof(PxU32)*data.mNbTetrahedrons * 4);
if (mismatch)
{
for (PxU32 i = 0; i<data.mNbTetrahedrons * 4; i++)
flip(adj[i]);
}
}*/
SoftBodyMeshData* bv4data = &data;
if (!bv4data->mCollisionData.mBV4Tree.load(stream, mismatch))
{
outputError<PxErrorCode::eINTERNAL_ERROR>(__LINE__, "BV4 binary image load error.");
//PX_DELETE(data);
return false;
}
bv4data->mCollisionData.mMeshInterface.setNbTetrahedrons(nbTetrahedrons);
bv4data->mCollisionData.mMeshInterface.setNbVertices(nbVerts);
if (data.mCollisionMesh.has16BitIndices())
bv4data->mCollisionData.mMeshInterface.setPointers(NULL, reinterpret_cast<IndTetrahedron16*>(tets), verts);
else
bv4data->mCollisionData.mMeshInterface.setPointers(reinterpret_cast<IndTetrahedron32*>(tets), NULL, verts);
bv4data->mCollisionData.mBV4Tree.mMeshInterface = &bv4data->mCollisionData.mMeshInterface;
// Import local bounds
data.mCollisionMesh.mGeomEpsilon = readFloat(mismatch, stream);
readFloatBuffer(&data.mCollisionMesh.mAABB.minimum.x, 6, mismatch, stream);
if (serialFlags & IMSF_GRB_DATA)
{
/*PxU32 GRB_meshAdjVerticiesTotal = 0;
if (version < 15)
GRB_meshAdjVerticiesTotal = readDword(mismatch, stream);*/
//read grb tetrahedron indices
PX_ASSERT(data.mCollisionData.mGRB_primIndices);
//read tetrahedron indices
readIndices(serialFlags, data.mCollisionData.mGRB_primIndices, nbTetIndices, data.mCollisionMesh.has16BitIndices(), mismatch, stream);
//data.mGRB_primAdjacencies = static_cast<void *>(PX_NEW(PxU32)[data.mNbTetrahedrons * 4]);
//data.mGRB_surfaceTriIndices = static_cast<void *>(PX_NEW(PxU32)[data.mNbTriangles * 3]);
data.mCollisionData.mGRB_faceRemap = PX_ALLOCATE(PxU32, data.mCollisionMesh.mNbTetrahedrons, "mGRB_faceRemap");
data.mCollisionData.mGRB_faceRemapInverse = PX_ALLOCATE(PxU32, data.mCollisionMesh.mNbTetrahedrons, "mGRB_faceRemapInverse");
//data.mGRB_surfaceTriangleIndice = PX_NEW(PxU32)[data.mNbSurfaceTriangles * 3];
//stream.read(data.mGRB_primAdjacencies, sizeof(PxU32)*data.mNbTetrahedrons * 4);
stream.read(data.mCollisionData.mGRB_tetraSurfaceHint, sizeof(PxU8) * data.mCollisionMesh.mNbTetrahedrons);
stream.read(data.mCollisionData.mGRB_faceRemap, sizeof(PxU32) * data.mCollisionMesh.mNbTetrahedrons);
stream.read(data.mCollisionData.mGRB_faceRemapInverse, sizeof(PxU32) * data.mCollisionMesh.mNbTetrahedrons);
//stream.read(data.mGRB_surfaceTriangleIndice, sizeof(PxU32) * data.mNbSurfaceTriangles * 3);
stream.read(data.mCollisionData.mTetraRestPoses, sizeof(PxMat33) * nbTetrahedrons);
if (mismatch)
{
for (PxU32 i = 0; i<data.mCollisionMesh.mNbTetrahedrons * 4; i++)
flip(reinterpret_cast<PxU32 *>(data.mCollisionData.mGRB_primIndices)[i]);
}
//read BV32
data.mCollisionData.mGRB_BV32Tree = PX_NEW(BV32Tree);
if (!data.mCollisionData.mGRB_BV32Tree->load(stream, mismatch))
{
outputError<PxErrorCode::eINTERNAL_ERROR>(__LINE__, "BV32 binary image load error.");
//PX_DELETE(data);
return false;
}
const PxU32 nbGridModelTetrahedrons = readDword(mismatch, stream);
const PxU32 nbGridModelVertices = readDword(mismatch, stream);
const PxU32 nbGridModelPartitions = readDword(mismatch, stream);
const PxU32 nbGMMaxTetsPerPartition = readDword(mismatch, stream);
const PxU32 nbGMRemapOutputSize = readDword(mismatch, stream);
PxU32 numTetsPerElement = 1;
if(version >= 2)
numTetsPerElement = readDword(mismatch, stream);
const PxU32 nbGMTotalTetReferenceCount = readDword(mismatch, stream);
const PxU32 nbTetRemapSize = readDword(mismatch, stream);
const PxU32 numVertsPerElement = (numTetsPerElement == 5 || numTetsPerElement == 6) ? 8 : 4;
const PxU32 numSimElements = nbGridModelTetrahedrons / numTetsPerElement;
data.mSimulationData.mGridModelMaxTetsPerPartitions = nbGMMaxTetsPerPartition;
data.mSimulationData.mNumTetsPerElement = numTetsPerElement;
data.mMappingData.mTetsRemapSize = nbTetRemapSize;
/*data.allocateGridModelData(nbGridModelTetrahedrons, nbGridModelVertices,
data.mCollisionMesh.mNbVertices, nbGridModelPartitions, nbGMRemapOutputSize,
nbGMTotalTetReferenceCount, nbTetRemapSize, data.mCollisionMesh.mNbTetrahedrons,
serialFlags & IMSF_GRB_DATA);*/
data.mSimulationMesh.allocateTetrahedrons(nbGridModelTetrahedrons, serialFlags & IMSF_GRB_DATA);
data.mSimulationMesh.allocateVertices(nbGridModelVertices, serialFlags & IMSF_GRB_DATA);
data.mSimulationData.allocateGridModelData(nbGridModelTetrahedrons, nbGridModelVertices,
data.mCollisionMesh.mNbVertices, nbGridModelPartitions, nbGMRemapOutputSize, numTetsPerElement, serialFlags & IMSF_GRB_DATA);
data.mMappingData.allocatemappingData(data.mCollisionMesh.mNbVertices, nbTetRemapSize, data.mCollisionMesh.mNbTetrahedrons, serialFlags & IMSF_GRB_DATA);
data.mMappingData.allocateTetRefData(nbGMTotalTetReferenceCount, data.mCollisionMesh.mNbVertices, serialFlags & IMSF_GRB_DATA);
const PxU32 nbGridModelIndices = 4 * nbGridModelTetrahedrons;
readIndices(serialFlags, data.mSimulationMesh.mTetrahedrons, nbGridModelIndices, data.mSimulationMesh.has16BitIndices(), mismatch, stream);
//stream.read(data.mGridModelVerticesInvMass, sizeof(PxVec4) * nbGridModelVertices);
stream.read(data.mSimulationMesh.mVertices, sizeof(PxVec3) * nbGridModelVertices);
if (serialFlags & IMSF_MATERIALS)
{
PxU16* materials = data.mSimulationMesh.allocateMaterials();
stream.read(materials, sizeof(PxU16)*nbGridModelTetrahedrons);
if (mismatch)
{
for (PxU32 i = 0; i < nbTetrahedrons; i++)
flip(materials[i]);
}
}
stream.read(data.mSimulationData.mGridModelInvMass, sizeof(PxReal) * nbGridModelVertices);
stream.read(data.mSimulationData.mGridModelTetraRestPoses, sizeof(PxMat33) * nbGridModelTetrahedrons);
stream.read(data.mSimulationData.mGridModelOrderedTetrahedrons, sizeof(PxU32) * numSimElements);
stream.read(data.mSimulationData.mGMRemapOutputCP, sizeof(PxU32) * numSimElements * numVertsPerElement);
stream.read(data.mSimulationData.mGMAccumulatedPartitionsCP, sizeof(PxU32) * nbGridModelPartitions);
stream.read(data.mSimulationData.mGMAccumulatedCopiesCP, sizeof(PxU32) * data.mSimulationMesh.mNbVertices);
stream.read(data.mMappingData.mCollisionAccumulatedTetrahedronsRef, sizeof(PxU32) * data.mCollisionMesh.mNbVertices);
stream.read(data.mMappingData.mCollisionTetrahedronsReferences, sizeof(PxU32) * data.mMappingData.mCollisionNbTetrahedronsReferences);
stream.read(data.mMappingData.mCollisionSurfaceVertsHint, sizeof(PxU8) * data.mCollisionMesh.mNbVertices);
stream.read(data.mMappingData.mCollisionSurfaceVertToTetRemap, sizeof(PxU32) * data.mCollisionMesh.mNbVertices);
//stream.read(data->mVertsBarycentricInGridModel, sizeof(PxReal) * 4 * data->mNbVertices);
stream.read(data.mSimulationData.mGMPullIndices, sizeof(PxU32) * numSimElements * numVertsPerElement);
//stream.read(data->mVertsBarycentricInGridModel, sizeof(PxReal) * 4 * data->mNbVertices);
stream.read(data.mMappingData.mVertsBarycentricInGridModel, sizeof(PxReal) * 4 * data.mCollisionMesh.mNbVertices);
stream.read(data.mMappingData.mVertsRemapInGridModel, sizeof(PxU32) * data.mCollisionMesh.mNbVertices);
stream.read(data.mMappingData.mTetsRemapColToSim, sizeof(PxU32) *nbTetRemapSize);
stream.read(data.mMappingData.mTetsAccumulatedRemapColToSim, sizeof(PxU32) * data.mCollisionMesh.mNbTetrahedrons);
}
return true;
}
void MeshFactory::addTetrahedronMesh(TetrahedronMesh* np, bool lock)
{
	// Register the mesh in the factory's tracking hash (optionally under the
	// tracking mutex) and notify OmniPVD about the new object.
	PxMutex* mutex = lock ? &mTrackingMutex : NULL;
	addToHash(mTetrahedronMeshes, np, mutex);
	OMNI_PVD_NOTIFY_ADD(np);
}
void MeshFactory::addSoftBodyMesh(SoftBodyMesh* np, bool lock)
{
	// Register the mesh in the factory's tracking hash (optionally under the
	// tracking mutex) and notify OmniPVD about the new object.
	PxMutex* mutex = lock ? &mTrackingMutex : NULL;
	addToHash(mSoftBodyMeshes, np, mutex);
	OMNI_PVD_NOTIFY_ADD(np);
}
PxSoftBodyMesh* MeshFactory::createSoftBodyMesh(PxInputStream& desc)
{
	// Deserialize a softbody mesh from a cooked stream.
	// The aggregate below only holds references to these stack-local parts,
	// which is fine because createSoftBodyMesh(data) copies what it needs.
	TetrahedronMeshData simulationMesh;
	SoftBodySimulationData simulationData;
	TetrahedronMeshData collisionMesh;
	SoftBodyCollisionData collisionData;
	CollisionMeshMappingData mappingData;
	SoftBodyMeshData data(simulationMesh, simulationData, collisionMesh, collisionData, mappingData);

	if(!::loadSoftBodyMeshData(desc, data))
		return NULL;

	return createSoftBodyMesh(data);
}
PxTetrahedronMesh* MeshFactory::createTetrahedronMesh(PxInputStream& desc)
{
	// Deserialize standalone tetrahedron-mesh data from a cooked stream,
	// build the runtime mesh from it, then free the temporary data.
	TetrahedronMeshData* meshData = ::loadTetrahedronMeshData(desc);
	if(!meshData)
		return NULL;

	PxTetrahedronMesh* mesh = createTetrahedronMesh(*meshData);
	PX_DELETE(meshData);
	return mesh;
}
PxTetrahedronMesh* MeshFactory::createTetrahedronMesh(TetrahedronMeshData& data)
{
	// Build the runtime tetrahedron mesh and register it with this factory.
	TetrahedronMesh* mesh = NULL;
	PX_NEW_SERIALIZED(mesh, TetrahedronMesh)(this, data);
	if(mesh)
		addTetrahedronMesh(mesh);
	return mesh;
}
// data injected by cooking lib for runtime cooking
PxTetrahedronMesh* MeshFactory::createTetrahedronMesh(void* data)
{
	// 'data' must point to a TetrahedronMeshData produced by the cooking library.
	return createTetrahedronMesh(*reinterpret_cast<TetrahedronMeshData*>(data));
}
PxSoftBodyMesh* MeshFactory::createSoftBodyMesh(Gu::SoftBodyMeshData& data)
{
	// Build the runtime softbody mesh and register it with this factory.
	SoftBodyMesh* mesh = NULL;
	PX_NEW_SERIALIZED(mesh, SoftBodyMesh)(this, data);
	if(mesh)
		addSoftBodyMesh(mesh);
	return mesh;
}
// data injected by cooking lib for runtime cooking
PxSoftBodyMesh* MeshFactory::createSoftBodyMesh(void* data)
{
	// 'data' must point to a SoftBodyMeshData produced by the cooking library.
	return createSoftBodyMesh(*reinterpret_cast<SoftBodyMeshData*>(data));
}
bool MeshFactory::removeSoftBodyMesh(PxSoftBodyMesh& tetMesh)
{
	// Unregister the mesh from the factory. Returns true if it was tracked.
	SoftBodyMesh* mesh = static_cast<SoftBodyMesh*>(&tetMesh);
	OMNI_PVD_NOTIFY_REMOVE(mesh);
	PxMutex::ScopedLock lock(mTrackingMutex);
	return mSoftBodyMeshes.erase(mesh);
}
bool MeshFactory::removeTetrahedronMesh(PxTetrahedronMesh& tetMesh)
{
	// Unregister the mesh from the factory. Returns true if it was tracked.
	TetrahedronMesh* mesh = static_cast<TetrahedronMesh*>(&tetMesh);
	OMNI_PVD_NOTIFY_REMOVE(mesh);
	PxMutex::ScopedLock lock(mTrackingMutex);
	return mTetrahedronMeshes.erase(mesh);
}
PxU32 MeshFactory::getNbSoftBodyMeshes() const
{
	// Thread-safe count of softbody meshes currently tracked by this factory.
	PxMutex::ScopedLock lock(mTrackingMutex);
	return mSoftBodyMeshes.size();
}
PxU32 MeshFactory::getNbTetrahedronMeshes() const
{
	// Thread-safe count of tetrahedron meshes currently tracked by this factory.
	PxMutex::ScopedLock lock(mTrackingMutex);
	return mTetrahedronMeshes.size();
}
PxU32 MeshFactory::getTetrahedronMeshes(PxTetrahedronMesh** userBuffer, PxU32 bufferSize, PxU32 startIndex) const
{
	// Copies up to 'bufferSize' tracked mesh pointers, starting at 'startIndex',
	// into 'userBuffer'; returns the number of pointers written.
	PxMutex::ScopedLock lock(mTrackingMutex);
	return getArrayOfPointers(userBuffer, bufferSize, startIndex, mTetrahedronMeshes.getEntries(), mTetrahedronMeshes.size());
}
PxU32 MeshFactory::getSoftBodyMeshes(PxSoftBodyMesh** userBuffer, PxU32 bufferSize, PxU32 startIndex) const
{
	// Copies up to 'bufferSize' tracked mesh pointers, starting at 'startIndex',
	// into 'userBuffer'; returns the number of pointers written.
	PxMutex::ScopedLock lock(mTrackingMutex);
	return getArrayOfPointers(userBuffer, bufferSize, startIndex, mSoftBodyMeshes.getEntries(), mSoftBodyMeshes.size());
}
///////////////////////////////////////////////////////////////////////////////
void MeshFactory::addConvexMesh(ConvexMesh* np, bool lock)
{
	// Register the mesh in the factory's tracking hash (optionally under the
	// tracking mutex) and notify OmniPVD about the new object.
	PxMutex* mutex = lock ? &mTrackingMutex : NULL;
	addToHash(mConvexMeshes, np, mutex);
	OMNI_PVD_NOTIFY_ADD(np);
}
// data injected by cooking lib for runtime cooking
PxConvexMesh* MeshFactory::createConvexMesh(void* data)
{
	// 'data' must point to a ConvexHullInitData produced by the cooking library.
	return createConvexMesh(*reinterpret_cast<ConvexHullInitData*>(data));
}
PxConvexMesh* MeshFactory::createConvexMesh(ConvexHullInitData& data)
{
	// Build the runtime convex mesh and register it with this factory.
	ConvexMesh* mesh;
	PX_NEW_SERIALIZED(mesh, ConvexMesh)(this, data);
	if(mesh)
		addConvexMesh(mesh);
	return mesh;
}
PxConvexMesh* MeshFactory::createConvexMesh(PxInputStream& desc)
{
	// Deserialize a convex mesh from a cooked stream. Returns NULL if the
	// allocation fails or the stream does not contain valid cooked data.
	ConvexMesh* mesh;
	PX_NEW_SERIALIZED(mesh, ConvexMesh)(this);
	if(mesh)
	{
		if(mesh->load(desc))
		{
			addConvexMesh(mesh);
			return mesh;
		}
		// Loading failed: destroy the partially-constructed object.
		Cm::deletePxBase(mesh);
	}
	return NULL;
}
bool MeshFactory::removeConvexMesh(PxConvexMesh& m)
{
	// Unregister the mesh from the factory. Returns true if it was tracked.
	ConvexMesh* mesh = static_cast<ConvexMesh*>(&m);
	OMNI_PVD_NOTIFY_REMOVE(mesh);
	PxMutex::ScopedLock lock(mTrackingMutex);
	return mConvexMeshes.erase(mesh);
}
PxU32 MeshFactory::getNbConvexMeshes() const
{
	// Thread-safe count of convex meshes currently tracked by this factory.
	PxMutex::ScopedLock lock(mTrackingMutex);
	return mConvexMeshes.size();
}
PxU32 MeshFactory::getConvexMeshes(PxConvexMesh** userBuffer, PxU32 bufferSize, PxU32 startIndex) const
{
	// Copies up to 'bufferSize' tracked mesh pointers, starting at 'startIndex',
	// into 'userBuffer'; returns the number of pointers written.
	PxMutex::ScopedLock lock(mTrackingMutex);
	return getArrayOfPointers(userBuffer, bufferSize, startIndex, mConvexMeshes.getEntries(), mConvexMeshes.size());
}
///////////////////////////////////////////////////////////////////////////////
void MeshFactory::addHeightField(HeightField* np, bool lock)
{
	// Register the heightfield in the factory's tracking hash (optionally under
	// the tracking mutex) and notify OmniPVD about the new object.
	PxMutex* mutex = lock ? &mTrackingMutex : NULL;
	addToHash(mHeightFields, np, mutex);
	OMNI_PVD_NOTIFY_ADD(np);
}
PxHeightField* MeshFactory::createHeightField(void* heightFieldMeshData)
{
	// Runtime-cooking path: 'heightFieldMeshData' must point to a HeightFieldData
	// produced by the cooking library.
	HeightField* hf;
	PX_NEW_SERIALIZED(hf, HeightField)(this, *reinterpret_cast<HeightFieldData*>(heightFieldMeshData));
	if(hf)
		addHeightField(hf);
	return hf;
}
PxHeightField* MeshFactory::createHeightField(PxInputStream& stream)
{
	// Deserialize a heightfield from a cooked stream. Returns NULL if the
	// allocation fails or the stream does not contain valid cooked data.
	HeightField* hf;
	PX_NEW_SERIALIZED(hf, HeightField)(this);
	if(hf)
	{
		if(hf->load(stream))
		{
			addHeightField(hf);
			return hf;
		}
		// Loading failed: destroy the partially-constructed object.
		Cm::deletePxBase(hf);
	}
	return NULL;
}
bool MeshFactory::removeHeightField(PxHeightField& hf)
{
	// Unregister the heightfield from the factory. Returns true if it was tracked.
	HeightField* field = static_cast<HeightField*>(&hf);
	OMNI_PVD_NOTIFY_REMOVE(field);
	PxMutex::ScopedLock lock(mTrackingMutex);
	return mHeightFields.erase(field);
}
PxU32 MeshFactory::getNbHeightFields() const
{
	// Thread-safe count of heightfields currently tracked by this factory.
	PxMutex::ScopedLock lock(mTrackingMutex);
	return mHeightFields.size();
}
PxU32 MeshFactory::getHeightFields(PxHeightField** userBuffer, PxU32 bufferSize, PxU32 startIndex) const
{
	// Copies up to 'bufferSize' tracked heightfield pointers, starting at
	// 'startIndex', into 'userBuffer'; returns the number of pointers written.
	PxMutex::ScopedLock lock(mTrackingMutex);
	return getArrayOfPointers(userBuffer, bufferSize, startIndex, mHeightFields.getEntries(), mHeightFields.size());
}
///////////////////////////////////////////////////////////////////////////////
void MeshFactory::addFactoryListener(Gu::MeshFactoryListener& listener )
{
	// Listeners are tracked under the same mutex that guards the resource hashes.
	PxMutex::ScopedLock protect(mTrackingMutex);
	mFactoryListeners.pushBack(&listener);
}
void MeshFactory::removeFactoryListener(Gu::MeshFactoryListener& listener )
{
	// Remove every registered occurrence of 'listener'. replaceWithLast() moves
	// the last element into the freed slot, so the slot must be re-examined
	// instead of advancing - hence the explicit while loop.
	PxMutex::ScopedLock protect(mTrackingMutex);
	PxU32 idx = 0;
	while(idx < mFactoryListeners.size())
	{
		if(mFactoryListeners[idx] == &listener)
			mFactoryListeners.replaceWithLast(idx);
		else
			++idx;
	}
}
void MeshFactory::notifyFactoryListener(const PxBase* base, PxType typeID)
{
const PxU32 nbListeners = mFactoryListeners.size();
for(PxU32 i=0; i<nbListeners; i++)
mFactoryListeners[i]->onMeshFactoryBufferRelease(base, typeID);
}
#if PX_SUPPORT_OMNI_PVD
void MeshFactory::notifyListenersAdd(const PxBase* base)
{
for (PxU32 i = 0; i < mFactoryListeners.size(); i++)
mFactoryListeners[i]->onObjectAdd(base);
}
void MeshFactory::notifyListenersRemove(const PxBase* base)
{
for (PxU32 i = 0; i < mFactoryListeners.size(); i++)
mFactoryListeners[i]->onObjectRemove(base);
}
#endif
///////////////////////////////////////////////////////////////////////////////
void MeshFactory::addBVH(BVH* np, bool lock)
{
	// Register the BVH in the factory's tracking hash (optionally under the
	// tracking mutex) and notify OmniPVD about the new object.
	PxMutex* mutex = lock ? &mTrackingMutex : NULL;
	addToHash(mBVHs, np, mutex);
	OMNI_PVD_NOTIFY_ADD(np);
}
// data injected by cooking lib for runtime cooking
PxBVH* MeshFactory::createBVH(void* data)
{
	// 'data' must point to a BVHData produced by the cooking library.
	return createBVH(*reinterpret_cast<BVHData*>(data));
}
PxBVH* MeshFactory::createBVH(BVHData& data)
{
	// Build the runtime BVH and register it with this factory.
	BVH* bvh;
	PX_NEW_SERIALIZED(bvh, BVH)(this, data);
	if(bvh)
		addBVH(bvh);
	return bvh;
}
PxBVH* MeshFactory::createBVH(PxInputStream& desc)
{
	// Deserialize a BVH from a cooked stream. Returns NULL if the allocation
	// fails or the stream does not contain valid cooked data.
	BVH* bvh;
	PX_NEW_SERIALIZED(bvh, BVH)(this);
	if(bvh)
	{
		if(bvh->load(desc))
		{
			addBVH(bvh);
			return bvh;
		}
		// Loading failed: destroy the partially-constructed object.
		Cm::deletePxBase(bvh);
	}
	return NULL;
}
bool MeshFactory::removeBVH(PxBVH& m)
{
	// Unregister the BVH from the factory. Returns true if it was tracked.
	BVH* bvh = static_cast<BVH*>(&m);
	OMNI_PVD_NOTIFY_REMOVE(bvh);
	PxMutex::ScopedLock lock(mTrackingMutex);
	return mBVHs.erase(bvh);
}
PxU32 MeshFactory::getNbBVHs() const
{
	// Thread-safe count of BVHs currently tracked by this factory.
	PxMutex::ScopedLock lock(mTrackingMutex);
	return mBVHs.size();
}
PxU32 MeshFactory::getBVHs(PxBVH** userBuffer, PxU32 bufferSize, PxU32 startIndex) const
{
	// Copies up to 'bufferSize' tracked BVH pointers, starting at 'startIndex',
	// into 'userBuffer'; returns the number of pointers written.
	PxMutex::ScopedLock lock(mTrackingMutex);
	return getArrayOfPointers(userBuffer, bufferSize, startIndex, mBVHs.getEntries(), mBVHs.size());
}
///////////////////////////////////////////////////////////////////////////////
bool MeshFactory::remove(PxBase& obj)
{
	// Dispatch to the type-specific removal routine based on the object's
	// concrete type. Returns false for types this factory does not manage.
	switch(PxU32(obj.getConcreteType()))
	{
		case PxConcreteType::eHEIGHTFIELD:
			return removeHeightField(static_cast<PxHeightField&>(obj));
		case PxConcreteType::eCONVEX_MESH:
			return removeConvexMesh(static_cast<PxConvexMesh&>(obj));
		case PxConcreteType::eTRIANGLE_MESH_BVH33:
		case PxConcreteType::eTRIANGLE_MESH_BVH34:
			return removeTriangleMesh(static_cast<PxTriangleMesh&>(obj));
		case PxConcreteType::eTETRAHEDRON_MESH:
			return removeTetrahedronMesh(static_cast<PxTetrahedronMesh&>(obj));
		case PxConcreteType::eSOFTBODY_MESH:
			return removeSoftBodyMesh(static_cast<PxSoftBodyMesh&>(obj));
		case PxConcreteType::eBVH:
			return removeBVH(static_cast<PxBVH&>(obj));
	}
	return false;
}
///////////////////////////////////////////////////////////////////////////////
namespace
{
	// Insertion callback used by the immediate-cooking path: builds standalone
	// geometry objects (no owning MeshFactory, hence the NULL factory pointers)
	// directly from cooked data blobs.
	class StandaloneInsertionCallback : public PxInsertionCallback
	{
	public:
		StandaloneInsertionCallback() {}

		// 'data' must point to the cooked data structure matching 'type'.
		// Returns NULL (after reporting an error) for unsupported types.
		virtual PxBase* buildObjectFromData(PxConcreteType::Enum type, void* data)
		{
			switch(PxU32(type))
			{
				case PxConcreteType::eTRIANGLE_MESH_BVH33:
				{
					TriangleMesh* np;
					PX_NEW_SERIALIZED(np, RTreeTriangleMesh)(NULL, *reinterpret_cast<TriangleMeshData*>(data));
					return np;
				}
				case PxConcreteType::eTRIANGLE_MESH_BVH34:
				{
					TriangleMesh* np;
					PX_NEW_SERIALIZED(np, BV4TriangleMesh)(NULL, *reinterpret_cast<TriangleMeshData*>(data));
					return np;
				}
				case PxConcreteType::eCONVEX_MESH:
				{
					ConvexMesh* np;
					PX_NEW_SERIALIZED(np, ConvexMesh)(NULL, *reinterpret_cast<ConvexHullInitData*>(data));
					return np;
				}
				case PxConcreteType::eHEIGHTFIELD:
				{
					HeightField* np;
					PX_NEW_SERIALIZED(np, HeightField)(NULL, *reinterpret_cast<HeightFieldData*>(data));
					return np;
				}
				case PxConcreteType::eBVH:
				{
					BVH* np;
					PX_NEW_SERIALIZED(np, BVH)(NULL, *reinterpret_cast<BVHData*>(data));
					return np;
				}
				case PxConcreteType::eTETRAHEDRON_MESH:
				{
					TetrahedronMesh* np;
					PX_NEW_SERIALIZED(np, TetrahedronMesh)(NULL, *reinterpret_cast<TetrahedronMeshData*>(data));
					return np;
				}
				case PxConcreteType::eSOFTBODY_MESH:
				{
					SoftBodyMesh* np;
					PX_NEW_SERIALIZED(np, SoftBodyMesh)(NULL, *reinterpret_cast<SoftBodyMeshData*>(data));
					return np;
				}
			}

			outputError<PxErrorCode::eINTERNAL_ERROR>(__LINE__, "Inserting object failed: "
				"Object type not supported for buildObjectFromData.");
			return NULL;
		}
	}gSAIC;
}
PxInsertionCallback* physx::immediateCooking::getInsertionCallback()
{
	// Returns the shared standalone insertion callback instance defined above.
	return &gSAIC;
}
| 39,213 | C++ | 30.752227 | 155 | 0.731339 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuBVH.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_BVH_H
#define GU_BVH_H
/** \addtogroup geomutils
@{
*/
#include "geometry/PxBVH.h"
#include "CmRefCountable.h"
#include "foundation/PxVecMath.h"
#include "foundation/PxUserAllocated.h"
#include "GuAABBTreeBounds.h"
#include "GuAABBTree.h"
namespace physx
{
struct PxBVHInternalData;
namespace Gu
{
class MeshFactory;
struct BVHNode;
class ShapeData;
	// Owning container for a BVH's data: primitive bounds plus the node and
	// index buffers inherited from BVHPartialRefitData.
	class BVHData : public BVHPartialRefitData
	{
		public:
		BVHData() {}

		// Transfer constructor: steals the buffers from 'other' (which is why
		// the parameter is a non-const reference). 'other' is left with NULL
		// buffers and its bounds moved out, so its destructor frees nothing.
		BVHData(BVHData& other)
		{
			mNbIndices = other.mNbIndices;
			mNbNodes = other.mNbNodes;
			mIndices = other.mIndices;
			mNodes = other.mNodes;
			mBounds.moveFrom(other.mBounds);
			other.mIndices = NULL;
			other.mNodes = NULL;
		}

		// Ownership of the node/index buffers is keyed off mBounds.ownsMemory():
		// when the bounds memory is not owned (e.g. it was moved away), the
		// other buffers are assumed to belong to another BVHData as well.
		~BVHData()
		{
			if(mBounds.ownsMemory())
			{
				mBounds.release();
				PX_FREE(mIndices);
				PX_FREE(mNodes); // PT: TODO: fix this, unify with AABBTree version
			}
			mNbNodes = 0;
			mNbIndices = 0;
		}

		// Builds the tree from 'nbBounds' AABBs read from 'boundsData' with the
		// given byte stride. NOTE(review): 'enlargement' presumably fattens the
		// input bounds and 'bs' selects the build heuristic - confirm against
		// the implementation.
		PX_PHYSX_COMMON_API bool build(PxU32 nbBounds, const void* boundsData, PxU32 boundsStride, float enlargement, PxU32 numPrimsPerLeaf, BVHBuildStrategy bs);

		// Serializes the tree to 'stream'. NOTE(review): 'endian' presumably
		// requests byte-swapped output - confirm against the implementation.
		PX_PHYSX_COMMON_API bool save(PxOutputStream& stream, bool endian) const;

		AABBTreeBounds mBounds; // per-primitive bounds; also encodes buffer ownership (see dtor)
	};
/**
\brief Represents a BVH.
*/
	// Concrete, ref-counted implementation of the public PxBVH interface.
	// Owns its tree data in mData and (optionally) knows the MeshFactory that
	// created it so the factory can be notified on release.
	class BVH : public PxBVH, public PxUserAllocated, public Cm::RefCountable
	{
		public:
		// PT: TODO: revisit these PX_PHYSX_COMMON_API calls. At the end of the day the issue is that things like PxUserAllocated aren't exported.
		PX_PHYSX_COMMON_API BVH(MeshFactory* factory);
		// Takes over the tree data from 'data' (see BVHData's transfer ctor).
		PX_PHYSX_COMMON_API BVH(MeshFactory* factory, BVHData& data);
		// Constructs from externally-provided internal data (no factory).
		PX_PHYSX_COMMON_API BVH(const PxBVHInternalData& data);
		virtual ~BVH();

		// Builds the tree from 'nbPrims' bounds; see BVHData::build for parameters.
		PX_PHYSX_COMMON_API bool init(PxU32 nbPrims, AABBTreeBounds* bounds, const void* boundsData, PxU32 stride, BVHBuildStrategy bs, PxU32 nbPrimsPerLeaf, float enlargement);
		// Deserializes the tree from a cooked stream.
		bool load(PxInputStream& desc);
		void release();

		// PxBVH
		virtual bool raycast(const PxVec3& origin, const PxVec3& unitDir, float distance, RaycastCallback& cb, PxGeometryQueryFlags flags) const PX_OVERRIDE;
		virtual bool overlap(const PxGeometry& geom, const PxTransform& pose, OverlapCallback& cb, PxGeometryQueryFlags flags) const PX_OVERRIDE;
		virtual bool sweep(const PxGeometry& geom, const PxTransform& pose, const PxVec3& unitDir, float distance, RaycastCallback& cb, PxGeometryQueryFlags flags) const PX_OVERRIDE;
		virtual bool cull(PxU32 nbPlanes, const PxPlane* planes, OverlapCallback& cb, PxGeometryQueryFlags flags) const PX_OVERRIDE;
		virtual PxU32 getNbBounds() const PX_OVERRIDE { return mData.mNbIndices; }
		virtual const PxBounds3* getBounds() const PX_OVERRIDE { return mData.mBounds.getBounds(); }
		virtual void refit() PX_OVERRIDE;
		virtual bool updateBounds(PxU32 boundsIndex, const PxBounds3& newBounds) PX_OVERRIDE;
		virtual void partialRefit() PX_OVERRIDE;
		virtual bool traverse(TraversalCallback& cb) const PX_OVERRIDE;
		//~PxBVH

		// Cm::RefCountable
		virtual void onRefCountZero() PX_OVERRIDE;
		//~Cm::RefCountable

		// Accessors to the underlying tree data.
		PX_FORCE_INLINE const BVHNode* getNodes() const { return mData.mNodes; }
		PX_FORCE_INLINE const PxU32* getIndices() const { return mData.mIndices; }
		PX_FORCE_INLINE const BVHData& getData() const { return mData; }

		bool getInternalData(PxBVHInternalData&, bool) const;
		bool updateBoundsInternal(PxU32 localIndex, const PxBounds3& bounds);

		// PT: alternative implementations directly working on shape data
		bool overlap(const ShapeData& shapeData, OverlapCallback& cb, PxGeometryQueryFlags flags) const;
		bool sweep(const ShapeData& shapeData, const PxVec3& unitDir, float distance, RaycastCallback& cb, PxGeometryQueryFlags flags) const;

		private:
		MeshFactory* mMeshFactory; // factory to notify on release; may be NULL for standalone BVHs
		BVHData mData;             // owned tree data (nodes, indices, bounds)
	};
}
}
/** @} */
#endif
| 5,708 | C | 38.645833 | 182 | 0.710757 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuWindingNumber.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_WINDING_NUMBER_H
#define GU_WINDING_NUMBER_H
/** \addtogroup geomutils
@{
*/
#include "foundation/PxSimpleTypes.h"
#include "foundation/PxHashMap.h"
#include "foundation/PxArray.h"
#include "GuWindingNumberCluster.h"
namespace physx
{
namespace Gu
{
	struct BVHNode;

	// Cluster approximation specialized for scalar reals and PxVec3 points.
	typedef ClusterApproximationT<PxReal, PxVec3> ClusterApproximation;

	// Fast winding number of the triangle mesh at query point 'q', accelerated
	// by a BVH over the triangles plus precomputed per-node cluster data.
	PX_PHYSX_COMMON_API PxF32 computeWindingNumber(const Gu::BVHNode* tree, const PxVec3& q, const PxHashMap<PxU32, ClusterApproximation>& clusters,
		const PxU32* triangles, const PxVec3* points);

	// Same as above. NOTE(review): 'beta' presumably controls the
	// accuracy/speed trade-off of the cluster approximation - confirm against
	// the implementation.
	PX_PHYSX_COMMON_API PxF32 computeWindingNumber(const Gu::BVHNode* tree, const PxVec3& q, PxF32 beta, const PxHashMap<PxU32, ClusterApproximation>& clusters,
		const PxU32* triangles, const PxVec3* points);

	// Precomputes the per-node cluster approximations used by the fast winding
	// number queries above, starting at 'rootNodeIndex' in 'tree'.
	PX_PHYSX_COMMON_API void precomputeClusterInformation(const Gu::BVHNode* tree, const PxU32* triangles, const PxU32 numTriangles,
		const PxVec3* points, PxHashMap<PxU32, ClusterApproximation>& result, PxI32 rootNodeIndex = 0);

	//Quite slow, only useful for few query points, otherwise it is worth to construct a tree for acceleration
	PX_PHYSX_COMMON_API PxF32 computeWindingNumber(const PxVec3& q, const PxU32* triangles, const PxU32 numTriangles, const PxVec3* points);
}
}
/** @} */
#endif
| 2,951 | C | 44.415384 | 157 | 0.771264 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuGeometryQuery.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "geometry/PxGeometryQuery.h"
#include "geometry/PxSphereGeometry.h"
#include "geometry/PxBoxGeometry.h"
#include "geometry/PxPlaneGeometry.h"
#include "geometry/PxCapsuleGeometry.h"
#include "geometry/PxTriangleMeshGeometry.h"
#include "geometry/PxConvexMeshGeometry.h"
#include "geometry/PxHeightFieldGeometry.h"
#include "geometry/PxHairSystemGeometry.h"
#include "geometry/PxParticleSystemGeometry.h"
#include "geometry/PxCustomGeometry.h"
#include "foundation/PxAtomic.h"
#include "GuInternal.h"
#include "GuOverlapTests.h"
#include "GuSweepTests.h"
#include "GuRaycastTests.h"
#include "GuBoxConversion.h"
#include "GuTriangleMesh.h"
#include "GuMTD.h"
#include "GuBounds.h"
#include "GuDistancePointSegment.h"
#include "GuConvexMesh.h"
#include "GuDistancePointBox.h"
#include "GuMidphaseInterface.h"
#include "foundation/PxFPU.h"
#include "GuConvexEdgeFlags.h"
#include "GuVecBox.h"
#include "GuVecConvexHull.h"
#include "GuPCMShapeConvex.h"
#include "GuPCMContactConvexCommon.h"
using namespace physx;
using namespace Gu;
extern GeomSweepFuncs gGeomSweepFuncs;
extern GeomOverlapTable gGeomOverlapMethodTable[];
extern RaycastFunc gRaycastMap[PxGeometryType::eGEOMETRY_COUNT];
///////////////////////////////////////////////////////////////////////////////
bool PxGeometryQuery::isValid(const PxGeometry& g)
{
	// Dispatches to the geometry-type-specific isValid() check.
	// The PxU32 cast avoids compiler warnings about unhandled enum values.
	switch(PxU32(g.getType()))
	{
		case PxGeometryType::eSPHERE: return static_cast<const PxSphereGeometry&>(g).isValid();
		case PxGeometryType::ePLANE: return static_cast<const PxPlaneGeometry&>(g).isValid();
		case PxGeometryType::eCAPSULE: return static_cast<const PxCapsuleGeometry&>(g).isValid();
		case PxGeometryType::eBOX: return static_cast<const PxBoxGeometry&>(g).isValid();
		case PxGeometryType::eCONVEXMESH: return static_cast<const PxConvexMeshGeometry&>(g).isValid();
		case PxGeometryType::eTRIANGLEMESH: return static_cast<const PxTriangleMeshGeometry&>(g).isValid();
		case PxGeometryType::eHEIGHTFIELD: return static_cast<const PxHeightFieldGeometry&>(g).isValid();
		case PxGeometryType::eTETRAHEDRONMESH: return static_cast<const PxTetrahedronMeshGeometry&>(g).isValid();
		case PxGeometryType::ePARTICLESYSTEM: return static_cast<const PxParticleSystemGeometry&>(g).isValid();
		case PxGeometryType::eHAIRSYSTEM: return static_cast<const PxHairSystemGeometry&>(g).isValid();
		case PxGeometryType::eCUSTOM: return static_cast<const PxCustomGeometry&>(g).isValid();
	}
	// Unknown geometry type.
	return false;
}
///////////////////////////////////////////////////////////////////////////////
// Sweeps geom0 (the moving object: sphere, capsule, box or convex mesh)
// along 'unitDir' over 'distance' against geom1, writing the first hit into
// 'sweepHit'. Returns true if a hit was found. 'inflation' fattens geom0;
// PxHitFlag::ePRECISE_SWEEP selects the precise (non-GJK) sweep variants.
bool PxGeometryQuery::sweep(const PxVec3& unitDir, const PxReal distance,
							const PxGeometry& geom0, const PxTransform& pose0,
							const PxGeometry& geom1, const PxTransform& pose1,
							PxGeomSweepHit& sweepHit, PxHitFlags hitFlags,
							const PxReal inflation, PxGeometryQueryFlags queryFlags, PxSweepThreadContext* threadContext)
{
	// Optional denormal/rounding-mode guard, controlled by the caller.
	PX_SIMD_GUARD_CNDT(queryFlags & PxGeometryQueryFlag::eSIMD_GUARD)
	PX_CHECK_AND_RETURN_VAL(pose0.isValid(), "PxGeometryQuery::sweep(): pose0 is not valid.", false);
	PX_CHECK_AND_RETURN_VAL(pose1.isValid(), "PxGeometryQuery::sweep(): pose1 is not valid.", false);
	PX_CHECK_AND_RETURN_VAL(unitDir.isFinite(), "PxGeometryQuery::sweep(): unitDir is not valid.", false);
	PX_CHECK_AND_RETURN_VAL(PxIsFinite(distance), "PxGeometryQuery::sweep(): distance is not valid.", false);
	PX_CHECK_AND_RETURN_VAL((distance >= 0.0f && !(hitFlags & PxHitFlag::eASSUME_NO_INITIAL_OVERLAP)) || distance > 0.0f,
		"PxGeometryQuery::sweep(): sweep distance must be >=0 or >0 with eASSUME_NO_INITIAL_OVERLAP.", 0);
#if PX_CHECKED
	if(!PxGeometryQuery::isValid(geom0))
		return PxGetFoundation().error(PxErrorCode::eINVALID_PARAMETER, PX_FL, "Provided geometry 0 is not valid");

	if(!PxGeometryQuery::isValid(geom1))
		return PxGetFoundation().error(PxErrorCode::eINVALID_PARAMETER, PX_FL, "Provided geometry 1 is not valid");
#endif

	// Dispatch on the swept geometry's type; the second-level dispatch (on
	// geom1's type) happens through the per-type function tables.
	const GeomSweepFuncs& sf = gGeomSweepFuncs;

	switch(geom0.getType())
	{
		case PxGeometryType::eSPHERE:
		{
			// A sphere sweep is performed as a degenerate (zero-length) capsule sweep.
			const PxSphereGeometry& sphereGeom = static_cast<const PxSphereGeometry&>(geom0);

			const PxCapsuleGeometry capsuleGeom(sphereGeom.radius, 0.0f);
			const Capsule worldCapsule(pose0.p, pose0.p, sphereGeom.radius);

			const bool precise = hitFlags & PxHitFlag::ePRECISE_SWEEP;

			const SweepCapsuleFunc func = precise ? sf.preciseCapsuleMap[geom1.getType()] : sf.capsuleMap[geom1.getType()];

			return func(geom1, pose1, capsuleGeom, pose0, worldCapsule, unitDir, distance, sweepHit, hitFlags, inflation, threadContext);
		}

		case PxGeometryType::eCAPSULE:
		{
			const PxCapsuleGeometry& capsuleGeom = static_cast<const PxCapsuleGeometry&>(geom0);

			Capsule worldCapsule;
			getCapsule(worldCapsule, capsuleGeom, pose0);

			const bool precise = hitFlags & PxHitFlag::ePRECISE_SWEEP;

			const SweepCapsuleFunc func = precise ? sf.preciseCapsuleMap[geom1.getType()] : sf.capsuleMap[geom1.getType()];

			return func(geom1, pose1, capsuleGeom, pose0, worldCapsule, unitDir, distance, sweepHit, hitFlags, inflation, threadContext);
		}

		case PxGeometryType::eBOX:
		{
			const PxBoxGeometry& boxGeom = static_cast<const PxBoxGeometry&>(geom0);

			Box box;
			buildFrom(box, pose0.p, boxGeom.halfExtents, pose0.q);

			const bool precise = hitFlags & PxHitFlag::ePRECISE_SWEEP;

			const SweepBoxFunc func = precise ? sf.preciseBoxMap[geom1.getType()] : sf.boxMap[geom1.getType()];

			return func(geom1, pose1, boxGeom, pose0, box, unitDir, distance, sweepHit, hitFlags, inflation, threadContext);
		}

		case PxGeometryType::eCONVEXMESH:
		{
			const PxConvexMeshGeometry& convexGeom = static_cast<const PxConvexMeshGeometry&>(geom0);

			const SweepConvexFunc func = sf.convexMap[geom1.getType()];

			return func(geom1, pose1, convexGeom, pose0, unitDir, distance, sweepHit, hitFlags, inflation, threadContext);
		}
		default:
			PX_CHECK_MSG(false, "PxGeometryQuery::sweep(): first geometry object parameter must be sphere, capsule, box or convex geometry.");
	}
	return false;
}
///////////////////////////////////////////////////////////////////////////////
// Returns true if geom0 at pose0 overlaps geom1 at pose1. Dispatches through
// the pairwise overlap function table.
bool PxGeometryQuery::overlap(	const PxGeometry& geom0, const PxTransform& pose0,
								const PxGeometry& geom1, const PxTransform& pose1,
								PxGeometryQueryFlags queryFlags, PxOverlapThreadContext* threadContext)
{
	// Optional denormal/rounding-mode guard, controlled by the caller.
	PX_SIMD_GUARD_CNDT(queryFlags & PxGeometryQueryFlag::eSIMD_GUARD)
	return Gu::overlap(geom0, pose0, geom1, pose1, gGeomOverlapMethodTable, threadContext);
}
///////////////////////////////////////////////////////////////////////////////
// Casts a ray against 'geom' at 'pose'. Writes up to 'maxHits' hits (each
// 'stride' bytes apart) into 'rayHits' and returns the number of hits found.
// 'rayDir' must be normalized; 'maxDist' must be finite and non-negative.
PxU32 PxGeometryQuery::raycast(	const PxVec3& rayOrigin, const PxVec3& rayDir,
								const PxGeometry& geom, const PxTransform& pose,
								PxReal maxDist, PxHitFlags hitFlags, PxU32 maxHits, PxGeomRaycastHit* PX_RESTRICT rayHits, PxU32 stride,
								PxGeometryQueryFlags queryFlags, PxRaycastThreadContext* threadContext)
{
	// Optional denormal/rounding-mode guard, controlled by the caller.
	PX_SIMD_GUARD_CNDT(queryFlags & PxGeometryQueryFlag::eSIMD_GUARD)
	PX_CHECK_AND_RETURN_VAL(rayDir.isFinite(), "PxGeometryQuery::raycast(): rayDir is not valid.", 0);
	PX_CHECK_AND_RETURN_VAL(rayOrigin.isFinite(), "PxGeometryQuery::raycast(): rayOrigin is not valid.", 0);
	PX_CHECK_AND_RETURN_VAL(pose.isValid(), "PxGeometryQuery::raycast(): pose is not valid.", 0);
	PX_CHECK_AND_RETURN_VAL(maxDist >= 0.0f, "PxGeometryQuery::raycast(): maxDist is negative.", false);
	PX_CHECK_AND_RETURN_VAL(PxIsFinite(maxDist), "PxGeometryQuery::raycast(): maxDist is not valid.", false);
	PX_CHECK_AND_RETURN_VAL(PxAbs(rayDir.magnitudeSquared()-1)<1e-4f, "PxGeometryQuery::raycast(): ray direction must be unit vector.", false);

	// Dispatch to the geometry-type-specific raycast implementation.
	const RaycastFunc func = gRaycastMap[geom.getType()];
	return func(geom, pose, rayOrigin, rayDir, maxDist, hitFlags, maxHits, rayHits, stride, threadContext);
}
///////////////////////////////////////////////////////////////////////////////
bool pointConvexDistance(PxVec3& normal_, PxVec3& closestPoint_, PxReal& sqDistance, const PxVec3& pt, const ConvexMesh* convexMesh, const PxMeshScale& meshScale, const PxTransform32& convexPose);
// Computes the square distance between a point and a geometry object.
// \param point			query point (same space as pose).
// \param geom			geometry object: sphere, capsule, box, convex mesh or triangle mesh.
// \param pose			world pose of the geometry object.
// \param closestPoint	[out, optional] closest point on the geometry surface. Not written
//						when the point is inside a sphere/capsule (those cases early-out
//						with 0.0f), inside a convex, or exactly on a box surface.
// \param closestIndex	[out, optional] index of the closest triangle; only written for
//						triangle meshes.
// \param queryFlags	controls the conditional SIMD guard.
// \return	squared distance (0.0f when the point is inside), or -1.0f on invalid pose
//			or unsupported geometry type.
PxReal PxGeometryQuery::pointDistance(const PxVec3& point, const PxGeometry& geom, const PxTransform& pose, PxVec3* closestPoint, PxU32* closestIndex, PxGeometryQueryFlags queryFlags)
{
	PX_SIMD_GUARD_CNDT(queryFlags & PxGeometryQueryFlag::eSIMD_GUARD)

	PX_CHECK_AND_RETURN_VAL(pose.isValid(), "PxGeometryQuery::pointDistance(): pose is not valid.", -1.0f);

	switch(geom.getType())
	{
		case PxGeometryType::eSPHERE:
		{
			const PxSphereGeometry& sphereGeom = static_cast<const PxSphereGeometry&>(geom);

			const PxReal r = sphereGeom.radius;

			PxVec3 delta = point - pose.p;
			const PxReal d = delta.magnitude();
			if(d<=r)	// point inside or on the sphere
				return 0.0f;

			if(closestPoint)
			{
				delta /= d;	// normalize, then push out to the sphere surface
				*closestPoint = pose.p + delta * r;
			}

			return (d - r)*(d - r);
		}
		case PxGeometryType::eCAPSULE:
		{
			const PxCapsuleGeometry& capsGeom = static_cast<const PxCapsuleGeometry&>(geom);

			Capsule capsule;
			getCapsule(capsule, capsGeom, pose);

			const PxReal r = capsGeom.radius;

			// Distance from the point to the capsule's core segment.
			// (Fixed: the address-of operator on 'param' was corrupted in the source.)
			PxReal param;
			const PxReal sqDistance = distancePointSegmentSquared(capsule, point, &param);
			if(sqDistance<=r*r)	// point inside or on the capsule
				return 0.0f;

			const PxReal d = physx::intrinsics::sqrt(sqDistance);

			if(closestPoint)
			{
				// Closest point on the segment, pushed out to the capsule surface.
				const PxVec3 cp = capsule.getPointAt(param);

				PxVec3 delta = point - cp;
				delta.normalize();

				*closestPoint = cp + delta * r;
			}
			return (d - r)*(d - r);
		}
		case PxGeometryType::eBOX:
		{
			const PxBoxGeometry& boxGeom = static_cast<const PxBoxGeometry&>(geom);

			Box obb;
			buildFrom(obb, pose.p, boxGeom.halfExtents, pose.q);

			PxVec3 boxParam;
			const PxReal sqDistance = distancePointBoxSquared(point, obb, &boxParam);
			if(closestPoint && sqDistance!=0.0f)
			{
				// boxParam is in box space; bring the closest point back to world space.
				*closestPoint = obb.transform(boxParam);
			}
			return sqDistance;
		}
		case PxGeometryType::eCONVEXMESH:
		{
			const PxConvexMeshGeometry& convexGeom = static_cast<const PxConvexMeshGeometry&>(geom);

			// Aligned pose copy for the SIMD-friendly convex distance function.
			const PxTransform32 poseA(pose);

			PxVec3 normal, cp;
			PxReal sqDistance;
			const bool intersect = pointConvexDistance(normal, cp, sqDistance, point, static_cast<ConvexMesh*>(convexGeom.convexMesh), convexGeom.scale, poseA);
			if(!intersect && closestPoint)
				*closestPoint = cp;
			return sqDistance;
		}
		case PxGeometryType::eTRIANGLEMESH:
		{
			const PxTriangleMeshGeometry& meshGeom = static_cast<const PxTriangleMeshGeometry&>(geom);

			PxU32 index;
			float dist;
			PxVec3 cp;
			Midphase::pointMeshDistance(static_cast<TriangleMesh*>(meshGeom.triangleMesh), meshGeom, pose, point, FLT_MAX, index, dist, cp);
			if(closestPoint)
				*closestPoint = cp;
			if(closestIndex)
				*closestIndex = index;
			// The midphase reports a linear distance; square it for a consistent return value.
			return dist*dist;
		}
		default:
			PX_CHECK_MSG(false, "PxGeometryQuery::pointDistance(): geometry object parameter must be sphere, capsule, box, convex or mesh geometry.");
			break;
	}
	return -1.0f;
}
///////////////////////////////////////////////////////////////////////////////
// Computes the bounds of a geometry object at the given pose.
// \param bounds		[out] the computed bounds.
// \param geom			the geometry object.
// \param pose			world pose of the geometry object.
// \param offset		forwarded to Gu::computeBounds.
// \param inflation		forwarded to Gu::computeBounds.
// \param queryFlags	controls the conditional SIMD guard.
void PxGeometryQuery::computeGeomBounds(PxBounds3& bounds, const PxGeometry& geom, const PxTransform& pose, float offset, float inflation, PxGeometryQueryFlags queryFlags)
{
	PX_SIMD_GUARD_CNDT(queryFlags & PxGeometryQueryFlag::eSIMD_GUARD)
	PX_CHECK_AND_RETURN(pose.isValid(), "PxGeometryQuery::computeGeomBounds(): pose is not valid.");
	Gu::computeBounds(bounds, geom, pose, offset, inflation);
	PX_ASSERT(bounds.isValid());
}
///////////////////////////////////////////////////////////////////////////////
extern GeomMTDFunc gGeomMTDMethodTable[][PxGeometryType::eGEOMETRY_COUNT];
// Computes the minimum translational distance (MTD) between two overlapping
// geometry objects. On success, moving geom0 by 'mtd * depth' depenetrates it
// from geom1.
// \param mtd			[out] depenetration direction for geom0.
// \param depth			[out] penetration depth.
// \param geom0/pose0	first geometry object and its world pose.
// \param geom1/pose1	second geometry object and its world pose.
// \param queryFlags	controls the conditional SIMD guard.
// \return	true if the objects overlap and an MTD was computed.
bool PxGeometryQuery::computePenetration( PxVec3& mtd, PxF32& depth,
const PxGeometry& geom0, const PxTransform& pose0,
const PxGeometry& geom1, const PxTransform& pose1, PxGeometryQueryFlags queryFlags)
{
	PX_SIMD_GUARD_CNDT(queryFlags & PxGeometryQueryFlag::eSIMD_GUARD)
	PX_CHECK_AND_RETURN_VAL(pose0.isValid(), "PxGeometryQuery::computePenetration(): pose0 is not valid.", false);
	PX_CHECK_AND_RETURN_VAL(pose1.isValid(), "PxGeometryQuery::computePenetration(): pose1 is not valid.", false);

	// Aligned pose copies for the SIMD-friendly MTD functions.
	const PxTransform32 alignedPose0(pose0);
	const PxTransform32 alignedPose1(pose1);

	// The MTD method table is only populated for type0 <= type1, so swap the
	// operands into canonical order and flip the reported direction afterwards.
	const bool swapped = geom0.getType() > geom1.getType();

	if(!swapped)
	{
		GeomMTDFunc mtdFunc = gGeomMTDMethodTable[geom0.getType()][geom1.getType()];
		PX_ASSERT(mtdFunc);
		return mtdFunc(mtd, depth, geom0, alignedPose0, geom1, alignedPose1);
	}

	GeomMTDFunc mtdFunc = gGeomMTDMethodTable[geom1.getType()][geom0.getType()];
	PX_ASSERT(mtdFunc);
	if(!mtdFunc(mtd, depth, geom1, alignedPose1, geom0, alignedPose0))
		return false;

	// The function was called with swapped operands: negate the direction so it
	// still refers to geom0.
	mtd = -mtd;
	return true;
}
///////////////////////////////////////////////////////////////////////////////
// Generates contacts between a geometry object and a single triangle, using the
// PCM (persistent contact manifold) contact-generation code paths.
//
// The triangle is processed under an identity transform (see triangleTransform
// below), i.e. its vertices are expected in the same space as 'pose'.
//
// \param geom				geometry object: capsule, box or convex mesh (other types generate nothing).
// \param pose				pose of the geometry object.
// \param triangleVertices	the three triangle vertices.
// \param triangleIndex		written to PxContactPoint::internalFaceIndex1 of each generated contact.
// \param contactDistance	contact generation distance.
// \param meshContactMargin	margin added to the contact distance; contact points are shifted back by it below.
// \param toleranceLength	used to derive PCM margins for the box/convex paths.
// \param contactBuffer		[in/out] receives the generated contacts.
// \return	true if at least one contact was added to contactBuffer.
bool PxGeometryQuery::generateTriangleContacts(const PxGeometry& geom, const PxTransform& pose, const PxVec3 triangleVertices[3], PxU32 triangleIndex, PxReal contactDistance, PxReal meshContactMargin, PxReal toleranceLength, PxContactBuffer& contactBuffer)
{
	using namespace aos;

	const PxU32 triangleIndices[3]{ 0, 1, 2 };

	// Local manifold + scratch contact buffer; results are copied into the
	// caller's buffer at the end.
	PxInlineArray<PxU32, LOCAL_PCM_CONTACTS_SIZE> deferredContacts;
	Gu::MultiplePersistentContactManifold multiManifold;
	multiManifold.initialize();
	PxContactBuffer contactBuffer0; contactBuffer0.reset();

	const PxTransformV geomTransform = loadTransformU(pose);
	// Identity: the triangle vertices are used as-is.
	const PxTransformV triangleTransform = loadTransformU(PxTransform(PxIdentity));

	// Surface offsets applied when flushing the manifold below: radius0 for the
	// geometry (capsule radius), radius1 for the mesh contact margin.
	float radius0 = 0;
	float radius1 = meshContactMargin;

	PxU32 oldCount = contactBuffer.count;

	switch (geom.getType())
	{
	case PxGeometryType::eCAPSULE:
	{
		const PxCapsuleGeometry& capsule = static_cast<const PxCapsuleGeometry&>(geom);
		radius0 = capsule.radius;
		const FloatV capsuleRadius = FLoad(capsule.radius);
		const FloatV contactDist = FLoad(contactDistance + meshContactMargin);
		const FloatV replaceBreakingThreshold = FMul(capsuleRadius, FLoad(0.001f));
		const PxTransformV capsuleTransform = geomTransform;
		const PxTransformV meshTransform = triangleTransform;
		multiManifold.setRelativeTransform(capsuleTransform);
		// Capsule in world space: center + rotated half-height axis.
		const Gu::CapsuleV capsuleV(V3LoadU(pose.p), V3LoadU(pose.q.rotate(PxVec3(capsule.halfHeight, 0, 0))), capsuleRadius);
		Gu::PCMCapsuleVsMeshContactGeneration contactGeneration(capsuleV, contactDist, replaceBreakingThreshold, capsuleTransform, meshTransform, multiManifold, contactBuffer0, &deferredContacts);
		// All triangle edges are treated as convex since there is no adjacency information.
		contactGeneration.processTriangle(triangleVertices, triangleIndex, Gu::ETD_CONVEX_EDGE_ALL, triangleIndices);
		contactGeneration.processContacts(GU_CAPSULE_MANIFOLD_CACHE_SIZE, false);
		break;
	}
	case PxGeometryType::eBOX:
	{
		const PxBoxGeometry& box = static_cast<const PxBoxGeometry&>(geom);
		const PxBounds3 hullAABB(-box.halfExtents, box.halfExtents);
		const Vec3V boxExtents = V3LoadU(box.halfExtents);
		const FloatV minMargin = Gu::CalculatePCMBoxMargin(boxExtents, toleranceLength, GU_PCM_MESH_MANIFOLD_EPSILON);
		Cm::FastVertex2ShapeScaling idtScaling;
		const FloatV contactDist = FLoad(contactDistance + meshContactMargin);
		const FloatV replaceBreakingThreshold = FMul(minMargin, FLoad(0.05f));
		// Box is expressed in its own space (center at origin); boxTransform carries the pose.
		const BoxV boxV(V3Zero(), boxExtents);
		const PxTransformV boxTransform = geomTransform;
		const PxTransformV meshTransform = triangleTransform;
		PolygonalData polyData;
		PCMPolygonalBox polyBox(box.halfExtents);
		polyBox.getPolygonalData(&polyData);
		const Mat33V identity = M33Identity();
		SupportLocalImpl<BoxV> boxMap(boxV, boxTransform, identity, identity, true);
		Gu::PCMConvexVsMeshContactGeneration contactGeneration(contactDist, replaceBreakingThreshold, boxTransform, meshTransform, multiManifold, contactBuffer0, polyData, &boxMap, &deferredContacts, idtScaling, true, true, NULL);
		contactGeneration.processTriangle(triangleVertices, triangleIndex, Gu::ETD_CONVEX_EDGE_ALL, triangleIndices);
		contactGeneration.processContacts(GU_SINGLE_MANIFOLD_CACHE_SIZE, false);
		break;
	}
	case PxGeometryType::eCONVEXMESH:
	{
		const PxConvexMeshGeometry& convex = static_cast<const PxConvexMeshGeometry&>(geom);
		const ConvexHullData* hullData = _getHullData(convex);
		Cm::FastVertex2ShapeScaling convexScaling;
		PxBounds3 hullAABB;
		PolygonalData polyData;
		// idtConvexScale: true if the convex scale is identity (selects faster paths below).
		const bool idtConvexScale = getPCMConvexData(convex, convexScaling, hullAABB, polyData);
		const QuatV vQuat = QuatVLoadU(&convex.scale.rotation.x);
		const Vec3V vScale = V3LoadU_SafeReadW(convex.scale.scale);
		const FloatV minMargin = CalculatePCMConvexMargin(hullData, vScale, toleranceLength, GU_PCM_MESH_MANIFOLD_EPSILON);
		const ConvexHullV convexHull(hullData, V3Zero(), vScale, vQuat, idtConvexScale);
		const FloatV contactDist = FLoad(contactDistance + meshContactMargin);
		const FloatV replaceBreakingThreshold = FMul(minMargin, FLoad(0.05f));
		const PxTransformV convexTransform = geomTransform;
		const PxTransformV meshTransform = triangleTransform;
		SupportLocalImpl<Gu::ConvexHullV> convexMap(convexHull, convexTransform, convexHull.vertex2Shape, convexHull.shape2Vertex, false);
		Gu::PCMConvexVsMeshContactGeneration contactGeneration(contactDist, replaceBreakingThreshold, convexTransform, meshTransform, multiManifold, contactBuffer0, polyData, &convexMap, &deferredContacts, convexScaling, idtConvexScale, true, NULL);
		contactGeneration.processTriangle(triangleVertices, triangleIndex, Gu::ETD_CONVEX_EDGE_ALL, triangleIndices);
		contactGeneration.processContacts(GU_SINGLE_MANIFOLD_CACHE_SIZE, false);
		break;
	}
	default:
		break;
	}

	// Flush the manifold(s) into the caller's buffer: push the points back onto
	// the respective surfaces (radius0/radius1) and recompute the separation
	// along the contact normal.
	for (PxU32 manifoldIndex = 0; manifoldIndex < multiManifold.mNumManifolds; ++manifoldIndex)
	{
		Gu::SinglePersistentContactManifold& manifold = *multiManifold.getManifold(manifoldIndex);
		PxVec3 normal; V3StoreU(manifold.getWorldNormal(triangleTransform), normal);
		for (PxU32 contactIndex = 0; contactIndex < manifold.getNumContacts(); ++contactIndex)
		{
			Gu::MeshPersistentContact& meshContact = manifold.getContactPoint(contactIndex);
			PxContactPoint contact;
			// mLocalPointA is in geometry space, mLocalPointB already in triangle (world) space.
			PxVec3 p0; V3StoreU(geomTransform.transform(meshContact.mLocalPointA), p0); p0 -= normal * radius0;
			PxVec3 p1; V3StoreU(meshContact.mLocalPointB, p1); p1 += normal * radius1;
			contact.point = (p0 + p1) * 0.5f;
			contact.normal = normal;
			contact.separation = normal.dot(p0 - p1);
			contact.internalFaceIndex1 = triangleIndex;
			contactBuffer.contact(contact);
		}
	}

	// True if any contact was appended.
	return oldCount < contactBuffer.count;
}
///////////////////////////////////////////////////////////////////////////////
// Returns a process-wide unique ID for custom geometry types.
// Thread-safe: the value returned by the atomic increment itself is used.
// (The original incremented atomically but then re-read the counter with a
// plain load, so two concurrent callers could observe the same ID.)
PxU32 PxCustomGeometry::getUniqueID()
{
	static volatile PxI32 uniqueID = 0;
	return PxU32(PxAtomicIncrement(&uniqueID));
}
| 20,270 | C++ | 40.709876 | 256 | 0.732116 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuSAH.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxAssert.h"
#include "foundation/PxAllocator.h"
#include "foundation/PxMemory.h"
#include "GuSAH.h"
using namespace physx;
using namespace Gu;
// Total surface area of an AABB: 2*(xy + xz + yz) for edge lengths x, y, z.
static PX_FORCE_INLINE float getSurfaceArea(const PxBounds3& bounds)
{
	const PxVec3 edges = bounds.maximum - bounds.minimum;
	return 2.0f * (edges.x * edges.y + edges.x * edges.z + edges.y * edges.z);
}
// Allocates the per-primitive scratch buffers used by split(): sort keys and
// the two cumulative-surface-area arrays.
// \param nb_prims	maximum number of primitives split() may be called with.
SAH_Buffers::SAH_Buffers(PxU32 nb_prims)
{
	mKeys = PX_ALLOCATE(float, nb_prims, "temp");
	mCumulativeLower = PX_ALLOCATE(float, nb_prims, "temp");
	mCumulativeUpper = PX_ALLOCATE(float, nb_prims, "temp");
	mNb = nb_prims;
}
// Releases the scratch buffers allocated in the constructor.
SAH_Buffers::~SAH_Buffers()
{
	PX_FREE(mKeys);
	PX_FREE(mCumulativeLower);
	PX_FREE(mCumulativeUpper);
}
// Finds the best SAH (surface-area heuristic) split position for a set of
// primitives and reorders the prims array in place along the winning axis.
//
// For each coordinate axis the primitive centers are sorted, the cumulative
// surface areas of the left/right groups are accumulated from both ends in one
// pass, and the SAH cost of every split position is evaluated.
//
// \param leftCount	[out] number of primitives in the left group (bestIndex+1).
// \param nb		number of primitives; must not exceed the ctor's nb_prims.
// \param prims		[in/out] primitive indices, reordered in place on success.
// \param boxes		primitive bounds, indexed by the values stored in prims.
// \param centers	primitive centers, indexed by the values stored in prims.
// \return	true if a valid split was found, false when all primitives end up on
//			one side (degenerate split).
bool SAH_Buffers::split(PxU32& leftCount, PxU32 nb, const PxU32* PX_RESTRICT prims, const PxBounds3* PX_RESTRICT boxes, const PxVec3* PX_RESTRICT centers)
{
	PxU32 bestAxis = 0;
	PxU32 bestIndex = 0;
	float bestCost = PX_MAX_F32;

	PX_ASSERT(nb<=mNb);

	for(PxU32 axis=0;axis<3;axis++)
	{
		const PxU32* sorted;
		{
			// Sort the primitive centers along this axis. One sorter per axis is
			// kept so the winning axis' ranks can be replayed after the loop.
			float* keys = mKeys;
			for(PxU32 i=0;i<nb;i++)
			{
				const PxU32 index = prims[i];
				const float center = centers[index][axis];
				keys[i] = center;
			}

			sorted = mSorters[axis].Sort(keys, nb).GetRanks();
		}

		float* cumulativeLower = mCumulativeLower;
		float* cumulativeUpper = mCumulativeUpper;

/*		if(0)
		{
			PxBounds3 bbox = PxBounds3::empty();
			for(PxU32 i=0; i<nb; i++)
			{
				bbox.include(bboxes[references[axis][i]]);
				bbox.include(boxes[prims[nb-sortedIndex-1]]);
			}

			for (size_t i = end - 1; i > begin; --i) {
				bbox.extend(bboxes[references[axis][i]]);
				costs[axis][i] = bbox.half_area() * (end - i);
			}

			bbox = BoundingBox<Scalar>::empty();
			auto best_split = std::pair<Scalar, size_t>(std::numeric_limits<Scalar>::max(), end);
			for (size_t i = begin; i < end - 1; ++i) {
				bbox.extend(bboxes[references[axis][i]]);
				auto cost = bbox.half_area() * (i + 1 - begin) + costs[axis][i + 1];
				if (cost < best_split.first)
					best_split = std::make_pair(cost, i + 1);
			}
			return best_split;
		}*/

		if(1)
		{
			// two passes over data to calculate upper and lower bounds
			PxBounds3 lower = PxBounds3::empty();
			PxBounds3 upper = PxBounds3::empty();
//			lower.minimum = lower.maximum = PxVec3(0.0f);
//			upper.minimum = upper.maximum = PxVec3(0.0f);

#if PX_ENABLE_ASSERTS
			// Debug-only: verify the sorted order is monotonic in both directions.
			float prevLowerCenter = -PX_MAX_F32;
			float prevUpperCenter = PX_MAX_F32;
#endif
			for(PxU32 i=0; i<nb; ++i)
			{
				const PxU32 lowSortedIndex = sorted[i];
				const PxU32 highSortedIndex = sorted[nb-i-1];

				//lower.Union(m_faceBounds[faces[i]]);
				PX_ASSERT(centers[prims[lowSortedIndex]][axis]>=prevLowerCenter);
				lower.include(boxes[prims[lowSortedIndex]]);
#if PX_ENABLE_ASSERTS
				prevLowerCenter = centers[prims[lowSortedIndex]][axis];
#endif

				//upper.Union(m_faceBounds[faces[numFaces - i - 1]]);
				PX_ASSERT(centers[prims[highSortedIndex]][axis]<=prevUpperCenter);
				upper.include(boxes[prims[highSortedIndex]]);
#if PX_ENABLE_ASSERTS
				prevUpperCenter = centers[prims[highSortedIndex]][axis];
#endif

				// Growing surface areas seen from the left (lower) and right (upper).
				cumulativeLower[i] = getSurfaceArea(lower);
				cumulativeUpper[nb - i - 1] = getSurfaceArea(upper);
			}

//			const float invTotalSA = 1.0f / cumulativeUpper[0];

			// test all split positions
			for (PxU32 i = 0; i < nb - 1; ++i)
			{
				const float pBelow = cumulativeLower[i];// * invTotalSA;
				const float pAbove = cumulativeUpper[i];// * invTotalSA;

//				const float cost = 0.125f + (pBelow * i + pAbove * float(nb - i));
				// SAH cost: area-weighted primitive counts of the two groups.
				const float cost = (pBelow * i + pAbove * float(nb - i));
				if(cost <= bestCost)
				{
					bestCost = cost;
					bestIndex = i;
					bestAxis = axis;
				}
			}
		}
	}

	leftCount = bestIndex + 1;
	if(leftCount==1 || leftCount==nb)
	{
		// Invalid split
		return false;
	}

/*
	// re-sort by best axis
	FaceSorter predicate(&m_vertices[0], &m_indices[0], m_numFaces * 3, bestAxis);
	std::sort(faces, faces + numFaces, predicate);

	return bestIndex + 1;
*/
	{
		// Reorder prims along the winning axis by replaying that sorter's ranks.
		// mKeys is reused as temporary storage for the unsorted indices.
		PxU32* tmp = reinterpret_cast<PxU32*>(mKeys);
		PxMemCopy(tmp, prims, nb*sizeof(PxU32));

		const PxU32* bestOrder = mSorters[bestAxis].GetRanks();

		PxU32* dst = const_cast<PxU32*>(prims);
		for(PxU32 i=0;i<nb;i++)
			dst[i] = tmp[bestOrder[i]];
	}

	return true;
}
| 5,974 | C++ | 29.958549 | 154 | 0.67777 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuInternal.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxBounds3.h"
#include "geometry/PxCapsuleGeometry.h"
#include "foundation/PxIntrinsics.h"
#include "GuInternal.h"
#include "GuBox.h"
#include "GuVecPlane.h"
#include "foundation/PxVecMath.h"
using namespace physx::aos;
using namespace physx;
/**
Computes the aabb points.
\param pts [out] 8 box points
*/
void Gu::computeBoxPoints(const PxBounds3& bounds, PxVec3* PX_RESTRICT pts)
{
PX_ASSERT(pts);
// Get box corners
const PxVec3& minimum = bounds.minimum;
const PxVec3& maximum = bounds.maximum;
// 7+------+6 0 = ---
// /| /| 1 = +--
// / | / | 2 = ++-
// / 4+---/--+5 3 = -+-
// 3+------+2 / y z 4 = --+
// | / | / | / 5 = +-+
// |/ |/ |/ 6 = +++
// 0+------+1 *---x 7 = -++
// Generate 8 corners of the bbox
pts[0] = PxVec3(minimum.x, minimum.y, minimum.z);
pts[1] = PxVec3(maximum.x, minimum.y, minimum.z);
pts[2] = PxVec3(maximum.x, maximum.y, minimum.z);
pts[3] = PxVec3(minimum.x, maximum.y, minimum.z);
pts[4] = PxVec3(minimum.x, minimum.y, maximum.z);
pts[5] = PxVec3(maximum.x, minimum.y, maximum.z);
pts[6] = PxVec3(maximum.x, maximum.y, maximum.z);
pts[7] = PxVec3(minimum.x, maximum.y, maximum.z);
}
// Builds the plane equation from a plane pose: the normal is the pose's X basis
// vector and the plane passes through the pose origin.
PxPlane Gu::getPlane(const PxTransform& pose)
{
	const PxVec3 normal = pose.q.getBasisVector0();
	return PxPlane(normal, -pose.p.dot(normal));
}
// Computes a box enclosing the volume swept by an oriented box (center/extents/
// rot) translated along unitDir over the given distance. The result box is
// aligned on the sweep direction plus two perpendicular axes.
// \param dest		[out] the swept box.
// \param extents	half extents of the source box.
// \param center	center of the source box.
// \param rot		orientation of the source box.
// \param unitDir	sweep direction (unit vector).
// \param distance	sweep length.
void Gu::computeSweptBox(Gu::Box& dest, const PxVec3& extents, const PxVec3& center, const PxMat33& rot, const PxVec3& unitDir, const PxReal distance)
{
	PxVec3 R1, R2;
	PxComputeBasisVectors(unitDir, R1, R2);

	// Find the two source-box axes least aligned with the sweep direction
	// (dmax tracks the most aligned axis; ax0/ax1 index the remaining two,
	// swapped so that dd[ax0] <= dd[ax1]).
	PxReal dd[3];
	dd[0] = PxAbs(rot.column0.dot(unitDir));
	dd[1] = PxAbs(rot.column1.dot(unitDir));
	dd[2] = PxAbs(rot.column2.dot(unitDir));
	PxReal dmax = dd[0];
	PxU32 ax0=1;
	PxU32 ax1=2;
	if(dd[1]>dmax)
	{
		dmax=dd[1];
		ax0=0;
		ax1=2;
	}
	if(dd[2]>dmax)
	{
		dmax=dd[2];
		ax0=0;
		ax1=1;
	}
	if(dd[ax1]<dd[ax0])
		PxSwap(ax0, ax1);

	// Build an orthonormal basis around the sweep direction, reusing the
	// least-aligned source axis for stability.
	R1 = rot[ax0];
	R1 -= (R1.dot(unitDir))*unitDir;	// Project to plane whose normal is dir
	R1.normalize();
	R2 = unitDir.cross(R1);

	dest.setAxes(unitDir, R1, R2);

	// Sweep offsets along the new axes (the R1/R2 terms should be near zero
	// since both axes are built perpendicular to unitDir).
	PxReal offset[3];
	offset[0] = distance;
	offset[1] = distance*(unitDir.dot(R1));
	offset[2] = distance*(unitDir.dot(R2));

	// New extents: half the sweep offset plus the source box projected onto
	// each destination axis.
	for(PxU32 r=0; r<3; r++)
	{
		const PxVec3& R = dest.rot[r];
		dest.extents[r] = offset[r]*0.5f + PxAbs(rot.column0.dot(R))*extents.x + PxAbs(rot.column1.dot(R))*extents.y + PxAbs(rot.column2.dot(R))*extents.z;
	}

	// Center of the swept volume: source center advanced by half the sweep.
	dest.center = center + unitDir*distance*0.5f;
}
| 4,162 | C++ | 32.304 | 150 | 0.677559 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuBVHTestsSIMD.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_BVH_TESTS_SIMD_H
#define GU_BVH_TESTS_SIMD_H
#include "foundation/PxTransform.h"
#include "foundation/PxBounds3.h"
#include "geometry/PxBoxGeometry.h"
#include "geometry/PxSphereGeometry.h"
#include "geometry/PxCapsuleGeometry.h"
#include "foundation/PxVecMath.h"
namespace physx
{
using namespace aos;
namespace Gu
{
	// SIMD ray-vs-AABB overlap test with optional per-axis box inflation.
	// Combines an interval test along the coordinate axes (using the ray's
	// precomputed min/max corners) with a test against the ray-direction
	// cross axes.
	struct RayAABBTest
	{
		// \param origin_		ray origin.
		// \param unitDir_		ray direction (unit vector).
		// \param maxDist		ray length; PX_MAX_F32 (or greater) means "infinite".
		// \param inflation_	per-axis inflation applied to boxes when check<true> is used.
		PX_FORCE_INLINE RayAABBTest(const PxVec3& origin_, const PxVec3& unitDir_, const PxReal maxDist, const PxVec3& inflation_)
			: mOrigin(V3LoadU(origin_))
			, mDir(V3LoadU(unitDir_))
			, mDirYZX(V3PermYZX(mDir))
			, mInflation(V3LoadU(inflation_))
			, mAbsDir(V3Abs(mDir))
			, mAbsDirYZX(V3PermYZX(mAbsDir))
		{
			// Ray endpoint. For "infinite" rays each component is clamped
			// individually: the origin when the direction component is 0,
			// +/-PX_MAX_F32 otherwise.
			const PxVec3 ext = maxDist >= PX_MAX_F32 ? PxVec3(	unitDir_.x == 0 ? origin_.x : PxSign(unitDir_.x)*PX_MAX_F32,
																unitDir_.y == 0 ? origin_.y : PxSign(unitDir_.y)*PX_MAX_F32,
																unitDir_.z == 0 ? origin_.z : PxSign(unitDir_.z)*PX_MAX_F32)
													 : origin_ + unitDir_ * maxDist;

			mRayMin = V3Min(mOrigin, V3LoadU(ext));
			mRayMax = V3Max(mOrigin, V3LoadU(ext));
		}

		// Updates the cached ray segment for a new (shorter or longer) distance.
		PX_FORCE_INLINE void setDistance(PxReal distance)
		{
			const Vec3V ext = V3ScaleAdd(mDir, FLoad(distance), mOrigin);
			mRayMin = V3Min(mOrigin, ext);
			mRayMax = V3Max(mOrigin, ext);
		}

		// Returns non-zero if the ray overlaps the box given as center/extents.
		// \tparam TInflate	if true, the box extents are inflated by mInflation first.
		template<bool TInflate>
		PX_FORCE_INLINE PxU32 check(const Vec3V center, const Vec3V extents) const
		{
			const Vec3V iExt = TInflate ? V3Add(extents, mInflation) : extents;

			// coordinate axes
			const Vec3V nodeMax = V3Add(center, iExt);
			const Vec3V nodeMin = V3Sub(center, iExt);

			// cross axes
			const Vec3V offset = V3Sub(mOrigin, center);
			const Vec3V offsetYZX = V3PermYZX(offset);
			const Vec3V iExtYZX = V3PermYZX(iExt);

			const Vec3V f = V3NegMulSub(mDirYZX, offset, V3Mul(mDir, offsetYZX));
			const Vec3V g = V3MulAdd(iExt, mAbsDirYZX, V3Mul(iExtYZX, mAbsDir));

			// maskA/maskB: ray segment vs box interval overlap per axis;
			// maskC: cross-axes test. All three must pass on all lanes.
			const BoolV
				maskA = V3IsGrtrOrEq(nodeMax, mRayMin),
				maskB = V3IsGrtrOrEq(mRayMax, nodeMin),
				maskC = V3IsGrtrOrEq(g, V3Abs(f));
			const BoolV andABCMasks = BAnd(BAnd(maskA, maskB), maskC);

			return BAllEqTTTT(andABCMasks);
		}

		const Vec3V mOrigin, mDir, mDirYZX, mInflation, mAbsDir, mAbsDirYZX;
		Vec3V mRayMin, mRayMax;

	protected:
		RayAABBTest& operator=(const RayAABBTest&);
	};
// probably not worth having a SIMD version of this unless the traversal passes Vec3Vs
	// SIMD AABB-vs-AABB overlap test in center/extents form: the boxes overlap
	// iff on every axis the distance between centers does not exceed the sum of
	// the extents.
	struct AABBAABBTest
	{
		PX_FORCE_INLINE AABBAABBTest(const PxTransform&t, const PxBoxGeometry&b)
			: mCenter(V3LoadU(t.p))
			, mExtents(V3LoadU(b.halfExtents))
		{ }

		PX_FORCE_INLINE AABBAABBTest(const PxBounds3& b)
			: mCenter(V3LoadU(b.getCenter()))
			, mExtents(V3LoadU(b.getExtents()))
		{ }

		// Returns non-zero if the stored box overlaps the given center/extents box.
		PX_FORCE_INLINE PxIntBool operator()(const Vec3V center, const Vec3V extents) const
		{
			// Scalar reference version:
			//PxVec3 c; PxVec3_From_Vec3V(center, c);
			//PxVec3 e; PxVec3_From_Vec3V(extents, e);
			//if(PxAbs(c.x - mCenter.x) > mExtents.x + e.x) return IntFalse;
			//if(PxAbs(c.y - mCenter.y) > mExtents.y + e.y) return IntFalse;
			//if(PxAbs(c.z - mCenter.z) > mExtents.z + e.z) return IntFalse;
			//return IntTrue;

			return PxIntBool(V3AllGrtrOrEq(V3Add(mExtents, extents), V3Abs(V3Sub(center, mCenter))));
		}

	private:
		AABBAABBTest& operator=(const AABBAABBTest&);
		const Vec3V mCenter, mExtents;
	};
	// SIMD sphere-vs-AABB overlap test: clamps the sphere center into the box
	// and compares the squared distance to the closest point against the
	// squared radius.
	struct SphereAABBTest
	{
		PX_FORCE_INLINE SphereAABBTest(const PxTransform& t, const PxSphereGeometry& s)
			: mCenter(V3LoadU(t.p))
			, mRadius2(FLoad(s.radius * s.radius))
		{}

		PX_FORCE_INLINE SphereAABBTest(const PxVec3& center, PxF32 radius)
			: mCenter(V3LoadU(center))
			, mRadius2(FLoad(radius * radius))
		{}

		// Returns non-zero if the sphere overlaps the given center/extents box.
		PX_FORCE_INLINE PxIntBool operator()(const Vec3V boxCenter, const Vec3V boxExtents) const
		{
			const Vec3V offset = V3Sub(mCenter, boxCenter);
			// Closest point on the box to the sphere center (in box-relative coordinates).
			const Vec3V closest = V3Clamp(offset, V3Neg(boxExtents), boxExtents);
			const Vec3V d = V3Sub(offset, closest);
			return PxIntBool(BAllEqTTTT(FIsGrtrOrEq(mRadius2, V3Dot(d, d))));
		}

	private:
		SphereAABBTest& operator=(const SphereAABBTest&);
		const Vec3V mCenter;
		const FloatV mRadius2;
	};
// The Opcode capsule-AABB traversal test seems to be *exactly* the same as the ray-box test inflated by the capsule radius (so not a true capsule/box test)
// and the code for the ray-box test is better. TODO: check the zero length case and use the sphere traversal if this one fails.
// (OTOH it's not that hard to adapt the Ray-AABB test to a capsule test)
	// Capsule-vs-AABB traversal test: implemented as the ray-AABB test with the
	// boxes inflated by the capsule radius (passed in via 'inflation').
	struct CapsuleAABBTest: private RayAABBTest
	{
		PX_FORCE_INLINE CapsuleAABBTest(const PxVec3& origin, const PxVec3& unitDir, const PxReal length, const PxVec3& inflation)
			: RayAABBTest(origin, unitDir, length, inflation)
		{}

		// Returns non-zero if the capsule (inflated ray) overlaps the box.
		PX_FORCE_INLINE PxIntBool operator()(const Vec3VArg center, const Vec3VArg extents) const
		{
			// check<true> inflates the box by the inflation vector.
			return PxIntBool(RayAABBTest::check<true>(center, extents));
		}
	};
	// SIMD OBB-vs-AABB overlap test (separating-axis style). The class I/II
	// tests use the AABB axes and the OBB axes; when fullTest is true the nine
	// edge cross-product axes (class III) are also tested.
	template<bool fullTest>
	struct OBBAABBTests
	{
		// \param pos					OBB position.
		// \param rot					OBB orientation.
		// \param halfExtentsInflated	OBB half extents (already inflated by the caller if desired).
		OBBAABBTests(const PxVec3& pos, const PxMat33& rot, const PxVec3& halfExtentsInflated)
		{
			// Epsilon padding on the |R| matrix — presumably for robustness of
			// the near-parallel-axis cases; TODO(review): confirm.
			const Vec3V eps = V3Load(1e-6f);

			mT = V3LoadU(pos);
			mExtents = V3LoadU(halfExtentsInflated);

			// storing the transpose matrices yields a simpler SIMD test
			mRT = Mat33V_From_PxMat33(rot.getTranspose());
			mART = Mat33V(V3Add(V3Abs(mRT.col0), eps), V3Add(V3Abs(mRT.col1), eps), V3Add(V3Abs(mRT.col2), eps));
			mBB_xyz = M33TrnspsMulV3(mART, mExtents);

			if(fullTest)
			{
				// Precompute the OBB extents projected onto the edge-cross axes.
				const Vec3V eYZX = V3PermYZX(mExtents), eZXY = V3PermZXY(mExtents);
				mBB_123 = V3MulAdd(eYZX, V3PermZXY(mART.col0), V3Mul(eZXY, V3PermYZX(mART.col0)));
				mBB_456 = V3MulAdd(eYZX, V3PermZXY(mART.col1), V3Mul(eZXY, V3PermYZX(mART.col1)));
				mBB_789 = V3MulAdd(eYZX, V3PermZXY(mART.col2), V3Mul(eZXY, V3PermYZX(mART.col2)));
			}
		}

		// Returns non-zero if the stored OBB overlaps the given center/extents AABB.
		// TODO: force inline it?
		PxIntBool operator()(const Vec3V center, const Vec3V extents) const
		{
			const Vec3V t = V3Sub(mT, center);

			// class I - axes of AABB
			if(V3OutOfBounds(t, V3Add(extents, mBB_xyz)))
				return PxIntFalse;

			const Vec3V rX = mRT.col0, rY = mRT.col1, rZ = mRT.col2;
			const Vec3V arX = mART.col0, arY = mART.col1, arZ = mART.col2;

			const FloatV eX = V3GetX(extents), eY = V3GetY(extents), eZ = V3GetZ(extents);
			const FloatV tX = V3GetX(t), tY = V3GetY(t), tZ = V3GetZ(t);

			// class II - axes of OBB
			{
				const Vec3V v = V3ScaleAdd(rZ, tZ, V3ScaleAdd(rY, tY, V3Scale(rX, tX)));
				const Vec3V v2 = V3ScaleAdd(arZ, eZ, V3ScaleAdd(arY, eY, V3ScaleAdd(arX, eX, mExtents)));
				if(V3OutOfBounds(v, v2))
					return PxIntFalse;
			}

			if(!fullTest)
				return PxIntTrue;

			// class III - edge cross products. Almost all OBB tests early-out with type I or type II,
			// so early-outs here probably aren't useful (TODO: profile)

			const Vec3V va = V3NegScaleSub(rZ, tY, V3Scale(rY, tZ));
			const Vec3V va2 = V3ScaleAdd(arY, eZ, V3ScaleAdd(arZ, eY, mBB_123));
			const BoolV ba = BOr(V3IsGrtr(va, va2), V3IsGrtr(V3Neg(va2), va));

			const Vec3V vb = V3NegScaleSub(rX, tZ, V3Scale(rZ, tX));
			const Vec3V vb2 = V3ScaleAdd(arX, eZ, V3ScaleAdd(arZ, eX, mBB_456));
			const BoolV bb = BOr(V3IsGrtr(vb, vb2), V3IsGrtr(V3Neg(vb2), vb));

			const Vec3V vc = V3NegScaleSub(rY, tX, V3Scale(rX, tY));
			const Vec3V vc2 = V3ScaleAdd(arX, eY, V3ScaleAdd(arY, eX, mBB_789));
			const BoolV bc = BOr(V3IsGrtr(vc, vc2), V3IsGrtr(V3Neg(vc2), vc));

			return PxIntBool(BAllEqFFFF(BOr(ba, BOr(bb,bc))));
		}

		Vec3V		mExtents;	// extents of OBB
		Vec3V		mT;			// translation of OBB
		Mat33V		mRT;		// transpose of rotation matrix of OBB
		Mat33V		mART;		// transpose of mRT, padded by epsilon
		Vec3V		mBB_xyz;	// extents of OBB along coordinate axes
		Vec3V		mBB_123;	// projections of extents onto edge-cross axes
		Vec3V		mBB_456;
		Vec3V		mBB_789;
	};
typedef OBBAABBTests<true> OBBAABBTest;
}
}
#endif
| 9,259 | C | 34.891473 | 156 | 0.718652 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuBucketPruner.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxMemory.h"
#include "foundation/PxBitUtils.h"
#include "GuBucketPruner.h"
#include "GuInternal.h"
#include "CmVisualization.h"
#include "CmRadixSort.h"
using namespace physx::aos;
using namespace physx;
using namespace Gu;
#define INVALID_HANDLE 0xffffffff
/*
TODO:
- if Core is always available, mSortedObjects could be replaced with just indices to mCoreObjects => less memory.
- UTS:
- test that queries against empty boxes all return false
- invalidate after 16 removes
- check shiftOrigin stuff (esp what happens to emptied boxes)
- isn't there a very hard-to-find bug waiting to happen in there,
when the shift touches the empty box and overrides mdata0/mdata1 with "wrong" values that break the sort?
- revisit updateObject/removeObject
- optimize/cache computation of free global bounds before clipRay
- remove temp memory buffers (sorted arrays)
- take care of code duplication
- better code to generate SIMD 0x7fffffff
- refactor SIMD tests
- optimize:
- better split values
- optimize update (bitmap, less data copy, etc)
- use ray limits in traversal code too?
- the SIMD XBOX code operates on Min/Max rather than C/E. Change format?
- or just try the alternative ray-box code (as on PC) ==> pretty much exactly the same speed
*/
//#define VERIFY_SORT
//#define BRUTE_FORCE_LIMIT 32
#define LOCAL_SIZE 256 // Size of various local arrays. Dynamic allocations occur if exceeded.
#define USE_SIMD // Use SIMD code or not (sanity performance check)
#define NODE_SORT // Enable/disable node sorting
#define NODE_SORT_MIN_COUNT 16 // Limit above which node sorting is performed
#if PX_INTEL_FAMILY
#if COMPILE_VECTOR_INTRINSICS
#define CAN_USE_MOVEMASK
#endif
#endif
#define ALIGN16(size) ((unsigned(size)+15) & unsigned(~15))
#ifdef _DEBUG
#define AlignedLoad V4LoadU
#define AlignedStore V4StoreU
#else
#define AlignedLoad V4LoadA
#define AlignedStore V4StoreA
#endif
// SAT-based ray-box overlap test has accuracy issues for long rays, so we clip them against the global AABB to limit these issues.
static void clipRay(const PxVec3& rayOrig, const PxVec3& rayDir, float& maxDist, const PxVec3& boxMin, const PxVec3& boxMax)
{
const PxVec3 boxCenter = (boxMax + boxMin)*0.5f;
const PxVec3 boxExtents = (boxMax - boxMin)*0.5f;
const float dpc = boxCenter.dot(rayDir);
const float extentsMagnitude = boxExtents.magnitude();
const float dpMin = dpc - extentsMagnitude;
const float dpMax = dpc + extentsMagnitude;
const float dpO = rayOrig.dot(rayDir);
const float boxLength = extentsMagnitude * 2.0f;
const float distToBox = PxMin(PxAbs(dpMin - dpO), PxAbs(dpMax - dpO));
maxDist = distToBox + boxLength * 2.0f;
}
// Marks all five bucket bounds as empty until the first classify pass fills them in.
BucketPrunerNode::BucketPrunerNode()
{
	PxU32 i = 0;
	while(i<5)
		mBucketBox[i++].setEmpty();
}
// Lookup table mapping a 4-bit box classification code (plus the cross-bucket flag in bit 4)
// to a bucket index. Input bits (see classifyBox()): bit0 = box fully beyond limitX,
// bit1 = box fully before limitX, bit2 = box fully beyond limitYZ, bit3 = box fully before
// limitYZ. Output: 0..3 = one of the four quadrant buckets, 4 = the "cross" bucket for boxes
// straddling a split plane. The upper 16 entries are selected when the parent bucket is itself
// the cross bucket (isCrossBucket), where the split rules differ slightly.
static const PxU8 gCodes[] = {	4, 4, 4, 4, 4, 3, 2, 2,
								4, 1, 0, 0, 4, 1, 0, 0,
								4, 1, 0, 0, 2, 1, 0, 0,
								3, 1, 0, 0, 2, 1, 0, 0};
#ifdef CAN_USE_MOVEMASK
/*static PX_FORCE_INLINE PxU32 classifyBox_x86(const BucketBox& box, const PxVec4& limits, const bool useY, const bool isCrossBucket)
{
const Vec4V extents = AlignedLoad(&box.mExtents.x);
const Vec4V center = AlignedLoad(&box.mCenter.x);
const Vec4V plus = V4Add(extents, center);
const Vec4V minus = V4Sub(extents, center);
Vec4V tmp;
if(useY) // PT: this is a constant so branch prediction works here
tmp = _mm_shuffle_ps(plus, minus, _MM_SHUFFLE(0,1,0,1));
else
tmp = _mm_shuffle_ps(plus, minus, _MM_SHUFFLE(0,2,0,2));
const Vec4V comp = _mm_shuffle_ps(tmp, tmp, _MM_SHUFFLE(0,2,1,3)); // oh well, nm
const PxU32 Code = (PxU32)_mm_movemask_ps(V4IsGrtr(V4LoadA(&limits.x), comp));
return gCodes[Code | PxU32(isCrossBucket)<<4];
}*/
// SIMD version of classifyBox() (see below): computes the same 4-bit classification code with
// a single movemask instead of four scalar compares, then looks the bucket index up in gCodes.
// 'limits' is laid out as (-limitX, limitX, -limitYZ, limitYZ) — see the caller in classifyBoxes().
static PX_FORCE_INLINE PxU32 classifyBox_x86(const Vec4V boxMin, const Vec4V boxMax, const PxVec4& limits, const bool useY, const bool isCrossBucket)
{
	const Vec4V plus = boxMax;
	const Vec4V minus = V4Neg(boxMin);	// negated min, so all four tests become ">" against the limits vector
	Vec4V tmp;
	if(useY)	// PT: this is a constant so branch prediction works here
		tmp = _mm_shuffle_ps(plus, minus, _MM_SHUFFLE(0,1,0,1));
	else
		tmp = _mm_shuffle_ps(plus, minus, _MM_SHUFFLE(0,2,0,2));
	const Vec4V comp = _mm_shuffle_ps(tmp, tmp, _MM_SHUFFLE(0,2,1,3));	// oh well, nm
	// One movemask gathers the four classification bits at once.
	const PxU32 Code = PxU32(_mm_movemask_ps(V4IsGrtr(V4LoadA(&limits.x), comp)));
	return gCodes[Code | PxU32(isCrossBucket)<<4];	// bit 4 selects the cross-bucket half of the table
}
#endif
#ifdef CAN_USE_MOVEMASK
#if PX_DEBUG
#define USE_CLASSIFY_BOX
#endif
#else
#define USE_CLASSIFY_BOX
#endif
#ifdef USE_CLASSIFY_BOX
// Scalar reference version: classifies a box against the two split planes (limitX along X,
// limitYZ along the secondary axis 'yz') and returns a bucket index via the gCodes table.
static PX_FORCE_INLINE PxU32 classifyBox(const BucketBox& box, const float limitX, const float limitYZ, const PxU32 yz, const bool isCrossBucket)
{
	const float maxYZ = box.mCenter[yz] + box.mExtents[yz];
	const float minYZ = box.mCenter[yz] - box.mExtents[yz];
	const float maxX = box.mCenter.x + box.mExtents.x;
	const float minX = box.mCenter.x - box.mExtents.x;

	// Is the box entirely on one side of each split plane?
	const bool upperPart = maxYZ<limitYZ;
	const bool lowerPart = minYZ>limitYZ;
	const bool leftPart = maxX<limitX;
	const bool rightPart = minX>limitX;

	// Table-based box classification avoids many branches
	PxU32 code = PxU32(rightPart);
	code |= PxU32(leftPart)<<1;
	code |= PxU32(lowerPart)<<2;
	code |= PxU32(upperPart)<<3;
	if(isCrossBucket)
		code += 16;	// select the cross-bucket half of the table
	return gCodes[code];
}
#endif
// Classifies 'nb' boxes against the two split planes (limitX along X, limitYZ along the
// secondary axis derived from 'sortAxis'), computing for each of the 5 buckets its object
// count (mCounters), start offset (mOffsets) and merged bounds (mBucketBox). Boxes, payloads
// and transforms are then written out grouped by bucket into the 'sorted*' arrays. The input
// must already be sorted along 'sortAxis' (checked in debug builds); the grouping pass is
// stable so each bucket's sub-range stays sorted. Note: boxes[i].mData0 is used as scratch
// storage for the per-box bucket index between the two passes.
void BucketPrunerNode::classifyBoxes(	float limitX, float limitYZ,
										PxU32 nb, BucketBox* PX_RESTRICT boxes, const PrunerPayload* PX_RESTRICT objects,
										const PxTransform* PX_RESTRICT transforms,
										BucketBox* PX_RESTRICT sortedBoxes, PrunerPayload* PX_RESTRICT sortedObjects,
										PxTransform* PX_RESTRICT sortedTransforms,
										bool isCrossBucket, PxU32 sortAxis)
{
	// Secondary classification axis: Z when sorting along Y, else Y.
	const PxU32 yz = PxU32(sortAxis == 1 ? 2 : 1);
#ifdef _DEBUG
	// Verify that the incoming boxes are sorted along the sort axis.
	{
		float prev = boxes[0].mDebugMin;
		for(PxU32 i=1;i<nb;i++)
		{
			const float current = boxes[i].mDebugMin;
			PX_ASSERT(current>=prev);
			prev = current;
		}
	}
#endif
	// Local (stack-based) min/max bucket bounds
	PX_ALIGN(16, PxVec4) bucketBoxMin[5];
	PX_ALIGN(16, PxVec4) bucketBoxMax[5];
	{
		// Reset counters and seed each bucket's bounds as empty.
		const PxBounds3 empty = PxBounds3::empty();
		for(PxU32 i=0;i<5;i++)
		{
			mCounters[i] = 0;
			bucketBoxMin[i] = PxVec4(empty.minimum, 0.0f);
			bucketBoxMax[i] = PxVec4(empty.maximum, 0.0f);
		}
	}
	{
#ifdef CAN_USE_MOVEMASK
		// DS: order doesn't play nice with x86 shuffles :-|
		PX_ALIGN(16, PxVec4) limits(-limitX, limitX, -limitYZ, limitYZ);
		const bool useY = yz==1;
#endif
		// Determine in which bucket each object falls, update bucket bounds
		for(PxU32 i=0;i<nb;i++)
		{
			const Vec4V boxCenterV = AlignedLoad(&boxes[i].mCenter.x);
			const Vec4V boxExtentsV = AlignedLoad(&boxes[i].mExtents.x);
			const Vec4V boxMinV = V4Sub(boxCenterV, boxExtentsV);
			const Vec4V boxMaxV = V4Add(boxCenterV, boxExtentsV);
#ifdef CAN_USE_MOVEMASK
			// const PxU32 index = classifyBox_x86(boxes[i], limits, useY, isCrossBucket);
			const PxU32 index = classifyBox_x86(boxMinV, boxMaxV, limits, useY, isCrossBucket);
#if PX_DEBUG
			// Cross-check the SIMD classification against the scalar reference version.
			const PxU32 index_ = classifyBox(boxes[i], limitX, limitYZ, yz, isCrossBucket);
			PX_ASSERT(index == index_);
#endif
#else
			const PxU32 index = classifyBox(boxes[i], limitX, limitYZ, yz, isCrossBucket);
#endif
			// Merge boxes
			{
				const Vec4V mergedMinV = V4Min(V4LoadA(&bucketBoxMin[index].x), boxMinV);
				const Vec4V mergedMaxV = V4Max(V4LoadA(&bucketBoxMax[index].x), boxMaxV);
				V4StoreA(mergedMinV, &bucketBoxMin[index].x);
				V4StoreA(mergedMaxV, &bucketBoxMax[index].x);
			}
			boxes[i].mData0 = index;	// Store bucket index for current box in this temporary location
			mCounters[index]++;
		}
	}
	{
		// Regenerate offsets
		mOffsets[0]=0;
		for(PxU32 i=0;i<4;i++)
			mOffsets[i+1] = mOffsets[i] + mCounters[i];
	}
	{
		// Group boxes with same bucket index together
		for(PxU32 i=0;i<nb;i++)
		{
			const PxU32 bucketOffset = mOffsets[boxes[i].mData0]++;	// Bucket index for current box was stored in mData0 by previous loop
			// The 2 following lines are the same as:
			// sortedBoxes[bucketOffset] = boxes[i];
			AlignedStore(AlignedLoad(&boxes[i].mCenter.x), &sortedBoxes[bucketOffset].mCenter.x);
			AlignedStore(AlignedLoad(&boxes[i].mExtents.x), &sortedBoxes[bucketOffset].mExtents.x);
#ifdef _DEBUG
			sortedBoxes[bucketOffset].mDebugMin = boxes[i].mDebugMin;
#endif
			sortedObjects[bucketOffset] = objects[i];
			sortedTransforms[bucketOffset] = transforms[i];
		}
	}
	{
		// Regenerate offsets (the grouping loop above consumed them as write cursors)
		mOffsets[0]=0;
		for(PxU32 i=0;i<4;i++)
			mOffsets[i+1] = mOffsets[i] + mCounters[i];
	}
	{
		// Convert local (stack-based) min/max bucket bounds to persistent center/extents format
		const float Half = 0.5f;
		const FloatV HalfV = FLoad(Half);
		PX_ALIGN(16, PxVec4) bucketCenter;
		PX_ALIGN(16, PxVec4) bucketExtents;
		for(PxU32 i=0;i<5;i++)
		{
			// The following lines are the same as:
			// mBucketBox[i].mCenter = bucketBox[i].getCenter();
			// mBucketBox[i].mExtents = bucketBox[i].getExtents();
			const Vec4V bucketBoxMinV = V4LoadA(&bucketBoxMin[i].x);
			const Vec4V bucketBoxMaxV = V4LoadA(&bucketBoxMax[i].x);
			const Vec4V bucketBoxCenterV = V4Scale(V4Add(bucketBoxMaxV, bucketBoxMinV), HalfV);
			const Vec4V bucketBoxExtentsV = V4Scale(V4Sub(bucketBoxMaxV, bucketBoxMinV), HalfV);
			V4StoreA(bucketBoxCenterV, &bucketCenter.x);
			V4StoreA(bucketBoxExtentsV, &bucketExtents.x);
			mBucketBox[i].mCenter = PxVec3(bucketCenter.x, bucketCenter.y, bucketCenter.z);
			mBucketBox[i].mExtents = PxVec3(bucketExtents.x, bucketExtents.y, bucketExtents.z);
		}
	}
#ifdef _DEBUG
	// Each bucket's sub-array must still be sorted along the sort axis (grouping is stable).
	for(PxU32 j=0;j<5;j++)
	{
		const PxU32 count = mCounters[j];
		if(count)
		{
			const BucketBox* base = sortedBoxes + mOffsets[j];
			float prev = base[0].mDebugMin;
			for(PxU32 i=1;i<count;i++)
			{
				const float current = base[i].mDebugMin;
				PX_ASSERT(current>=prev);
				prev = current;
			}
		}
	}
#endif
}
///////////////////////////////////////////////////////////////////////////////
// For each of the 5 buckets of 'bucket', classifies that bucket's objects one level deeper
// (into childBucket[0..4]), using the parent bucket box center as the new split values.
// Each bucket's sub-range of the base arrays is regrouped into the temporary 'sortedXXXInBucket'
// buffers and then copied back in place. 'nbAllocated' is the capacity of those temporary
// buffers (debug-checked only).
static void processChildBuckets(PxU32 nbAllocated,
						BucketBox* sortedBoxesInBucket, PrunerPayload* sortedObjectsInBucket,
						PxTransform* sortedTransformsInBucket,
						const BucketPrunerNode& bucket, BucketPrunerNode* PX_RESTRICT childBucket,
						BucketBox* PX_RESTRICT baseBucketsBoxes, PrunerPayload* PX_RESTRICT baseBucketsObjects,
						PxTransform* baseBucketTransforms,
						PxU32 sortAxis)
{
	PX_UNUSED(nbAllocated);
	// Secondary classification axis: Z when sorting along Y, else Y.
	const PxU32 yz = PxU32(sortAxis == 1 ? 2 : 1);
	for(PxU32 i=0;i<5;i++)
	{
		const PxU32 nbInBucket = bucket.mCounters[i];
		// Empty bucket => just reset the child node's counters.
		if(!nbInBucket)
		{
			childBucket[i].initCounters();
			continue;
		}
		// Sub-range of the base arrays covered by bucket i.
		BucketBox* bucketsBoxes = baseBucketsBoxes + bucket.mOffsets[i];
		PrunerPayload* bucketsObjects = baseBucketsObjects + bucket.mOffsets[i];
		PxTransform* bucketTransforms = baseBucketTransforms + bucket.mOffsets[i];
		PX_ASSERT(nbInBucket<=nbAllocated);

		// Split around the parent bucket box center; bucket 4 is the cross bucket.
		const float limitX = bucket.mBucketBox[i].mCenter.x;
		const float limitYZ = bucket.mBucketBox[i].mCenter[yz];
		const bool isCrossBucket = i==4;
		childBucket[i].classifyBoxes(limitX, limitYZ, nbInBucket, bucketsBoxes, bucketsObjects,
			bucketTransforms,
			sortedBoxesInBucket, sortedObjectsInBucket,
			sortedTransformsInBucket,
			isCrossBucket, sortAxis);

		// Copy the regrouped data back over the original sub-range.
		PxMemCopy(bucketsBoxes, sortedBoxesInBucket, sizeof(BucketBox)*nbInBucket);
		PxMemCopy(bucketsObjects, sortedObjectsInBucket, sizeof(PrunerPayload)*nbInBucket);
		PxMemCopy(bucketTransforms, sortedTransformsInBucket, sizeof(PxTransform)*nbInBucket);
	}
}
///////////////////////////////////////////////////////////////////////////////
// Converts an IEEE-754 float bit pattern into an unsigned integer whose ordering matches the
// ordering of the original floats, so encoded values can be compared as plain PxU32.
static PX_FORCE_INLINE PxU32 encodeFloat(PxU32 newPos)
{
	//we may need to check on -0 and 0
	//But it should make no practical difference.
	// Negative floats: flip every bit (reverses their sequence); positive: just set the sign bit.
	return (newPos & PX_SIGN_BITMASK) ? ~newPos : (newPos | PX_SIGN_BITMASK);
}
// Computes the [rayMin, rayMax] interval covered by the ray segment along the given sort axis.
static PX_FORCE_INLINE void computeRayLimits(float& rayMin, float& rayMax, const PxVec3& rayOrig, const PxVec3& rayDir, float maxDist, PxU32 sortAxis)
{
	const float start = rayOrig[sortAxis];
	const float delta = rayDir[sortAxis] * maxDist;
	const float end = start + delta;
	rayMin = PxMin(start, end);
	rayMax = PxMax(start, end);
}
// Same as above, but inflates the interval by the given extents on both sides
// (for inflated/swept queries).
static PX_FORCE_INLINE void computeRayLimits(float& rayMin, float& rayMax, const PxVec3& rayOrig, const PxVec3& rayDir, float maxDist, const PxVec3& inflate, PxU32 sortAxis)
{
	const float padding = inflate[sortAxis];
	const float start = rayOrig[sortAxis];
	const float delta = rayDir[sortAxis] * maxDist;
	const float end = start + delta;
	rayMin = PxMin(start, end) - padding;
	rayMax = PxMax(start, end) + padding;
}
// Stores sort-friendly integer encodings of the box min/max along 'axis' into mData0/mData1,
// so later comparisons can be done on integers (see encodeFloat()).
static PX_FORCE_INLINE void encodeBoxMinMax(BucketBox& box, const PxU32 axis)
{
	const float axisMin = box.mCenter[axis] - box.mExtents[axis];
	const float axisMax = box.mCenter[axis] + box.mExtents[axis];

	// Reinterpret the float bits as integers, then encode so integer ordering matches float ordering.
	const PxU32* minBits = reinterpret_cast<const PxU32*>(&axisMin);
	const PxU32* maxBits = reinterpret_cast<const PxU32*>(&axisMax);

	box.mData0 = encodeFloat(minBits[0]);
	box.mData1 = encodeFloat(maxBits[0]);
}
///////////////////////////////////////////////////////////////////////////////
// Constructs an empty pruner. With externalMemory=true the core arrays are expected to be
// supplied later via setExternalMemory() and are then not owned (nor freed) by this object.
BucketPrunerCore::BucketPrunerCore(bool externalMemory) :
	mCoreNbObjects		(0),
	mCoreCapacity		(0),
	mCoreBoxes			(NULL),
	mCoreObjects		(NULL),
	mCoreTransforms		(NULL),
	mCoreRemap			(NULL),
	mSortedWorldBoxes	(NULL),
	mSortedObjects		(NULL),
	mSortedTransforms	(NULL),
#ifdef FREE_PRUNER_SIZE
	mNbFree				(0),
#endif
	mSortedNb			(0),
	mSortedCapacity		(0),
	mSortAxis			(0),
	mDirty				(false),
	mOwnMemory			(!externalMemory)
{
	mGlobalBox.setEmpty();
	// Reset the counters of the 3-level bucket hierarchy (1 root node, 5 level-2, 5x5 level-3).
	mLevel1.initCounters();
	for(PxU32 i=0;i<5;i++)
		mLevel2[i].initCounters();
	for(PxU32 j=0;j<5;j++)
		for(PxU32 i=0;i<5;i++)
			mLevel3[j][i].initCounters();
}
// Releases all memory owned by the pruner.
BucketPrunerCore::~BucketPrunerCore()
{
	release();
}
// Frees all owned buffers and resets the pruner to its initial empty state. Core arrays are
// only freed when owned (i.e. not provided through setExternalMemory()).
void BucketPrunerCore::release()
{
	mDirty = true;	// sorted data is gone, a rebuild will be needed
	mCoreNbObjects = 0;
	mCoreCapacity = 0;
	if(mOwnMemory)
	{
		PX_FREE(mCoreBoxes);
		PX_FREE(mCoreObjects);
		PX_FREE(mCoreTransforms);
		PX_FREE(mCoreRemap);
	}
	// Sorted arrays are always owned by this object.
	PX_FREE(mSortedWorldBoxes);
	PX_FREE(mSortedObjects);
	PX_FREE(mSortedTransforms);
	mSortedNb = 0;
	mSortedCapacity = 0;
#ifdef FREE_PRUNER_SIZE
	mNbFree = 0;
#endif
#ifdef USE_REGULAR_HASH_MAP
	mMap.clear();
#else
	mMap.purge();
#endif
}
// Plugs user-owned core arrays into the pruner. Only valid when the pruner was constructed
// with externalMemory=true (asserted); the arrays are never freed by this object. No remap
// table is available in this mode (mCoreRemap stays NULL).
void BucketPrunerCore::setExternalMemory(PxU32 nbObjects, PxBounds3* boxes, PrunerPayload* objects, PxTransform* transforms)
{
	PX_ASSERT(!mOwnMemory);
	mCoreNbObjects = nbObjects;
	mCoreBoxes = boxes;
	mCoreObjects = objects;
	mCoreTransforms = transforms;
	mCoreRemap = NULL;
}
// Makes sure the sorted arrays can hold 'nb' entries. Grows to the next power of two when too
// small, and also reallocates when the request drops below half the current capacity. Previous
// contents are not preserved.
void BucketPrunerCore::allocateSortedMemory(PxU32 nb)
{
	mSortedNb = nb;

	// Keep the current buffers when the request fits and isn't wastefully small.
	const bool fits = nb<=mSortedCapacity;
	const bool notTooSmall = nb>=mSortedCapacity/2;
	if(fits && notTooSmall)
		return;

	const PxU32 newCapacity = PxNextPowerOfTwo(nb);
	mSortedCapacity = newCapacity;

	const PxU32 boxBytes = ALIGN16(newCapacity*sizeof(BucketBox));
	const PxU32 objectBytes = ALIGN16(newCapacity*sizeof(PrunerPayload));
	// PT: TODO: I don't remember what this alignment is for, maybe we don't need it
	const PxU32 transformBytes = ALIGN16(newCapacity*sizeof(PxTransform));

	PX_FREE(mSortedObjects);
	PX_FREE(mSortedWorldBoxes);
	PX_FREE(mSortedTransforms);

	mSortedWorldBoxes = reinterpret_cast<BucketBox*>(PX_ALLOC(boxBytes, "BucketPruner"));
	mSortedObjects = reinterpret_cast<PrunerPayload*>(PX_ALLOC(objectBytes, "BucketPruner"));
	mSortedTransforms = reinterpret_cast<PxTransform*>(PX_ALLOC(transformBytes, "BucketPruner"));

	PX_ASSERT(!(size_t(mSortedWorldBoxes)&15));
	PX_ASSERT(!(size_t(mSortedObjects)&15));
	PX_ASSERT(!(size_t(mSortedTransforms)&15));
}
///////////////////////////////////////////////////////////////////////////////
void BucketPrunerCore::resizeCore()
{
const PxU32 capacity = mCoreCapacity ? mCoreCapacity*2 : 32;
mCoreCapacity = capacity;
const PxU32 bytesNeededForBoxes = capacity*sizeof(PxBounds3);
const PxU32 bytesNeededForObjects = capacity*sizeof(PrunerPayload);
const PxU32 bytesNeededForTransforms = capacity*sizeof(PxTransform);
const PxU32 bytesNeededForRemap = capacity*sizeof(PxU32);
PxBounds3* newCoreBoxes = reinterpret_cast<PxBounds3*>(PX_ALLOC(bytesNeededForBoxes, "BucketPruner"));
PrunerPayload* newCoreObjects = reinterpret_cast<PrunerPayload*>(PX_ALLOC(bytesNeededForObjects, "BucketPruner"));
PxTransform* newCoreTransforms = reinterpret_cast<PxTransform*>(PX_ALLOC(bytesNeededForTransforms, "BucketPruner"));
PxU32* newCoreRemap = reinterpret_cast<PxU32*>(PX_ALLOC(bytesNeededForRemap, "BucketPruner"));
if(mCoreBoxes)
{
PxMemCopy(newCoreBoxes, mCoreBoxes, mCoreNbObjects*sizeof(PxBounds3));
PX_FREE(mCoreBoxes);
}
if(mCoreObjects)
{
PxMemCopy(newCoreObjects, mCoreObjects, mCoreNbObjects*sizeof(PrunerPayload));
PX_FREE(mCoreObjects);
}
if(mCoreTransforms)
{
PxMemCopy(newCoreTransforms, mCoreTransforms, mCoreNbObjects*sizeof(PxTransform));
PX_FREE(mCoreTransforms);
}
if(mCoreRemap)
{
PxMemCopy(newCoreRemap, mCoreRemap, mCoreNbObjects*sizeof(PxU32));
PX_FREE(mCoreRemap);
}
mCoreBoxes = newCoreBoxes;
mCoreObjects = newCoreObjects;
mCoreTransforms = newCoreTransforms;
mCoreRemap = newCoreRemap;
}
// Appends an object to the core arrays (growing them if needed) and registers it in the map.
// The remap entry stays invalid (0xffffffff) until the sorted arrays are rebuilt.
PX_FORCE_INLINE void BucketPrunerCore::addObjectInternal(const PrunerPayload& object, const PxBounds3& worldAABB, const PxTransform& transform, PxU32 timeStamp)
{
	if(mCoreNbObjects==mCoreCapacity)
		resizeCore();

	const PxU32 index = mCoreNbObjects++;
	mCoreObjects[index] = object;
	mCoreBoxes[index] = worldAABB;	// PT: TODO: check assembly here
	mCoreTransforms[index] = transform;	// PT: TODO: check assembly here
	mCoreRemap[index] = 0xffffffff;

	// Objects are only inserted into the map once they're part of the main/core arrays.
#ifdef USE_REGULAR_HASH_MAP
	bool ok = mMap.insert(object, BucketPrunerPair(index, timeStamp));
#else
	BucketPrunerPair* ok = mMap.addPair(object, index, timeStamp);
#endif
	PX_UNUSED(ok);
	PX_ASSERT(ok);
}
// Adds an object to the pruner. When the structure is currently valid, the object is first
// stored in the small "free" array (queried brute-force like a mini free pruner) so the sorted
// arrays are not invalidated; once that array is full, its contents are flushed to the core
// arrays and the structure is marked dirty (rebuild needed). Always returns true.
bool BucketPrunerCore::addObject(const PrunerPayload& object, const PxBounds3& worldAABB, const PxTransform& transform, PxU32 timeStamp)
{
	/*
	We should probably use a bigger payload/userData struct here, which would also contains the external handle.
	(EDIT: we can't even do that, because of the setExternalMemory function)
	When asked to update/remove an object it would be O(n) to find the proper object in the mSortedObjects array.
	-
	For removing it we can simply empty the corresponding box, and the object will never be returned from queries.
	Maybe this isn't even true, since boxes are sorted along one axis. So marking a box as empty could break the code relying on a sorted order.
	An alternative is to mark the external handle as invalid, and ignore the object when a hit is found.
	(EDIT: the sorting is now tested via data0/data1 anyway so we could mark the box as empty without breaking this)
	-
	For updating an object we would need to keep the (sub) array sorted (not the whole thing, only the array within a bucket).
	We don't know the range (what part of the array maps to our bucket) but we may have the bucket ID somewhere? If we'd have this
	we could parse the array left/right and resort just the right boxes. If we don't have this we may be able to "quickly" find the
	range by traversing the tree, looking for the proper bucket. In any case I don't think there's a mapping to update within a bucket,
	unlike in SAP or MBP. So we should be able to shuffle a bucket without having to update anything. For example there's no mapping
	between the Core array and the Sorted array. It's a shame in a way because we'd need one, but it's not there - and in fact I think
	we can free the Core array once Sorted is created, we don't need it at all.
	If we don't want to re-sort the full bucket we can just mark it as dirty and ignore the sort-based early exits in the queries. Then we
	can incrementally resort it over N frames or something.
	This only works if the updated object remains in the same bucket though. If it moves to another bucket it becomes tempting to just remove
	the object and re-insert it.
	-
	Now for adding an object, we can first have a "free pruner" and do the 16 next entries brute-force. Rebuilding every 16 objects might
	give a good speedup already. Otherwise we need to do something more complicated.
	*/
	PX_ASSERT(mOwnMemory);
	PX_ASSERT(!mDirty || !mNbFree);
	if(!mDirty)
	{
#ifdef FREE_PRUNER_SIZE
		// In this path the structure is marked as valid. We do not want to invalidate it for each new object...
		if(mNbFree<FREE_PRUNER_SIZE)
		{
			// ...so as long as there is space in the "free array", we store the newly added object there and
			// return immediately. Subsequent queries will parse the free array as if it was a free pruner.
			const PxU32 index = mNbFree++;
			mFreeObjects[index] = object;
			mFreeBounds[index] = worldAABB;
			mFreeTransforms[index] = transform;
			mFreeStamps[index] = timeStamp;
			return true;
		}
		// If we reach this place, the free array is full. We must transfer the objects from the free array to
		// the main (core) arrays, mark the structure as invalid, and still deal with the incoming object.
		// First we transfer free objects, reset the number of free objects, and mark the structure as
		// invalid/dirty (the core arrays will need rebuilding).
		for(PxU32 i=0;i<mNbFree;i++)
			addObjectInternal(mFreeObjects[i], mFreeBounds[i], mFreeTransforms[i], mFreeStamps[i]);
		mNbFree = 0;
#endif
		mDirty = true;
		// mSortedNb = 0; // PT: TODO: investigate if this should be done here

		// After that we still need to deal with the new incoming object (so far we only
		// transferred the already existing objects from the full free array). This will
		// happen automatically by letting the code continue to the regular codepath below.
	}
	// If we reach this place, the structure must be invalid and the incoming object
	// must be added to the main arrays.
	PX_ASSERT(mDirty);
	addObjectInternal(object, worldAABB, transform, timeStamp);
	return true;
}
// Removes an object from the pruner. The object is looked up in the map (i.e. the core
// arrays) first, then in the free array. On success its timestamp is written to 'timeStamp'
// and true is returned; false means the object wasn't found (e.g. a double remove).
bool BucketPrunerCore::removeObject(const PrunerPayload& object, PxU32& timeStamp)
{
	// Even if the structure is already marked as dirty, we still need to update the
	// core arrays and the map.
	// The map only contains core objects, so we can use it to determine if the object
	// exists in the core arrays or in the free array.
#ifdef USE_REGULAR_HASH_MAP
/*	BucketPrunerPair entry;
	if(mMap.findAndErase(object, entry))
	{
		PxU32 coreIndex = entry.mCoreIndex;
		timeStamp = entry.mTimeStamp;*/
	const BucketPrunerMap::Entry* removedEntry = mMap.find(object);
	if(removedEntry)
	{
		PxU32 coreIndex = removedEntry->second.mCoreIndex;
		timeStamp = removedEntry->second.mTimeStamp;
#else
	PxU32 coreIndex;	// This is the object's index in the core arrays.
	if(mMap.removePair(object, coreIndex, timeStamp))
	{
#endif
		// In this codepath, the object we want to remove exists in the core arrays.
		// We will need to remove it from both the core arrays & the sorted arrays.
		const PxU32 sortedIndex = mCoreRemap[coreIndex];	// This is the object's index in the sorted arrays.
#ifdef USE_REGULAR_HASH_MAP
		bool status = mMap.erase(object);
		PX_ASSERT(status);
		PX_UNUSED(status);
#endif
		// First let's deal with the core arrays
		mCoreNbObjects--;
		if(coreIndex!=mCoreNbObjects)
		{
			// If it wasn't the last object in the array, close the gaps as usual
			const PrunerPayload& movedObject = mCoreObjects[mCoreNbObjects];
			mCoreBoxes[coreIndex] = mCoreBoxes[mCoreNbObjects];
			mCoreTransforms[coreIndex] = mCoreTransforms[mCoreNbObjects];
			mCoreObjects[coreIndex] = movedObject;
			mCoreRemap[coreIndex] = mCoreRemap[mCoreNbObjects];
			// Since we just moved the last object, its index in the core arrays has changed.
			// We must reflect this change in the map.
#ifdef USE_REGULAR_HASH_MAP
			BucketPrunerMap::Entry* movedEntry = const_cast<BucketPrunerMap::Entry*>(mMap.find(movedObject));
			PX_ASSERT(movedEntry->second.mCoreIndex==mCoreNbObjects);
			movedEntry->second.mCoreIndex = coreIndex;
#else
			BucketPrunerPair* movedEntry = const_cast<BucketPrunerPair*>(mMap.findPair(movedObject));
			PX_ASSERT(movedEntry->mCoreIndex==mCoreNbObjects);
			movedEntry->mCoreIndex = coreIndex;
#endif
		}
		// Now, let's deal with the sorted arrays.
		// If the structure is dirty, the sorted arrays will be rebuilt from scratch so there's no need to
		// update them right now.
		if(!mDirty)
		{
			// If the structure is valid, we want to keep it this way to avoid rebuilding sorted arrays after
			// each removal. We can't "close the gaps" easily here because order of objects in the arrays matters.
			// Instead we just invalidate the object by setting its bounding box as empty.
			// Queries against empty boxes will never return a hit, so this effectively "removes" the object
			// from any subsequent query results. Sorted arrays now contain a "disabled" object, until next build.
			// Invalidating the box does not invalidate the sorting, since it's now captured in mData0/mData1.
			// That is, mData0/mData1 keep their previous integer-encoded values, as if the box/object was still here.
			mSortedWorldBoxes[sortedIndex].mCenter = PxVec3(0.0f);
			mSortedWorldBoxes[sortedIndex].mExtents = PxVec3(-GU_EMPTY_BOUNDS_EXTENTS);
			// Note that we don't touch mSortedObjects here. We could, but this is not necessary.
		}
		return true;
	}
#ifdef FREE_PRUNER_SIZE
	// Here, the object we want to remove exists in the free array. So we just parse it.
	for(PxU32 i=0;i<mNbFree;i++)
	{
		if(mFreeObjects[i]==object)
		{
			// We found the object we want to remove. Close the gap as usual.
			timeStamp = mFreeStamps[i];
			mNbFree--;
			mFreeBounds[i] = mFreeBounds[mNbFree];
			mFreeTransforms[i] = mFreeTransforms[mNbFree];
			mFreeObjects[i] = mFreeObjects[mNbFree];
			mFreeStamps[i] = mFreeStamps[mNbFree];
			return true;
		}
	}
#endif
	// We didn't find the object. Can happen with a double remove. PX_ASSERT might be an option here.
	return false;
}
// Updates an object's bounds/transform by removing and re-inserting it, preserving its
// timestamp. Returns false when the object is unknown (nothing is re-inserted in that case).
bool BucketPrunerCore::updateObject(const PxBounds3& worldAABB, const PrunerPayload& object, const PxTransform& transform)
{
	PxU32 timeStamp;
	// Short-circuit: when removal fails, the re-insertion is skipped entirely.
	return removeObject(object, timeStamp) && addObject(object, worldAABB, transform, timeStamp);
}
// Removes all objects whose timestamp equals 'timeStamp', both from the core arrays/hash-map
// and from the free array. Returns how many objects were removed. The sorted arrays are only
// patched (boxes emptied) when the structure is currently valid; a dirty structure will be
// rebuilt anyway. Mirrors the per-object logic of removeObject().
PxU32 BucketPrunerCore::removeMarkedObjects(PxU32 timeStamp)
{
	PxU32 nbRemoved=0;

	// PT: objects can be either in the hash-map, or in the 'free' array. First we look in the hash-map...
#ifdef USE_REGULAR_HASH_MAP
	if(mMap.size())
#else
	if(mMap.mNbActivePairs)
#endif
	{
		// Empty-bounds center/extents used to disable entries in the sorted arrays.
		PxBounds3 empty;
		empty.setEmpty();
		const PxVec3 emptyCenter = empty.getCenter();
		const PxVec3 emptyExtents = empty.getExtents();

		// PT: hash-map is coalesced so we just parse it in linear order, no holes
		PxU32 i=0;
#ifdef USE_REGULAR_HASH_MAP
		PxU32 nbActivePairs = mMap.size();
		const BucketPrunerMap::Entry* entries = mMap.mBase.getEntries();
#else
		PxU32 nbActivePairs = mMap.mNbActivePairs;
#endif
		PxU32 coreNbObjects = mCoreNbObjects;	// PT: to avoid LHS
		while(i<nbActivePairs)
		{
#ifdef USE_REGULAR_HASH_MAP
			const BucketPrunerMap::Entry& p = entries[i];
			if(p.second.mTimeStamp==timeStamp)
#else
			const BucketPrunerPair& p = mMap.mActivePairs[i];
			if(p.mTimeStamp==timeStamp)
#endif
			{
				// PT: timestamps match. We must remove this object.
				// PT: we replicate here what we do in BucketPrunerCore::removeObject(). See that function for details.
#ifdef USE_REGULAR_HASH_MAP
				const PxU32 coreIndex = p.second.mCoreIndex;
#else
				const PxU32 coreIndex = p.mCoreIndex;
#endif
				if(!mDirty)
				{
					// PT: invalidating the box does not invalidate the sorting, since it's now captured in mData0/mData1
					const PxU32 sortedIndex = mCoreRemap[coreIndex];
					mSortedWorldBoxes[sortedIndex].mCenter = emptyCenter;
					mSortedWorldBoxes[sortedIndex].mExtents = emptyExtents;
				}
				// Close the gap in the core arrays by moving the last object down...
				coreNbObjects--;
				if(coreIndex!=coreNbObjects)
				{
					const PrunerPayload& movedObject = mCoreObjects[coreNbObjects];
					mCoreBoxes[coreIndex] = mCoreBoxes[coreNbObjects];
					mCoreTransforms[coreIndex] = mCoreTransforms[coreNbObjects];
					mCoreObjects[coreIndex] = movedObject;
					mCoreRemap[coreIndex] = mCoreRemap[coreNbObjects];
					// ...and fix up the moved object's core index in the map.
#ifdef USE_REGULAR_HASH_MAP
					BucketPrunerMap::Entry* movedEntry = const_cast<BucketPrunerMap::Entry*>(mMap.find(movedObject));
					PX_ASSERT(movedEntry->second.mCoreIndex==coreNbObjects);
					movedEntry->second.mCoreIndex = coreIndex;
#else
					BucketPrunerPair* movedEntry = const_cast<BucketPrunerPair*>(mMap.findPair(movedObject));
					PX_ASSERT(movedEntry->mCoreIndex==coreNbObjects);
					movedEntry->mCoreIndex = coreIndex;
#endif
				}
				nbRemoved++;
#ifdef USE_REGULAR_HASH_MAP
				bool status = mMap.erase(p.first);
				PX_ASSERT(status);
				PX_UNUSED(status);
#else
				const PxU32 hashValue = PxComputeHash(p.mData) & mMap.mMask;
				mMap.removePairInternal(p.mData, hashValue, i);
#endif
				nbActivePairs--;	// note: 'i' is intentionally not advanced after a removal
			}
			else i++;
		}
		mCoreNbObjects = coreNbObjects;
#ifdef USE_REGULAR_HASH_MAP
#else
		mMap.shrinkMemory();
#endif
	}
#ifdef FREE_PRUNER_SIZE
	// PT: ...then we look in the 'free' array
	PxU32 i=0;
	while(i<mNbFree)
	{
		if(mFreeStamps[i]==timeStamp)
		{
			// Matching timestamp: close the gap with the last free entry ('i' not advanced on purpose).
			nbRemoved++;
			mNbFree--;
			mFreeBounds[i] = mFreeBounds[mNbFree];
			mFreeTransforms[i] = mFreeTransforms[mNbFree];
			mFreeObjects[i] = mFreeObjects[mNbFree];
			mFreeStamps[i] = mFreeStamps[mNbFree];
		}
		else i++;
	}
#endif
	return nbRemoved;
}
///////////////////////////////////////////////////////////////////////////////
// Computes the global bounds of the input set ('_globalBox'), selects the sort axis
// (1 = Y when |extent.y| < |extent.z|, else 2 = Z), then radix-sorts the boxes by their
// minimum along that axis. Writes out the boxes in center/extents form plus the payloads and
// transforms in sorted order, and returns the chosen sort axis.
// Note: the sort keys are built in-place in the 'sortedObjects' buffer before it is filled.
static PxU32 sortBoxes(	PxU32 nb, const PxBounds3* PX_RESTRICT boxes, const PrunerPayload* PX_RESTRICT objects,
						const PxTransform* PX_RESTRICT transforms,
						BucketBox& _globalBox, BucketBox* PX_RESTRICT sortedBoxes, PrunerPayload* PX_RESTRICT sortedObjects
						, PxTransform* PX_RESTRICT sortedTransforms)
{
	// Compute global box & sort axis
	PxU32 sortAxis;
	{
		PX_ASSERT(nb>0);
		// Merge all bounds (the last box seeds the accumulators, the remaining nb-1 are folded in).
		Vec4V mergedMinV = V4LoadU(&boxes[nb-1].minimum.x);
		Vec4V mergedMaxV = Vec4V_From_Vec3V(V3LoadU(&boxes[nb-1].maximum.x));
		for(PxU32 i=0;i<nb-1;i++)
		{
			mergedMinV = V4Min(mergedMinV, V4LoadU(&boxes[i].minimum.x));
			mergedMaxV = V4Max(mergedMaxV, V4LoadU(&boxes[i].maximum.x));
		}
/*		PX_ALIGN(16, PxVec4) mergedMin;
		PX_ALIGN(16, PxVec4) mergedMax;
		V4StoreA(mergedMinV, &mergedMin.x);
		V4StoreA(mergedMaxV, &mergedMax.x);
		_globalBox.mCenter.x = (mergedMax.x + mergedMin.x)*0.5f;
		_globalBox.mCenter.y = (mergedMax.y + mergedMin.y)*0.5f;
		_globalBox.mCenter.z = (mergedMax.z + mergedMin.z)*0.5f;
		_globalBox.mExtents.x = (mergedMax.x - mergedMin.x)*0.5f;
		_globalBox.mExtents.y = (mergedMax.y - mergedMin.y)*0.5f;
		_globalBox.mExtents.z = (mergedMax.z - mergedMin.z)*0.5f;*/
		// Convert the merged min/max bounds to center/extents form.
		const float Half = 0.5f;
		const FloatV HalfV = FLoad(Half);
		PX_ALIGN(16, PxVec4) mergedCenter;
		PX_ALIGN(16, PxVec4) mergedExtents;
		const Vec4V mergedCenterV = V4Scale(V4Add(mergedMaxV, mergedMinV), HalfV);
		const Vec4V mergedExtentsV = V4Scale(V4Sub(mergedMaxV, mergedMinV), HalfV);
		V4StoreA(mergedCenterV, &mergedCenter.x);
		V4StoreA(mergedExtentsV, &mergedExtents.x);
		_globalBox.mCenter = PxVec3(mergedCenter.x, mergedCenter.y, mergedCenter.z);
		_globalBox.mExtents = PxVec3(mergedExtents.x, mergedExtents.y, mergedExtents.z);
		const PxF32 absY = PxAbs(_globalBox.mExtents.y);
		const PxF32 absZ = PxAbs(_globalBox.mExtents.z);
		sortAxis = PxU32(absY < absZ ? 1 : 2);
		// printf("Sort axis: %d\n", sortAxis);
	}
	// Build the sort keys in the (not yet used) sortedObjects buffer to avoid an extra allocation.
	float* keys = reinterpret_cast<float*>(sortedObjects);
	for(PxU32 i=0;i<nb;i++)
		keys[i] = boxes[i].minimum[sortAxis];

	Cm::RadixSortBuffered rs;	// ###TODO: some allocs here, remove
	const PxU32* ranks = rs.Sort(keys, nb).GetRanks();

	// Write out boxes (converted to center/extents form), payloads and transforms in sorted order.
	const float Half = 0.5f;
	const FloatV HalfV = FLoad(Half);
	for(PxU32 i=0;i<nb;i++)
	{
		const PxU32 index = *ranks++;
		//const PxU32 index = local[i].index;
		// sortedBoxes[i].mCenter = boxes[index].getCenter();
		// sortedBoxes[i].mExtents = boxes[index].getExtents();
		const Vec4V bucketBoxMinV = V4LoadU(&boxes[index].minimum.x);
		const Vec4V bucketBoxMaxV = Vec4V_From_Vec3V(V3LoadU(&boxes[index].maximum.x));
		const Vec4V bucketBoxCenterV = V4Scale(V4Add(bucketBoxMaxV, bucketBoxMinV), HalfV);
		const Vec4V bucketBoxExtentsV = V4Scale(V4Sub(bucketBoxMaxV, bucketBoxMinV), HalfV);
		// We don't need to preserve data0/data1 here
		AlignedStore(bucketBoxCenterV, &sortedBoxes[i].mCenter.x);
		AlignedStore(bucketBoxExtentsV, &sortedBoxes[i].mExtents.x);
#ifdef _DEBUG
		sortedBoxes[i].mDebugMin = boxes[index].minimum[sortAxis];
#endif
		sortedObjects[i] = objects[index];
		sortedTransforms[i] = transforms[index];
	}
	return sortAxis;
}
#ifdef NODE_SORT
// Local three-move swap helper, usable from both host and CUDA-callable code.
template<class T>
PX_CUDA_CALLABLE PX_FORCE_INLINE void tswap(T& x, T& y)
{
	T saved(x);
	x = y;
	y = saved;
}
/* PX_FORCE_INLINE __m128 DotV(const __m128 a, const __m128 b)
{
const __m128 dot1 = _mm_mul_ps(a, b);
const __m128 shuf1 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(dot1), _MM_SHUFFLE(0,0,0,0)));
const __m128 shuf2 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(dot1), _MM_SHUFFLE(1,1,1,1)));
const __m128 shuf3 = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(dot1), _MM_SHUFFLE(2,2,2,2)));
return _mm_add_ps(_mm_add_ps(shuf1, shuf2), shuf3);
}*/
// PT: hmmm, by construction, isn't the order always the same for all bucket pruners?
// => maybe not because the bucket boxes are still around the merged aabbs, not around the bucket
// Still we could do something here
// Computes a traversal order for the 5 child buckets of 'parent', ordered by the projection of
// each bucket-box center onto 'rayDir'. Returns five 3-bit bucket indices packed into a PxU32
// (the i-th entry in bits [3i..3i+2]). Empty buckets get a PX_MAX_F32 key so they sort last.
// When the node holds fewer than NODE_SORT_MIN_COUNT objects, sorting is skipped and the
// identity order 0,1,2,3,4 is returned.
static /*PX_FORCE_INLINE*/ PxU32 sort(const BucketPrunerNode& parent, const PxVec3& rayDir)
{
	const PxU32 totalCount = parent.mCounters[0]+parent.mCounters[1]+parent.mCounters[2]+parent.mCounters[3]+parent.mCounters[4];
	if(totalCount<NODE_SORT_MIN_COUNT)
		return 0|(1<<3)|(2<<6)|(3<<9)|(4<<12);	// identity order, sorting not worth it

	// Sort keys: projection of each (non-empty) bucket's center onto the ray direction.
	float dp[5];
/*	const __m128 rayDirV = _mm_loadu_ps(&rayDir.x);
	__m128 dp0V = DotV(rayDirV, _mm_loadu_ps(&parent.mBucketBox[0].mCenter.x)); _mm_store_ss(&dp[0], dp0V);
	__m128 dp1V = DotV(rayDirV, _mm_loadu_ps(&parent.mBucketBox[1].mCenter.x)); _mm_store_ss(&dp[1], dp1V);
	__m128 dp2V = DotV(rayDirV, _mm_loadu_ps(&parent.mBucketBox[2].mCenter.x)); _mm_store_ss(&dp[2], dp2V);
	__m128 dp3V = DotV(rayDirV, _mm_loadu_ps(&parent.mBucketBox[3].mCenter.x)); _mm_store_ss(&dp[3], dp3V);
	__m128 dp4V = DotV(rayDirV, _mm_loadu_ps(&parent.mBucketBox[4].mCenter.x)); _mm_store_ss(&dp[4], dp4V);
*/
#ifdef VERIFY_SORT
	// Reference implementation: explicit index tracking through a bubble-sort network.
	PxU32 code;
	{
		dp[0] = parent.mCounters[0] ? PxAbs(parent.mBucketBox[0].mCenter.dot(rayDir)) : PX_MAX_F32;
		dp[1] = parent.mCounters[1] ? PxAbs(parent.mBucketBox[1].mCenter.dot(rayDir)) : PX_MAX_F32;
		dp[2] = parent.mCounters[2] ? PxAbs(parent.mBucketBox[2].mCenter.dot(rayDir)) : PX_MAX_F32;
		dp[3] = parent.mCounters[3] ? PxAbs(parent.mBucketBox[3].mCenter.dot(rayDir)) : PX_MAX_F32;
		dp[4] = parent.mCounters[4] ? PxAbs(parent.mBucketBox[4].mCenter.dot(rayDir)) : PX_MAX_F32;
		PxU32 ii0 = 0;
		PxU32 ii1 = 1;
		PxU32 ii2 = 2;
		PxU32 ii3 = 3;
		PxU32 ii4 = 4;
		// PT: using integer cmps since we used fabsf above
		// const PxU32* values = reinterpret_cast<const PxU32*>(dp);
		const PxU32* values = PxUnionCast<PxU32*, PxF32*>(dp);
		PxU32 value0 = values[0];
		PxU32 value1 = values[1];
		PxU32 value2 = values[2];
		PxU32 value3 = values[3];
		PxU32 value4 = values[4];
		for(PxU32 j=0;j<5-1;j++)
		{
			if(value1<value0)
			{
				tswap(value0, value1);
				tswap(ii0, ii1);
			}
			if(value2<value1)
			{
				tswap(value1, value2);
				tswap(ii1, ii2);
			}
			if(value3<value2)
			{
				tswap(value2, value3);
				tswap(ii2, ii3);
			}
			if(value4<value3)
			{
				tswap(value3, value4);
				tswap(ii3, ii4);
			}
		}
		//return ii0|(ii1<<3)|(ii2<<6)|(ii3<<9)|(ii4<<12);
		code = ii0|(ii1<<3)|(ii2<<6)|(ii3<<9)|(ii4<<12);
	}
#endif
	dp[0] = parent.mCounters[0] ? parent.mBucketBox[0].mCenter.dot(rayDir) : PX_MAX_F32;
	dp[1] = parent.mCounters[1] ? parent.mBucketBox[1].mCenter.dot(rayDir) : PX_MAX_F32;
	dp[2] = parent.mCounters[2] ? parent.mBucketBox[2].mCenter.dot(rayDir) : PX_MAX_F32;
	dp[3] = parent.mCounters[3] ? parent.mBucketBox[3].mCenter.dot(rayDir) : PX_MAX_F32;
	dp[4] = parent.mCounters[4] ? parent.mBucketBox[4].mCenter.dot(rayDir) : PX_MAX_F32;
	const PxU32* values = PxUnionCast<PxU32*, PxF32*>(dp);
	// const PxU32 mask = ~7U;
	// Clearing the sign bit makes the integer compares behave like compares on |dp| (matching
	// the fabsf-based reference version above); clearing the low 3 bits frees room to smuggle
	// each bucket index inside its key, so the indices travel with the values through the sort.
	const PxU32 mask = 0x7ffffff8;
	PxU32 value0 = (values[0]&mask);
	PxU32 value1 = (values[1]&mask)|1;
	PxU32 value2 = (values[2]&mask)|2;
	PxU32 value3 = (values[3]&mask)|3;
	PxU32 value4 = (values[4]&mask)|4;
	// Four passes of a 4-compare bubble network fully sort the 5 keys.
#define SORT_BLOCK \
	if(value1<value0) tswap(value0, value1); \
	if(value2<value1) tswap(value1, value2); \
	if(value3<value2) tswap(value2, value3); \
	if(value4<value3) tswap(value3, value4);
	SORT_BLOCK
	SORT_BLOCK
	SORT_BLOCK
	SORT_BLOCK
	// Extract the smuggled bucket indices back out of the sorted keys.
	const PxU32 ii0 = value0&7;
	const PxU32 ii1 = value1&7;
	const PxU32 ii2 = value2&7;
	const PxU32 ii3 = value3&7;
	const PxU32 ii4 = value4&7;
	const PxU32 code2 = ii0|(ii1<<3)|(ii2<<6)|(ii3<<9)|(ii4<<12);
#ifdef VERIFY_SORT
	PX_ASSERT(code2==code);
#endif
	return code2;
}
// Precomputes, for one node, the bucket traversal order for each of the 8
// canonical octant directions (see computeDirMask). Each packed 15-bit order
// code from sort() is stored as a 16-bit value in node.mOrder[].
static void gPrecomputeSort(BucketPrunerNode& node, const PxVec3* PX_RESTRICT dirs)
{
	PxU32 dirIndex = 0;
	while(dirIndex<8)
	{
		node.mOrder[dirIndex] = PxTo16(sort(node, dirs[dirIndex]));
		dirIndex++;
	}
}
#endif
// Rebuilds the sorted/bucketed acceleration data from the "core" arrays.
// No-op unless mDirty is set. On exit the objects are reordered into
// mSortedWorldBoxes/mSortedObjects/mSortedTransforms, partitioned by the
// three 5-way levels (mLevel1/mLevel2/mLevel3), and mCoreRemap (if present)
// maps each core index to its sorted position.
void BucketPrunerCore::classifyBoxes()
{
// Lazy rebuild: only reclassify when objects were added/removed/updated.
if(!mDirty)
return;
mDirty = false;
const PxU32 nb = mCoreNbObjects;
if(!nb)
{
mSortedNb=0;
return;
}
// NOTE(review): the rebuild assumes the "free" array was flushed beforehand - confirm at call sites.
PX_ASSERT(!mNbFree);
#ifdef BRUTE_FORCE_LIMIT
// Small-scene path: skip the bucket hierarchy and simply mirror the core
// arrays into the sorted arrays, converting min/max boxes to
// center/extents form with SSE (center=(min+max)/2, extents=(max-min)/2).
if(nb<=BRUTE_FORCE_LIMIT)
{
allocateSortedMemory(nb);
BucketBox* sortedBoxes = mSortedWorldBoxes;
PrunerPayload* sortedObjects = mSortedObjects;
const float Half = 0.5f;
const __m128 HalfV = _mm_load1_ps(&Half);
PX_ALIGN(16, PxVec4) bucketCenter;
PX_ALIGN(16, PxVec4) bucketExtents;
for(PxU32 i=0;i<nb;i++)
{
const __m128 bucketBoxMinV = _mm_loadu_ps(&mCoreBoxes[i].minimum.x);
const __m128 bucketBoxMaxV = _mm_loadu_ps(&mCoreBoxes[i].maximum.x);
const __m128 bucketBoxCenterV = _mm_mul_ps(_mm_add_ps(bucketBoxMaxV, bucketBoxMinV), HalfV);
const __m128 bucketBoxExtentsV = _mm_mul_ps(_mm_sub_ps(bucketBoxMaxV, bucketBoxMinV), HalfV);
_mm_store_ps(&bucketCenter.x, bucketBoxCenterV);
_mm_store_ps(&bucketExtents.x, bucketBoxExtentsV);
sortedBoxes[i].mCenter = PxVec3(bucketCenter.x, bucketCenter.y, bucketCenter.z);
sortedBoxes[i].mExtents = PxVec3(bucketExtents.x, bucketExtents.y, bucketExtents.z);
sortedObjects[i] = mCoreObjects[i];
}
return;
}
#endif
// The payloads' first data word is hijacked below to track each object's
// core index through the classification; save the original values so they
// can be restored afterwards.
size_t* remap = reinterpret_cast<size_t*>(PX_ALLOC(nb*sizeof(size_t), ""));
for(PxU32 i=0;i<nb;i++)
{
remap[i] = mCoreObjects[i].data[0];
mCoreObjects[i].data[0] = i;
}
// printf("Nb objects: %d\n", nb);
// Scratch buffers for the out-of-place classification: stack storage for
// small object counts, heap otherwise.
PrunerPayload localTempObjects[LOCAL_SIZE];
BucketBox localTempBoxes[LOCAL_SIZE];
PxTransform localTempTransforms[LOCAL_SIZE];
PrunerPayload* tempObjects;
PxTransform* tempTransforms;
BucketBox* tempBoxes;
if(nb>LOCAL_SIZE)
{
tempObjects = PX_ALLOCATE(PrunerPayload, nb, "BucketPruner");
tempBoxes = PX_ALLOCATE(BucketBox, nb, "BucketPruner");
tempTransforms = PX_ALLOCATE(PxTransform, nb, "BucketPruner");
}
else
{
tempObjects = localTempObjects;
tempBoxes = localTempBoxes;
tempTransforms = localTempTransforms;
}
// Computes the global bounds and the principal sort axis while copying the
// objects (center/extents form) into the temp buffers.
mSortAxis = sortBoxes(nb, mCoreBoxes, mCoreObjects, mCoreTransforms, mGlobalBox, tempBoxes, tempObjects, tempTransforms);
PX_ASSERT(mSortAxis);
allocateSortedMemory(nb);
BucketBox* sortedBoxes = mSortedWorldBoxes;
PrunerPayload* sortedObjects = mSortedObjects;
PxTransform* sortedTransforms = mSortedTransforms;
// Split planes go through the global box center: X plus whichever of Y/Z
// is not the sort axis.
const PxU32 yz = PxU32(mSortAxis == 1 ? 2 : 1);
const float limitX = mGlobalBox.mCenter.x;
const float limitYZ = mGlobalBox.mCenter[yz];
// Level 1: distribute all objects into 5 buckets, then levels 2 and 3
// recursively split each bucket into 5 sub-buckets (5x5x5 leaves total).
mLevel1.classifyBoxes(limitX, limitYZ, nb, tempBoxes, tempObjects, tempTransforms, sortedBoxes, sortedObjects, sortedTransforms, false, mSortAxis);
processChildBuckets(nb, tempBoxes, tempObjects, tempTransforms, mLevel1, mLevel2, mSortedWorldBoxes, mSortedObjects, mSortedTransforms, mSortAxis);
for(PxU32 j=0;j<5;j++)
processChildBuckets(nb, tempBoxes, tempObjects, tempTransforms, mLevel2[j], mLevel3[j], mSortedWorldBoxes + mLevel1.mOffsets[j], mSortedObjects + mLevel1.mOffsets[j], mSortedTransforms + mLevel1.mOffsets[j], mSortAxis);
// Precompute the integer-encoded bounds along the sort axis used by the
// traversal code for cheap culling/early-exit.
{
for(PxU32 i=0;i<nb;i++)
{
encodeBoxMinMax(mSortedWorldBoxes[i], mSortAxis);
}
}
if(nb>LOCAL_SIZE)
{
PX_FREE(tempTransforms);
PX_FREE(tempBoxes);
PX_FREE(tempObjects);
}
// Restore the saved payload words and record each core object's position
// in the sorted arrays.
for(PxU32 i=0;i<nb;i++)
{
const PxU32 coreIndex = PxU32(mSortedObjects[i].data[0]);
const size_t saved = remap[coreIndex];
mSortedObjects[i].data[0] = saved;
mCoreObjects[coreIndex].data[0] = saved;
if(mCoreRemap)
mCoreRemap[coreIndex] = i;
// remap[i] = mCoreObjects[i].data[0];
// mCoreObjects[i].data[0] = i;
}
PX_FREE(remap);
/* if(mOwnMemory)
{
PX_FREE(mCoreBoxes);
PX_FREE(mCoreObjects);
}*/
#ifdef NODE_SORT
// Precompute, for every node and for each of the 8 ray-direction octants,
// the near-to-far bucket traversal order used by raycasts/sweeps.
{
PxVec3 dirs[8];
dirs[0] = PxVec3(1.0f, 1.0f, 1.0f);
dirs[1] = PxVec3(1.0f, 1.0f, -1.0f);
dirs[2] = PxVec3(1.0f, -1.0f, 1.0f);
dirs[3] = PxVec3(1.0f, -1.0f, -1.0f);
dirs[4] = PxVec3(-1.0f, 1.0f, 1.0f);
dirs[5] = PxVec3(-1.0f, 1.0f, -1.0f);
dirs[6] = PxVec3(-1.0f, -1.0f, 1.0f);
dirs[7] = PxVec3(-1.0f, -1.0f, -1.0f);
for(int i=0;i<8;i++)
dirs[i].normalize();
gPrecomputeSort(mLevel1, dirs);
for(PxU32 i=0;i<5;i++)
gPrecomputeSort(mLevel2[i], dirs);
for(PxU32 j=0;j<5;j++)
{
for(PxU32 i=0;i<5;i++)
gPrecomputeSort(mLevel3[j][i], dirs);
}
}
#endif
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#ifdef CAN_USE_MOVEMASK
namespace
{
// Precomputed segment data for the SAT-based segment-vs-AABB test (see
// precomputeRayData). Each vector is 16-byte aligned and followed by an
// explicit padding float so the SIMD path can use aligned 4-float loads.
struct RayParams
{
PX_ALIGN(16, PxVec3 mData2); float padding0; // segment center (orig + mData)
PX_ALIGN(16, PxVec3 mFDir); float padding1; // abs(mData), per component
PX_ALIGN(16, PxVec3 mData); float padding2; // segment half-vector (0.5*dir*maxDist)
PX_ALIGN(16, PxVec3 mInflate); float padding3; // query extents (only set when inflating)
};
}
// Converts a ray (origin, direction, max distance) into the centered-segment
// representation used by segmentAABB():
//   mData  = half-vector of the segment = 0.5 * rayDir * maxDist
//   mData2 = segment center             = rayOrig + mData
//   mFDir  = per-component |mData|
static PX_FORCE_INLINE void precomputeRayData(RayParams* PX_RESTRICT rayParams, const PxVec3& rayOrig, const PxVec3& rayDir, float maxDist)
{
#ifdef USE_SIMD
const float Half = 0.5f * maxDist;
const __m128 HalfV = _mm_load1_ps(&Half);
const __m128 DataV = _mm_mul_ps(_mm_loadu_ps(&rayDir.x), HalfV);
const __m128 Data2V = _mm_add_ps(_mm_loadu_ps(&rayOrig.x), DataV);
// fabsf implemented by masking off the IEEE sign bit.
const PxU32 MaskI = 0x7fffffff;
const __m128 FDirV = _mm_and_ps(_mm_load1_ps(reinterpret_cast<const float*>(&MaskI)), DataV);
// Aligned stores also overwrite the adjacent padding floats of RayParams.
_mm_store_ps(&rayParams->mData.x, DataV);
_mm_store_ps(&rayParams->mData2.x, Data2V);
_mm_store_ps(&rayParams->mFDir.x, FDirV);
#else
const PxVec3 data = 0.5f * rayDir * maxDist;
rayParams->mData = data;
rayParams->mData2 = rayOrig + data;
rayParams->mFDir.x = PxAbs(data.x);
rayParams->mFDir.y = PxAbs(data.y);
rayParams->mFDir.z = PxAbs(data.z);
#endif
}
// Separating-axis test between a centered segment (see precomputeRayData) and
// a bucket box. When inflateT!=0 the box extents are grown by params->mInflate
// (swept-AABB queries). Returns non-zero on intersection.
template <int inflateT>
static PX_FORCE_INLINE PxIntBool segmentAABB(const BucketBox& box, const RayParams* PX_RESTRICT params)
{
#ifdef USE_SIMD
const PxU32 maskI = 0x7fffffff;
const __m128 fdirV = _mm_load_ps(&params->mFDir.x);
// #ifdef _DEBUG
const __m128 extentsV = inflateT ? _mm_add_ps(_mm_loadu_ps(&box.mExtents.x), _mm_load_ps(&params->mInflate.x)) : _mm_loadu_ps(&box.mExtents.x);
const __m128 DV = _mm_sub_ps(_mm_load_ps(&params->mData2.x), _mm_loadu_ps(&box.mCenter.x));
/* #else
const __m128 extentsV = inflateT ? _mm_add_ps(_mm_load_ps(&box.mExtents.x), _mm_load_ps(&params->mInflate.x)) : _mm_load_ps(&box.mExtents.x);
const __m128 DV = _mm_sub_ps(_mm_load_ps(&params->mData2.x), _mm_load_ps(&box.mCenter.x));
#endif*/
// Class I axes (x,y,z): |centerDelta| > boxExtents + |segmentHalfVector|
// means separation. The movemask gathers per-lane sign bits; '&7' keeps
// only the x/y/z lanes and ignores w.
__m128 absDV = _mm_and_ps(DV, _mm_load1_ps(reinterpret_cast<const float*>(&maskI)));
absDV = _mm_cmpgt_ps(absDV, _mm_add_ps(extentsV, fdirV));
const PxU32 test = PxU32(_mm_movemask_ps(absDV));
if(test&7)
return 0;
// Class II axes: cross products of the segment direction with the box
// axes, computed for all three lanes at once via shuffles.
const __m128 dataZYX_V = _mm_load_ps(&params->mData.x);
const __m128 dataXZY_V = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(dataZYX_V), _MM_SHUFFLE(3,0,2,1)));
const __m128 DXZY_V = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(DV), _MM_SHUFFLE(3,0,2,1)));
const __m128 fV = _mm_sub_ps(_mm_mul_ps(dataZYX_V, DXZY_V), _mm_mul_ps(dataXZY_V, DV));
const __m128 fdirZYX_V = _mm_load_ps(&params->mFDir.x);
const __m128 fdirXZY_V = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(fdirZYX_V), _MM_SHUFFLE(3,0,2,1)));
const __m128 extentsXZY_V = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(extentsV), _MM_SHUFFLE(3,0,2,1)));
const __m128 fg = _mm_add_ps(_mm_mul_ps(extentsV, fdirXZY_V), _mm_mul_ps(extentsXZY_V, fdirZYX_V));
__m128 absfV = _mm_and_ps(fV, _mm_load1_ps(reinterpret_cast<const float*>(&maskI)));
absfV = _mm_cmpgt_ps(absfV, fg);
const PxU32 test2 = PxU32(_mm_movemask_ps(absfV));
if(test2&7)
return 0;
return 1;
#else
// Scalar reference version of the same SAT: three box axes, then three
// cross-product axes, each with an early-out.
const float boxExtentsx = inflateT ? box.mExtents.x + params->mInflate.x : box.mExtents.x;
const float Dx = params->mData2.x - box.mCenter.x; if(fabsf(Dx) > boxExtentsx + params->mFDir.x) return PxIntFalse;
const float boxExtentsz = inflateT ? box.mExtents.z + params->mInflate.z : box.mExtents.z;
const float Dz = params->mData2.z - box.mCenter.z; if(fabsf(Dz) > boxExtentsz + params->mFDir.z) return PxIntFalse;
const float boxExtentsy = inflateT ? box.mExtents.y + params->mInflate.y : box.mExtents.y;
const float Dy = params->mData2.y - box.mCenter.y; if(fabsf(Dy) > boxExtentsy + params->mFDir.y) return PxIntFalse;
float f;
f = params->mData.y * Dz - params->mData.z * Dy; if(fabsf(f) > boxExtentsy*params->mFDir.z + boxExtentsz*params->mFDir.y) return PxIntFalse;
f = params->mData.z * Dx - params->mData.x * Dz; if(fabsf(f) > boxExtentsx*params->mFDir.z + boxExtentsz*params->mFDir.x) return PxIntFalse;
f = params->mData.x * Dy - params->mData.y * Dx; if(fabsf(f) > boxExtentsx*params->mFDir.y + boxExtentsy*params->mFDir.x) return PxIntFalse;
return PxIntTrue;
#endif
}
#else
#include "GuBVHTestsSIMD.h"
typedef RayAABBTest BPRayAABBTest;
// Segment-vs-bucket-box test for the non-movemask build, delegating to the
// shared SIMD RayAABBTest. inflateT!=0 selects the swept (inflated) variant.
template <int inflateT>
static PX_FORCE_INLINE PxIntBool segmentAABB(const BucketBox& box, const BPRayAABBTest& test)
{
return static_cast<PxIntBool>(test.check<inflateT>(V3LoadU(box.mCenter), V3LoadU(box.mExtents)));
}
/*static PX_FORCE_INLINE IntBool segmentAABB(const BucketBox& box, const BPRayAABBTest& test, PxU32 rayMinLimitX, PxU32 rayMaxLimitX)
{
if(rayMinLimitX>box.mData1)
return 0;
if(rayMaxLimitX<box.mData0)
return 0;
return test(Vec3V_From_PxVec3(box.mCenter), Vec3V_From_PxVec3(box.mExtents));
}*/
#endif
namespace
{
// Adapts a PrunerRaycastCallback (which expects payload/transform arrays plus
// a primitive index) so bucket-traversal code can invoke it with
// bucket-relative indices: the payload/transform base pointers are pre-offset
// by the caller.
struct BucketPrunerRaycastAdapter
{
PX_FORCE_INLINE BucketPrunerRaycastAdapter(PrunerRaycastCallback& pcb, const PrunerPayload* payloads, const PxTransform* transforms) :
mCallback(pcb), mPayloads(payloads), mTransforms(transforms) {}
// Forwards to the wrapped callback; 'distance' may be shrunk by the callee.
PX_FORCE_INLINE bool invoke(PxReal& distance, PxU32 primIndex)
{
return mCallback.invoke(distance, primIndex, mPayloads, mTransforms);
}
PrunerRaycastCallback& mCallback;
const PrunerPayload* mPayloads;
const PxTransform* mTransforms;
PX_NOCOPY(BucketPrunerRaycastAdapter)
};
// Same adaptation for overlap queries (no distance involved).
struct BucketPrunerOverlapAdapter
{
PX_FORCE_INLINE BucketPrunerOverlapAdapter(PrunerOverlapCallback& pcb, const PrunerPayload* payloads, const PxTransform* transforms) :
mCallback(pcb), mPayloads(payloads), mTransforms(transforms) {}
PX_FORCE_INLINE bool invoke(PxU32 primIndex)
{
return mCallback.invoke(primIndex, mPayloads, mTransforms);
}
PrunerOverlapCallback& mCallback;
const PrunerPayload* mPayloads;
const PxTransform* mTransforms;
PX_NOCOPY(BucketPrunerOverlapAdapter)
};
}
// Raycasts/sweeps against one leaf bucket. Boxes in a bucket are stored
// sorted along 'sortAxis' with integer-encoded bounds (mData0/mData1), so
// boxes entirely before the ray's min limit are skipped and the loop exits
// as soon as a box starts past the max limit. When a hit shrinks maxDist,
// the ray data and the integer limits are recomputed, and the tightened
// limits are written back to the caller so sibling buckets benefit too.
// Returns false if the callback asked to abort the query.
template <int inflateT>
static bool processBucket(
PxU32 nb, const BucketBox* PX_RESTRICT baseBoxes, const PrunerPayload* PX_RESTRICT baseObjects,
const PxTransform* PX_RESTRICT baseTransforms, PxU32 offset, PxU32 totalAllocated,
const PxVec3& rayOrig, const PxVec3& rayDir, float& maxDist,
#ifdef CAN_USE_MOVEMASK
RayParams* PX_RESTRICT rayParams,
#else
BPRayAABBTest& test, const PxVec3& inflate,
#endif
PrunerRaycastCallback& pcbArgName, PxU32& _rayMinLimitInt, PxU32& _rayMaxLimitInt, PxU32 sortAxis)
{
PX_UNUSED(totalAllocated);
const BucketBox* PX_RESTRICT _boxes = baseBoxes + offset;
BucketPrunerRaycastAdapter pcb(pcbArgName, baseObjects + offset, baseTransforms + offset);
PxU32 rayMinLimitInt = _rayMinLimitInt;
PxU32 rayMaxLimitInt = _rayMaxLimitInt;
const BucketBox* last = _boxes + nb;
PxU32 objectID = 0;
while(_boxes!=last)
{
const BucketBox& currentBox = *_boxes++;
const PxU32 currentID = objectID++;
// Cheap integer culling along the sort axis before the full SAT test.
if(currentBox.mData1<rayMinLimitInt)
continue;
if(currentBox.mData0>maxLimit below => nothing further can overlap
goto Exit;
#ifdef CAN_USE_MOVEMASK
if(!segmentAABB<inflateT>(currentBox, rayParams))
continue;
#else
if(!segmentAABB<inflateT>(currentBox, test))
continue;
#endif
#ifdef NODE_SORT
// Builds a 3-bit octant index from the sign bits of the direction vector:
// bit 2 = sign(x), bit 1 = sign(y), bit 0 = sign(z). Used to pick the
// precomputed bucket traversal order matching the ray's octant.
static PxU32 computeDirMask(const PxVec3& dir)
{
	const PxU32* bits = reinterpret_cast<const PxU32*>(&dir.x);
	PxU32 octant = 0;
	if(bits[0] & 0x80000000)
		octant |= 4;
	if(bits[1] & 0x80000000)
		octant |= 2;
	if(bits[2] & 0x80000000)
		octant |= 1;
	return octant;
}
#endif
// Shared kernel for raycasts (inflateT=0) and sweeps (inflateT=1, 'inflate' =
// query AABB extents). Clips the ray, tests the free-list objects, then walks
// the 3-level bucket hierarchy near-to-far (when NODE_SORT is enabled) and
// raycasts each intersected leaf bucket via processBucket(). Returns false if
// the callback aborted the query; otherwise maxDist holds the closest hit.
template <int inflateT>
static bool stab(const BucketPrunerCore& core, PrunerRaycastCallback& pcbArgName, const PxVec3& rayOrig, const PxVec3& rayDir, float& maxDist, const PxVec3 inflate)
{
const PxU32 nb = core.mSortedNb;
if(!nb
#ifdef FREE_PRUNER_SIZE
&& !core.mNbFree
#endif
)
return true;
// Unbounded rays are clipped against the (inflated) global bounds so the
// encoded-limit culling below has finite values to work with.
if(maxDist==PX_MAX_F32)
{
/*const*/ PxVec3 boxMin = core.mGlobalBox.getMin() - inflate;
/*const*/ PxVec3 boxMax = core.mGlobalBox.getMax() + inflate;
#ifdef FREE_PRUNER_SIZE
if(core.mNbFree)
{
// TODO: optimize this
PxBounds3 freeGlobalBounds;
freeGlobalBounds.setEmpty();
for(PxU32 i=0;i<core.mNbFree;i++)
freeGlobalBounds.include(core.mFreeBounds[i]);
freeGlobalBounds.minimum -= inflate;
freeGlobalBounds.maximum += inflate;
boxMin = boxMin.minimum(freeGlobalBounds.minimum);
boxMax = boxMax.maximum(freeGlobalBounds.maximum);
}
#endif
clipRay(rayOrig, rayDir, maxDist, boxMin, boxMax);
}
// Set up the segment-vs-AABB test data for this build configuration.
#ifdef CAN_USE_MOVEMASK
RayParams rayParams;
#ifdef USE_SIMD
rayParams.padding0 = rayParams.padding1 = rayParams.padding2 = rayParams.padding3 = 0.0f;
#endif
if(inflateT)
rayParams.mInflate = inflate;
precomputeRayData(&rayParams, rayOrig, rayDir, maxDist);
#else
BPRayAABBTest test(rayOrig, rayDir, maxDist, inflateT ? inflate : PxVec3(0.0f));
#endif
#ifdef FREE_PRUNER_SIZE
// Objects in the small "free" array live outside the bucket hierarchy and
// are tested linearly.
BucketPrunerRaycastAdapter pcb(pcbArgName, core.mFreeObjects, core.mFreeTransforms);
for(PxU32 i=0;i<core.mNbFree;i++)
{
BucketBox tmp;
tmp.mCenter = core.mFreeBounds[i].getCenter();
tmp.mExtents = core.mFreeBounds[i].getExtents();
#ifdef CAN_USE_MOVEMASK
if(segmentAABB<inflateT>(tmp, &rayParams))
#else
if(segmentAABB<inflateT>(tmp, test))
#endif
{
if(!pcb.invoke(maxDist, i))
return false;
}
}
#endif
if(!nb)
return true;
// Early-out if the ray misses the whole structure.
#ifdef CAN_USE_MOVEMASK
if(!segmentAABB<inflateT>(core.mGlobalBox, &rayParams))
return true;
#else
if(!segmentAABB<inflateT>(core.mGlobalBox, test))
return true;
#endif
// Integer-encoded ray interval along the sort axis, used for cheap
// per-box culling inside the buckets (see encodeBoxMinMax).
const PxU32 sortAxis = core.mSortAxis;
float rayMinLimit, rayMaxLimit;
if(inflateT)
computeRayLimits(rayMinLimit, rayMaxLimit, rayOrig, rayDir, maxDist, inflate, sortAxis);
else
computeRayLimits(rayMinLimit, rayMaxLimit, rayOrig, rayDir, maxDist, sortAxis);
const PxU32* binaryMinLimit = reinterpret_cast<const PxU32*>(&rayMinLimit);
const PxU32* binaryMaxLimit = reinterpret_cast<const PxU32*>(&rayMaxLimit);
PxU32 rayMinLimitInt = encodeFloat(binaryMinLimit[0]);
PxU32 rayMaxLimitInt = encodeFloat(binaryMaxLimit[0]);
/*
float rayMinLimitX, rayMaxLimitX;
if(inflateT)
computeRayLimits(rayMinLimitX, rayMaxLimitX, rayOrig, rayDir, maxDist, inflate, 0);
else
computeRayLimits(rayMinLimitX, rayMaxLimitX, rayOrig, rayDir, maxDist, 0);
PxU32 rayMinLimitIntX = encodeFloat(PX_IR(rayMinLimitX));
PxU32 rayMaxLimitIntX = encodeFloat(PX_IR(rayMaxLimitX));
*/
float currentDist = maxDist;
// Three nested 5-way loops over the bucket hierarchy. With NODE_SORT the
// loop order comes from the precomputed per-octant order codes (3 bits per
// bucket index, consumed low-to-high).
#ifdef NODE_SORT
const PxU32 dirIndex = computeDirMask(rayDir);
PxU32 orderi = core.mLevel1.mOrder[dirIndex];
// PxU32 orderi = sort(core.mLevel1, rayDir);
for(PxU32 i_=0;i_<5;i_++)
{
const PxU32 i = orderi&7; orderi>>=3;
#else
for(PxU32 i=0;i<5;i++)
{
#endif
#ifdef CAN_USE_MOVEMASK
if(core.mLevel1.mCounters[i] && segmentAABB<inflateT>(core.mLevel1.mBucketBox[i], &rayParams))
#else
if(core.mLevel1.mCounters[i] && segmentAABB<inflateT>(core.mLevel1.mBucketBox[i], test))
// if(core.mLevel1.mCounters[i] && segmentAABB<inflateT>(core.mLevel1.mBucketBox[i], test, rayMinLimitIntX, rayMaxLimitIntX))
#endif
{
#ifdef NODE_SORT
PxU32 orderj = core.mLevel2[i].mOrder[dirIndex];
// PxU32 orderj = sort(core.mLevel2[i], rayDir);
for(PxU32 j_=0;j_<5;j_++)
{
const PxU32 j = orderj&7; orderj>>=3;
#else
for(PxU32 j=0;j<5;j++)
{
#endif
#ifdef CAN_USE_MOVEMASK
if(core.mLevel2[i].mCounters[j] && segmentAABB<inflateT>(core.mLevel2[i].mBucketBox[j], &rayParams))
#else
if(core.mLevel2[i].mCounters[j] && segmentAABB<inflateT>(core.mLevel2[i].mBucketBox[j], test))
// if(core.mLevel2[i].mCounters[j] && segmentAABB<inflateT>(core.mLevel2[i].mBucketBox[j], test, rayMinLimitIntX, rayMaxLimitIntX))
#endif
{
const BucketPrunerNode& parent = core.mLevel3[i][j];
const PxU32 parentOffset = core.mLevel1.mOffsets[i] + core.mLevel2[i].mOffsets[j];
#ifdef NODE_SORT
PxU32 orderk = parent.mOrder[dirIndex];
// PxU32 orderk = sort(parent, rayDir);
for(PxU32 k_=0;k_<5;k_++)
{
const PxU32 k = orderk&7; orderk>>=3;
#else
for(PxU32 k=0;k<5;k++)
{
#endif
const PxU32 nbInBucket = parent.mCounters[k];
#ifdef CAN_USE_MOVEMASK
if(nbInBucket && segmentAABB<inflateT>(parent.mBucketBox[k], &rayParams))
#else
if(nbInBucket && segmentAABB<inflateT>(parent.mBucketBox[k], test))
// if(nbInBucket && segmentAABB<inflateT>(parent.mBucketBox[k], test, rayMinLimitIntX, rayMaxLimitIntX))
#endif
{
const PxU32 offset = parentOffset + parent.mOffsets[k];
const bool again = processBucket<inflateT>( nbInBucket, core.mSortedWorldBoxes, core.mSortedObjects,
core.mSortedTransforms,
offset, core.mSortedNb,
rayOrig, rayDir, currentDist,
#ifdef CAN_USE_MOVEMASK
&rayParams,
#else
test, inflate,
#endif
pcbArgName,
rayMinLimitInt, rayMaxLimitInt,
sortAxis);
if(!again)
return false;
}
}
}
}
}
}
maxDist = currentDist;
return true;
}
// Raycast entry point: forwards to the shared stab() kernel with inflation
// disabled (inflateT=0) and zero extents.
bool BucketPrunerCore::raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& pcb) const
{
return ::stab<0>(*this, pcb, origin, unitDir, inOutDistance, PxVec3(0.0f));
}
bool BucketPrunerCore::sweep(const ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& pcb) const
{
const PxVec3 extents = queryVolume.getPrunerInflatedWorldAABB().getExtents();
return ::stab<1>(*this, pcb, queryVolume.getPrunerInflatedWorldAABB().getCenter(), unitDir, inOutDistance, extents);
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// PT: TODO: decoupling the pruner callback revealed quite a bug here: we call this processBucket function with an inflateT param,
// which is re-interpreted as "doAssert" for overlaps! What happened here?
// Overlap-tests one leaf bucket. Boxes are sorted along the sort axis with
// integer-encoded bounds, so boxes ending before the query's min limit are
// skipped and the loop stops at the first box starting past the max limit.
// With doAssert set (the "precise" traversal), the culled boxes are asserted
// to really not overlap. Returns false if the callback aborted the query.
template<bool doAssert, typename Test>
static PX_FORCE_INLINE bool processBucket( PxU32 nb, const BucketBox* PX_RESTRICT baseBoxes, const PrunerPayload* PX_RESTRICT baseObjects,
const PxTransform* PX_RESTRICT baseTransforms,
PxU32 offset, PxU32 totalAllocated,
const Test& test, PrunerOverlapCallback& pcbArgName,
PxU32 minLimitInt, PxU32 maxLimitInt)
{
PX_UNUSED(totalAllocated);
const BucketBox* PX_RESTRICT boxes = baseBoxes + offset;
BucketPrunerOverlapAdapter pcb(pcbArgName, baseObjects + offset, baseTransforms + offset);
for(PxU32 i=0;i<nb;i++)
{
const BucketBox& currentBox = *boxes++;
// Box ends before the query interval: skip (debug builds verify the cull).
if(currentBox.mData1<minLimitInt)
{
if(doAssert)
PX_ASSERT(!test(currentBox));
continue;
}
// Box starts after the query interval: everything further is sorted
// past it too, so the bucket is done.
if(currentBox.mData0>maxLimitInt)
{
if(doAssert)
PX_ASSERT(!test(currentBox));
return true;
}
if(test(currentBox))
{
if(!pcb.invoke(i))
return false;
}
}
return true;
}
// Generic overlap traversal over the 3-level bucket hierarchy, parameterized
// by the volume-vs-AABB 'Test' functor. 'isPrecise' is forwarded to
// processBucket's doAssert parameter (debug verification of the sort-axis
// culling). 'cullBox' provides the query's interval along the sort axis.
template<typename Test, bool isPrecise>
class BucketPrunerOverlapTraversal
{
public:
PX_FORCE_INLINE BucketPrunerOverlapTraversal() {}
// Returns false if the callback aborted the query.
/*PX_FORCE_INLINE*/ bool operator()(const BucketPrunerCore& core, const Test& test, PrunerOverlapCallback& pcbArgName, const PxBounds3& cullBox) const
{
#ifdef FREE_PRUNER_SIZE
// Free-list objects live outside the hierarchy and are tested linearly.
BucketPrunerOverlapAdapter pcb(pcbArgName, core.mFreeObjects, core.mFreeTransforms);
for(PxU32 i=0;i<core.mNbFree;i++)
{
if(test(core.mFreeBounds[i]))
{
if(!pcb.invoke(i))
return false;
}
}
#endif
const PxU32 nb = core.mSortedNb;
if(!nb)
return true;
#ifdef BRUTE_FORCE_LIMIT
// NOTE(review): this dormant branch uses 'pcb' (only declared under
// FREE_PRUNER_SIZE) and an invoke() signature the adapter does not
// provide - verify before ever enabling BRUTE_FORCE_LIMIT.
if(nb<=BRUTE_FORCE_LIMIT)
{
for(PxU32 i=0;i<nb;i++)
{
if(test(core.mSortedWorldBoxes[i]))
{
PxReal dist = -1.0f; // no distance for overlaps
if(!pcb.invoke(dist, core.mSortedObjects[i]))
return false;
}
}
return true;
}
#endif
if(!test(core.mGlobalBox))
return true;
// Integer-encoded query interval along the sort axis, matching the
// encodeBoxMinMax encoding of the sorted boxes.
const PxU32 sortAxis = core.mSortAxis;
const float boxMinLimit = cullBox.minimum[sortAxis];
const float boxMaxLimit = cullBox.maximum[sortAxis];
const PxU32* binaryMinLimit = reinterpret_cast<const PxU32*>(&boxMinLimit);
const PxU32* binaryMaxLimit = reinterpret_cast<const PxU32*>(&boxMaxLimit);
const PxU32 rayMinLimitInt = encodeFloat(binaryMinLimit[0]);
const PxU32 rayMaxLimitInt = encodeFloat(binaryMaxLimit[0]);
// Recurse through the 5x5x5 buckets, testing node boxes on the way down.
for(PxU32 i=0;i<5;i++)
{
if(core.mLevel1.mCounters[i] && test(core.mLevel1.mBucketBox[i]))
{
for(PxU32 j=0;j<5;j++)
{
if(core.mLevel2[i].mCounters[j] && test(core.mLevel2[i].mBucketBox[j]))
{
for(PxU32 k=0;k<5;k++)
{
const PxU32 nbInBucket = core.mLevel3[i][j].mCounters[k];
if(nbInBucket && test(core.mLevel3[i][j].mBucketBox[k]))
{
const PxU32 offset = core.mLevel1.mOffsets[i] + core.mLevel2[i].mOffsets[j] + core.mLevel3[i][j].mOffsets[k];
if(!processBucket<isPrecise>(nbInBucket, core.mSortedWorldBoxes, core.mSortedObjects,
core.mSortedTransforms,
offset, core.mSortedNb, test, pcbArgName, rayMinLimitInt, rayMaxLimitInt))
return false;
}
}
}
}
}
}
return true;
}
};
///////////////////////////////////////////////////////////////////////////////
#ifdef CAN_USE_MOVEMASK
// Returns 1 when the x/y/z lanes of the SIMD boolean are all true; the w lane
// is ignored. Implemented via the per-lane sign-bit mask from movemask.
PX_FORCE_INLINE PxU32 BAllTrue3_R(const BoolV a)
{
	const PxU32 signBits = PxU32(_mm_movemask_ps(a));
	return PxU32((signBits & 0x7) == 0x7);
}
#endif
#ifdef USE_SIMD
// SIMD sphere-vs-AABB overlap test functor: clamps the sphere center to the
// box to get the closest point, then compares the squared distance to the
// squared radius.
struct SphereAABBTest_SIMD
{
PX_FORCE_INLINE SphereAABBTest_SIMD(const Sphere& sphere) :
#ifdef CAN_USE_MOVEMASK
mCenter (V4LoadU(&sphere.center.x)),
#else
mCenter (V3LoadU(sphere.center)),
#endif
mRadius2(FLoad(sphere.radius * sphere.radius))
{}
PX_FORCE_INLINE PxIntBool operator()(const BucketBox& box) const
{
#ifdef CAN_USE_MOVEMASK
// 4-lane variant: the comparison result is reduced with BAllTrue3_R so
// the meaningless w lane does not affect the outcome.
const Vec4V boxCenter = AlignedLoad(&box.mCenter.x);
const Vec4V boxExtents = AlignedLoad(&box.mExtents.x);
//
const Vec4V offset = V4Sub(mCenter, boxCenter);
const Vec4V closest = V4Clamp(offset, V4Neg(boxExtents), boxExtents);
const Vec4V d = V4Sub(offset, closest);
const FloatV dot = V4Dot3(d,d);
return PxIntBool(BAllTrue3_R(FIsGrtrOrEq(mRadius2, dot)));
#else
const Vec3V boxCenter = V3LoadU(box.mCenter);
const Vec3V boxExtents = V3LoadU(box.mExtents);
//
const Vec3V offset = V3Sub(mCenter, boxCenter);
const Vec3V closest = V3Clamp(offset, V3Neg(boxExtents), boxExtents);
const Vec3V d = V3Sub(offset, closest);
return PxIntBool(BAllEqTTTT(FIsGrtrOrEq(mRadius2, V3Dot(d, d))));
#endif
}
// Overload for raw bounds (used for the free-list objects).
PX_FORCE_INLINE PxIntBool operator()(const PxBounds3& bounds) const
{
BucketBox tmp;
tmp.mCenter = bounds.getCenter();
tmp.mExtents = bounds.getExtents();
return (*this)(tmp);
}
private:
SphereAABBTest_SIMD& operator=(const SphereAABBTest_SIMD&);
#ifdef CAN_USE_MOVEMASK
const Vec4V mCenter;
#else
const Vec3V mCenter;
#endif
const FloatV mRadius2; // squared radius
};
#else
// Scalar sphere-vs-AABB overlap test functor: accumulates the squared
// distance from the sphere center to the box, axis by axis, and compares it
// against the squared radius.
struct SphereAABBTest_Scalar
{
	PX_FORCE_INLINE SphereAABBTest_Scalar(const Sphere& sphere) :
		mCenter	(sphere.center),
		mRadius2(sphere.radius * sphere.radius)
	{}

	PX_FORCE_INLINE PxIntBool operator()(const BucketBox& box) const
	{
		const PxVec3 boxMin = box.getMin();
		const PxVec3 boxMax = box.getMax();

		// Only axes where the center lies outside the box contribute to the
		// squared distance.
		float sqDist = 0.0f;
		for(PxU32 axis=0;axis<3;axis++)
		{
			const float c = mCenter[axis];
			if(c<boxMin[axis])
			{
				const float delta = c - boxMin[axis];
				sqDist += delta*delta;
			}
			else if(c>boxMax[axis])
			{
				const float delta = c - boxMax[axis];
				sqDist += delta*delta;
			}
		}
		return sqDist <= mRadius2;
	}
private:
	SphereAABBTest_Scalar& operator=(const SphereAABBTest_Scalar&);
	const PxVec3	mCenter;
	float			mRadius2;	// squared radius
};
#endif
#ifdef USE_SIMD
typedef SphereAABBTest_SIMD BucketPrunerSphereAABBTest;
#else
typedef SphereAABBTest_Scalar BucketPrunerSphereAABBTest;
#endif
///////////////////////////////////////////////////////////////////////////////
// AABB-vs-AABB overlap test functor used by the overlap traversal.
struct BucketPrunerAABBAABBTest
{
	PX_FORCE_INLINE BucketPrunerAABBAABBTest(const PxBounds3& queryBox) : mBox(queryBox)	{}

	PX_FORCE_INLINE PxIntBool operator()(const BucketBox& box) const
	{
		// PT: we don't use PxBounds3::intersects() because isValid() asserts on our empty boxes!
		// Boxes overlap iff no axis separates them.
		const PxVec3 otherMin = box.getMin();
		const PxVec3 otherMax = box.getMax();
		const bool separatedX = mBox.minimum.x > otherMax.x || otherMin.x > mBox.maximum.x;
		const bool separatedY = mBox.minimum.y > otherMax.y || otherMin.y > mBox.maximum.y;
		const bool separatedZ = mBox.minimum.z > otherMax.z || otherMin.z > mBox.maximum.z;
		return !(separatedX || separatedY || separatedZ);
	}

	// Overload for raw bounds (used for the free-list objects).
	PX_FORCE_INLINE PxIntBool operator()(const PxBounds3& bounds) const
	{
		// PT: we don't use PxBounds3::intersects() because isValid() asserts on our empty boxes!
		const PxVec3& otherMin = bounds.minimum;
		const PxVec3& otherMax = bounds.maximum;
		const bool separatedX = mBox.minimum.x > otherMax.x || otherMin.x > mBox.maximum.x;
		const bool separatedY = mBox.minimum.y > otherMax.y || otherMin.y > mBox.maximum.y;
		const bool separatedZ = mBox.minimum.z > otherMax.z || otherMin.z > mBox.maximum.z;
		return !(separatedX || separatedY || separatedZ);
	}
private:
	BucketPrunerAABBAABBTest& operator=(const BucketPrunerAABBAABBTest&);
	const PxBounds3	mBox;
};
/*struct BucketPrunerAABBAABBTest_SIMD
{
PX_FORCE_INLINE BucketPrunerAABBAABBTest_SIMD(const PxBounds3& b)
: mCenter(V3LoadU(b.getCenter()))
, mExtents(V3LoadU(b.getExtents()))
{}
PX_FORCE_INLINE PxIntBool operator()(const BucketBox& box) const
{
return V3AllGrtrOrEq(V3Add(mExtents, AlignedLoad(&box.mExtents.x)), V3Abs(V3Sub(AlignedLoad(&box.mCenter.x), mCenter)));
}
private:
BucketPrunerAABBAABBTest_SIMD& operator=(const BucketPrunerAABBAABBTest_SIMD&);
const Vec3V mCenter, mExtents;
};*/
///////////////////////////////////////////////////////////////////////////////
#ifdef USE_SIMD
// SIMD OBB-vs-AABB separating-axis test functor. Only the class I (AABB axes)
// and class II (OBB axes) tests are performed; the 9 edge-cross-product axes
// are left disabled (see the commented "fullTest" code), so the test is
// conservative: it can report overlap for some non-overlapping pairs, never
// the reverse.
struct OBBAABBTest_SIMD
{
OBBAABBTest_SIMD(const PxMat33& rotation, const PxVec3& translation, const PxVec3& extents)
{
const Vec3V eps = V3Load(1e-6f);
mT = V3LoadU(translation);
mExtents = V3LoadU(extents);
// storing the transpose matrices yields a simpler SIMD test
mRT = Mat33V_From_PxMat33(rotation.getTranspose());
// Absolute rotation, biased by epsilon to guard against floating-point
// inaccuracies when axes are nearly parallel.
mART = Mat33V(V3Add(V3Abs(mRT.col0), eps), V3Add(V3Abs(mRT.col1), eps), V3Add(V3Abs(mRT.col2), eps));
// Projection of the OBB extents onto the coordinate axes.
mBB_xyz = M33TrnspsMulV3(mART, mExtents);
/* if(fullTest)
{
const Vec3V eYZX = V3PermYZX(mExtents), eZXY = V3PermZXY(mExtents);
mBB_123 = V3MulAdd(eYZX, V3PermZXY(mART.col0), V3Mul(eZXY, V3PermYZX(mART.col0)));
mBB_456 = V3MulAdd(eYZX, V3PermZXY(mART.col1), V3Mul(eZXY, V3PermYZX(mART.col1)));
mBB_789 = V3MulAdd(eYZX, V3PermZXY(mART.col2), V3Mul(eZXY, V3PermYZX(mART.col2)));
}*/
}
PX_FORCE_INLINE PxIntBool operator()(const BucketBox& box) const
{
const Vec3V extentsV = V3LoadU(box.mExtents);
const Vec3V t = V3Sub(mT, V3LoadU(box.mCenter));
// class I - axes of AABB
if(V3OutOfBounds(t, V3Add(extentsV, mBB_xyz)))
return PxIntFalse;
const Vec3V rX = mRT.col0, rY = mRT.col1, rZ = mRT.col2;
const Vec3V arX = mART.col0, arY = mART.col1, arZ = mART.col2;
const FloatV eX = V3GetX(extentsV), eY = V3GetY(extentsV), eZ = V3GetZ(extentsV);
const FloatV tX = V3GetX(t), tY = V3GetY(t), tZ = V3GetZ(t);
// class II - axes of OBB
{
const Vec3V v = V3ScaleAdd(rZ, tZ, V3ScaleAdd(rY, tY, V3Scale(rX, tX)));
const Vec3V v2 = V3ScaleAdd(arZ, eZ, V3ScaleAdd(arY, eY, V3ScaleAdd(arX, eX, mExtents)));
if(V3OutOfBounds(v, v2))
return PxIntFalse;
}
// if(!fullTest)
return PxIntTrue;
/* // class III - edge cross products. Almost all OBB tests early-out with type I or type II,
// so early-outs here probably aren't useful (TODO: profile)
const Vec3V va = V3NegScaleSub(rZ, tY, V3Scale(rY, tZ));
const Vec3V va2 = V3ScaleAdd(arY, eZ, V3ScaleAdd(arZ, eY, mBB_123));
const BoolV ba = BOr(V3IsGrtr(va, va2), V3IsGrtr(V3Neg(va2), va));
const Vec3V vb = V3NegScaleSub(rX, tZ, V3Scale(rZ, tX));
const Vec3V vb2 = V3ScaleAdd(arX, eZ, V3ScaleAdd(arZ, eX, mBB_456));
const BoolV bb = BOr(V3IsGrtr(vb, vb2), V3IsGrtr(V3Neg(vb2), vb));
const Vec3V vc = V3NegScaleSub(rY, tX, V3Scale(rX, tY));
const Vec3V vc2 = V3ScaleAdd(arX, eY, V3ScaleAdd(arY, eX, mBB_789));
const BoolV bc = BOr(V3IsGrtr(vc, vc2), V3IsGrtr(V3Neg(vc2), vc));
return BAllEq(BOr(ba, BOr(bb,bc)), BFFFF());*/
}
// Overload for raw bounds (used for the free-list objects).
PX_FORCE_INLINE PxIntBool operator()(const PxBounds3& bounds) const
{
BucketBox tmp;
tmp.mCenter = bounds.getCenter();
tmp.mExtents = bounds.getExtents();
return (*this)(tmp);
}
Vec3V mExtents; // extents of OBB
Vec3V mT; // translation of OBB
Mat33V mRT; // transpose of rotation matrix of OBB
Mat33V mART; // transpose of mRT, padded by epsilon
Vec3V mBB_xyz; // extents of OBB along coordinate axes
/* Vec3V mBB_123; // projections of extents onto edge-cross axes
Vec3V mBB_456;
Vec3V mBB_789;*/
};
#else
// Scalar OBB-vs-AABB separating-axis test functor. Mirrors the SIMD version:
// class I (AABB axes) and class II (OBB axes) only; the 9 cross-product axes
// sit in an if(0) block, so the test is conservative (may report overlap for
// some non-overlapping pairs, never the reverse).
struct OBBAABBTest_Scalar
{
OBBAABBTest_Scalar(const PxMat33& rotation, const PxVec3& translation, const PxVec3& extents)
{
mR = rotation;
mT = translation;
mExtents = extents;
const PxVec3 eps(1e-6f);
mAR = PxMat33(mR[0].abs() + eps, mR[1].abs() + eps, mR[2].abs() + eps); // Epsilon prevents floating-point inaccuracies (strategy borrowed from RAPID)
mBB_xyz = mAR.transform(mExtents); // Precompute box-box data - Courtesy of Erwin de Vries
// NOTE(review): the mBB_1..mBB_9 members read by the disabled class III
// block below are never initialized (see the commented code); the block
// must not be enabled without restoring this precomputation.
/* PxReal ex = mExtents.x, ey = mExtents.y, ez = mExtents.z;
mBB_1 = ey*mAR[2].x + ez*mAR[1].x; mBB_2 = ez*mAR[0].x + ex*mAR[2].x; mBB_3 = ex*mAR[1].x + ey*mAR[0].x;
mBB_4 = ey*mAR[2].y + ez*mAR[1].y; mBB_5 = ez*mAR[0].y + ex*mAR[2].y; mBB_6 = ex*mAR[1].y + ey*mAR[0].y;
mBB_7 = ey*mAR[2].z + ez*mAR[1].z; mBB_8 = ez*mAR[0].z + ex*mAR[2].z; mBB_9 = ex*mAR[1].z + ey*mAR[0].z;*/
}
PX_FORCE_INLINE PxIntBool operator()(const BucketBox& box) const
{
const PxVec3& c = box.mCenter;
const PxVec3& e = box.mExtents;
const PxVec3 T = mT - c;
// Class I : A's basis vectors
if(PxAbs(T.x) > e.x + mBB_xyz.x) return PxIntFalse;
if(PxAbs(T.y) > e.y + mBB_xyz.y) return PxIntFalse;
if(PxAbs(T.z) > e.z + mBB_xyz.z) return PxIntFalse;
// Class II : B's basis vectors
if(PxAbs(T.dot(mR[0])) > e.dot(mAR[0]) + mExtents.x) return PxIntFalse;
if(PxAbs(T.dot(mR[1])) > e.dot(mAR[1]) + mExtents.y) return PxIntFalse;
if(PxAbs(T.dot(mR[2])) > e.dot(mAR[2]) + mExtents.z) return PxIntFalse;
// Class III : 9 cross products (disabled - see NOTE in the constructor)
if(0)
{
if(PxAbs(T.z*mR[0].y - T.y*mR[0].z) > e.y*mAR[0].z + e.z*mAR[0].y + mBB_1) return PxIntFalse; // L = A0 x B0
if(PxAbs(T.z*mR[1].y - T.y*mR[1].z) > e.y*mAR[1].z + e.z*mAR[1].y + mBB_2) return PxIntFalse; // L = A0 x B1
if(PxAbs(T.z*mR[2].y - T.y*mR[2].z) > e.y*mAR[2].z + e.z*mAR[2].y + mBB_3) return PxIntFalse; // L = A0 x B2
if(PxAbs(T.x*mR[0].z - T.z*mR[0].x) > e.x*mAR[0].z + e.z*mAR[0].x + mBB_4) return PxIntFalse; // L = A1 x B0
if(PxAbs(T.x*mR[1].z - T.z*mR[1].x) > e.x*mAR[1].z + e.z*mAR[1].x + mBB_5) return PxIntFalse; // L = A1 x B1
if(PxAbs(T.x*mR[2].z - T.z*mR[2].x) > e.x*mAR[2].z + e.z*mAR[2].x + mBB_6) return PxIntFalse; // L = A1 x B2
if(PxAbs(T.y*mR[0].x - T.x*mR[0].y) > e.x*mAR[0].y + e.y*mAR[0].x + mBB_7) return PxIntFalse; // L = A2 x B0
if(PxAbs(T.y*mR[1].x - T.x*mR[1].y) > e.x*mAR[1].y + e.y*mAR[1].x + mBB_8) return PxIntFalse; // L = A2 x B1
if(PxAbs(T.y*mR[2].x - T.x*mR[2].y) > e.x*mAR[2].y + e.y*mAR[2].x + mBB_9) return PxIntFalse; // L = A2 x B2
}
return PxIntTrue;
}
private:
PxMat33 mR; // rotation matrix
PxMat33 mAR; // absolute rotation matrix
PxVec3 mT; // translation from obb space to model space
PxVec3 mExtents;
PxVec3 mBB_xyz;
float mBB_1, mBB_2, mBB_3;
float mBB_4, mBB_5, mBB_6;
float mBB_7, mBB_8, mBB_9;
};
#endif
#ifdef USE_SIMD
typedef OBBAABBTest_SIMD BucketPrunerOBBAABBTest;
#else
typedef OBBAABBTest_Scalar BucketPrunerOBBAABBTest;
#endif
///////////////////////////////////////////////////////////////////////////////
bool BucketPrunerCore::overlap(const ShapeData& queryVolume, PrunerOverlapCallback& pcb) const
{
	// Runs an overlap query against the bucket structure. The pruner must have been
	// built before querying (mDirty cleared). Returns the traversal's "again" flag:
	// false when the callback aborted the query, true otherwise.
	PX_ASSERT(!mDirty);

	bool again = true;

	const PxBounds3& cullBox = queryVolume.getPrunerInflatedWorldAABB();

	switch(queryVolume.getType())
	{
		case PxGeometryType::eBOX:
		{
			if(queryVolume.isOBB())
			{
				// Oriented box: OBB-vs-AABB test against the buckets
				const BucketPrunerOverlapTraversal<BucketPrunerOBBAABBTest, false> overlap;
				again = overlap(*this,
					BucketPrunerOBBAABBTest(
						queryVolume.getPrunerWorldRot33(), queryVolume.getPrunerWorldPos(),
						queryVolume.getPrunerBoxGeomExtentsInflated()),
					pcb, cullBox);
			}
			else
			{
				// Axis-aligned box: cheaper AABB-vs-AABB test
				const BucketPrunerOverlapTraversal<BucketPrunerAABBAABBTest, true> overlap;
				again = overlap(*this, BucketPrunerAABBAABBTest(cullBox), pcb, cullBox);
			}
		}
		break;

		case PxGeometryType::eCAPSULE:
		{
			// Capsules are culled with the OBB around the capsule
			const BucketPrunerOverlapTraversal<BucketPrunerOBBAABBTest, false> overlap;
			again = overlap(*this,
				BucketPrunerOBBAABBTest(
					queryVolume.getPrunerWorldRot33(), queryVolume.getPrunerWorldPos(),
					queryVolume.getPrunerBoxGeomExtentsInflated()),
				pcb, cullBox);
		}
		break;

		case PxGeometryType::eSPHERE:
		{
			// PT: fix: removed an unused local ('sphereExtents') that was previously computed here
			const Sphere& sphere = queryVolume.getGuSphere();
			const BucketPrunerOverlapTraversal<BucketPrunerSphereAABBTest, true> overlap;
			again = overlap(*this, BucketPrunerSphereAABBTest(sphere), pcb, cullBox);
		}
		break;

		case PxGeometryType::eCONVEXMESH:
		{
			// Convexes are culled with their bounding OBB
			const BucketPrunerOverlapTraversal<BucketPrunerOBBAABBTest, false> overlap;
			again = overlap(*this,
				BucketPrunerOBBAABBTest(
					queryVolume.getPrunerWorldRot33(), queryVolume.getPrunerWorldPos(),
					queryVolume.getPrunerBoxGeomExtentsInflated()),
				pcb, cullBox);
		}
		break;

		default:
			PX_ALWAYS_ASSERT_MESSAGE("unsupported overlap query volume geometry type");
	}
	return again;
}
///////////////////////////////////////////////////////////////////////////////
void BucketPrunerCore::getGlobalBounds(PxBounds3& bounds) const
{
	// Computes the union of the pruner's global box and, when FREE_PRUNER_SIZE is
	// enabled, the bounds of objects still held in the "free" array.
	// PT: TODO: refactor with similar code above in the file

	// min/max derived from the global box's center/extents representation
	const Vec4V centerV = V4LoadU(&mGlobalBox.mCenter.x);
	const Vec4V extentsV = V4LoadU(&mGlobalBox.mExtents.x);

	Vec4V minV = V4Sub(centerV, extentsV);
	Vec4V maxV = V4Add(centerV, extentsV);

#ifdef FREE_PRUNER_SIZE
	PxU32 nbFree = mNbFree;
	if(nbFree)
	{
		// Extend the bounds with each free-array entry.
		// NOTE(review): V4LoadU(&maximum.x) reads one float past the PxBounds3 —
		// presumably safe for this storage layout; confirm against mFreeBounds' allocation.
		const PxBounds3* freeBounds = mFreeBounds;
		while(nbFree--)
		{
			minV = V4Min(minV, V4LoadU(&freeBounds->minimum.x));
			maxV = V4Max(maxV, V4LoadU(&freeBounds->maximum.x));
			freeBounds++;
		}
	}
#endif

	StoreBounds(bounds, minV, maxV);
}
///////////////////////////////////////////////////////////////////////////////
void BucketPrunerCore::shiftOrigin(const PxVec3& shift)
{
#ifdef FREE_PRUNER_SIZE
for(PxU32 i=0;i<mNbFree;i++)
{
mFreeBounds[i].minimum -= shift;
mFreeBounds[i].maximum -= shift;
mFreeTransforms[i].p -= shift;
}
#endif
const PxU32 nb = mCoreNbObjects;
//if (nb)
{
mGlobalBox.mCenter -= shift;
#ifdef _DEBUG
mGlobalBox.mDebugMin -= shift[mSortAxis];
#endif
encodeBoxMinMax(mGlobalBox, mSortAxis);
for(PxU32 i=0; i<nb; i++)
{
mCoreBoxes[i].minimum -= shift;
mCoreBoxes[i].maximum -= shift;
mCoreTransforms[i].p -= shift;
}
for(PxU32 i=0; i<mSortedNb; i++)
{
mSortedWorldBoxes[i].mCenter -= shift;
#ifdef _DEBUG
mSortedWorldBoxes[i].mDebugMin -= shift[mSortAxis];
#endif
encodeBoxMinMax(mSortedWorldBoxes[i], mSortAxis);
mSortedTransforms[i].p -= shift;
}
for(PxU32 i=0; i < 5; i++)
mLevel1.mBucketBox[i].mCenter -= shift;
for(PxU32 i=0; i < 5; i++)
for(PxU32 j=0; j < 5; j++)
mLevel2[i].mBucketBox[j].mCenter -= shift;
for(PxU32 i=0; i < 5; i++)
for(PxU32 j=0; j < 5; j++)
for(PxU32 k=0; k < 5; k++)
mLevel3[i][j].mBucketBox[k].mCenter -= shift;
}
}
///////////////////////////////////////////////////////////////////////////////
static void visualize(PxRenderOutput& out, const BucketBox& bounds)
{
	// Draws one bucket box as a debug AABB.
	const PxBounds3 aabb(bounds.getMin(), bounds.getMax());
	Cm::renderOutputDebugBox(out, aabb);
}
void BucketPrunerCore::visualize(PxRenderOutput& out, PxU32 color) const
{
	// Renders the global box plus every non-empty bucket box of the three levels,
	// all in world space (identity transform) with the requested color.
	out << PxTransform(PxIdentity);
	out << color;

	::visualize(out, mGlobalBox);

	for(PxU32 i=0;i<5;i++)
	{
		if(mLevel1.mCounters[i])
		{
			::visualize(out, mLevel1.mBucketBox[i]);

			for(PxU32 j=0;j<5;j++)
			{
				if(mLevel2[i].mCounters[j])
				{
					::visualize(out, mLevel2[i].mBucketBox[j]);

					for(PxU32 k=0;k<5;k++)
					{
						if(mLevel3[i][j].mCounters[k])
							::visualize(out, mLevel3[i][j].mBucketBox[k]);
					}
				}
			}
		}
	}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
BucketPruner::BucketPruner(PxU64 contextID) : mPool(contextID, TRANSFORM_CACHE_GLOBAL)
{
	// The pruning pool caches world-space transforms (TRANSFORM_CACHE_GLOBAL).
}
BucketPruner::~BucketPruner()
{
	// Nothing explicit to do: member destructors release all owned storage.
}
// Re-points the core at the pool's current arrays and flags it dirty, so the
// bucket structure is rebuilt by the next build() (see commit()). Called after
// every pool mutation since add/remove/update can relocate the pool's arrays.
static PX_FORCE_INLINE void setExternalMemory(BucketPrunerCore& core, PruningPool& pool)
{
	core.mDirty = true;
	core.setExternalMemory(pool.getNbActiveObjects(), pool.getCurrentWorldBoxes(), pool.getObjects(), pool.getTransforms());
}
bool BucketPruner::addObjects(PrunerHandle* results, const PxBounds3* bounds, const PrunerPayload* data, const PxTransform* transforms, PxU32 count, bool)
{
	// Adds 'count' objects to the pool and marks the core for rebuild.
	// Returns true when every object was successfully added.
	if(count)
	{
		const PxU32 nbAdded = mPool.addObjects(results, bounds, data, transforms, count);
		::setExternalMemory(mCore, mPool);
		return nbAdded == count;
	}
	return true;
}
void BucketPruner::removeObjects(const PrunerHandle* handles, PxU32 count, PrunerPayloadRemovalCallback* removalCallback)
{
if(!count)
return;
for(PxU32 i=0;i<count;i++)
mPool.removeObject(handles[i], removalCallback);
::setExternalMemory(mCore, mPool);
}
void BucketPruner::updateObjects(const PrunerHandle* handles, PxU32 count, float inflation, const PxU32* boundsIndices, const PxBounds3* newBounds, const PxTransform32* newTransforms)
{
	// Updates (and inflates) the bounds of the given objects when new data is
	// provided, then refreshes the core's view of the pool arrays.
	if(!count)
		return;

	const bool hasNewData = handles && boundsIndices && newBounds;
	if(hasNewData)
		mPool.updateAndInflateBounds(handles, boundsIndices, newBounds, newTransforms, count, inflation);

	::setExternalMemory(mCore, mPool);
}
void BucketPruner::purge()
{
	// Intentionally empty for this pruner.
}
void BucketPruner::commit()
{
	// Rebuilds the core bucket structure from the current pool data.
	mCore.build();
}
void BucketPruner::merge(const void*)
{
	// Merging external data is not implemented for the bucket pruner; no-op.
}
void BucketPruner::shiftOrigin(const PxVec3& shift)
{
	// Forwards the origin shift to the core structure.
	mCore.shiftOrigin(shift);
}
bool BucketPruner::sweep(const ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& pcb) const
{
	// Sweep query. Querying an un-built (dirty) pruner is invalid; bail out
	// early in that case instead of risking a crash.
	PX_ASSERT(!mCore.mDirty);
	return mCore.mDirty ? true : mCore.sweep(queryVolume, unitDir, inOutDistance, pcb);
}
bool BucketPruner::overlap(const ShapeData& queryVolume, PrunerOverlapCallback& pcb) const
{
	// Overlap query. Querying an un-built (dirty) pruner is invalid; bail out
	// early in that case instead of risking a crash.
	PX_ASSERT(!mCore.mDirty);
	return mCore.mDirty ? true : mCore.overlap(queryVolume, pcb);
}
bool BucketPruner::raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& pcb) const
{
	// Raycast query. Querying an un-built (dirty) pruner is invalid; bail out
	// early in that case instead of risking a crash.
	PX_ASSERT(!mCore.mDirty);
	return mCore.mDirty ? true : mCore.raycast(origin, unitDir, inOutDistance, pcb);
}
void BucketPruner::visualize(PxRenderOutput& out, PxU32 primaryColor, PxU32 /*secondaryColor*/) const
{
	// Only the primary color is used by the bucket pruner's debug rendering.
	mCore.visualize(out, primaryColor);
}
void BucketPruner::getGlobalBounds(PxBounds3& bounds) const
{
	// Forwards to the core, which unions the global box with any free-array bounds.
	mCore.getGlobalBounds(bounds);
}
#define MBP_ALLOC(x) PX_ALLOC(x, "BucketPruner")
#define MBP_ALLOC_TMP(x) PX_ALLOC(x, "BucketPruner")
#define MBP_FREE(x) PX_FREE(x)
#define INVALID_ID 0xffffffff
#ifndef USE_REGULAR_HASH_MAP
// True when the stored payload does not match 'data'.
static PX_FORCE_INLINE bool differentPair(const BucketPrunerPair& p, const PrunerPayload& data)
{
	return !(p.mData == data);
}
///////////////////////////////////////////////////////////////////////////////
BucketPrunerMap::BucketPrunerMap() :
	mHashSize		(0),
	mMask			(0),
	mNbActivePairs	(0),
	mHashTable		(NULL),
	mNext			(NULL),
	mActivePairs	(NULL),
	mReservedMemory (0)
{
	// Starts empty; storage is allocated lazily by addPair()/reserveMemory().
}
///////////////////////////////////////////////////////////////////////////////
BucketPrunerMap::~BucketPrunerMap()
{
	purge();	// releases the hash table, next-index array and pair array
}
///////////////////////////////////////////////////////////////////////////////
void BucketPrunerMap::purge()
{
	// Releases all hash-map storage and resets the map to its empty state.
	MBP_FREE(mHashTable);
	MBP_FREE(mActivePairs);
	MBP_FREE(mNext);

	mHashSize = 0;
	mMask = 0;
	mNbActivePairs = 0;
}
///////////////////////////////////////////////////////////////////////////////
const BucketPrunerPair* BucketPrunerMap::findPair(const PrunerPayload& payload) const
{
	// Looks up the entry associated with 'payload'. Returns NULL when absent.
	if(!mHashTable)
		return NULL;	// Nothing has been allocated yet

	const BucketPrunerPair* PX_RESTRICT activePairs = mActivePairs;
	const PxU32* PX_RESTRICT next = mNext;

	// Walk this bucket's linked list until we hit the payload or the end marker
	for(PxU32 offset = mHashTable[PxComputeHash(payload) & mMask]; offset != INVALID_ID; offset = next[offset])
	{
		if(!differentPair(activePairs[offset], payload))
		{
			PX_ASSERT(offset<mNbActivePairs);
			return &activePairs[offset];	// Match => the pair is persistent
		}
	}
	return NULL;
}
// Internal version saving hash computation
// Internal lookup variant that reuses a precomputed (already masked) hash value.
PX_FORCE_INLINE BucketPrunerPair* BucketPrunerMap::findPair(const PrunerPayload& payload, PxU32 hashValue) const
{
	if(!mHashTable)
		return NULL;	// Nothing has been allocated yet

	BucketPrunerPair* PX_RESTRICT activePairs = mActivePairs;
	const PxU32* PX_RESTRICT next = mNext;

	// Walk this bucket's linked list until we hit the payload or the end marker
	for(PxU32 offset = mHashTable[hashValue]; offset != INVALID_ID; offset = next[offset])
	{
		if(!differentPair(activePairs[offset], payload))
		{
			PX_ASSERT(offset<mNbActivePairs);
			return &activePairs[offset];	// Match => the pair is persistent
		}
	}
	return NULL;
}
///////////////////////////////////////////////////////////////////////////////
// Adds (or finds) the entry for 'payload'. Grows the hash table when full.
// The returned pointer aims into mActivePairs and is only valid until the next
// add/remove, since the array may be reallocated or compacted.
BucketPrunerPair* BucketPrunerMap::addPair(const PrunerPayload& payload, PxU32 coreIndex, PxU32 timeStamp)
{
	PxU32 hashValue = PxComputeHash(payload) & mMask;

	{
		BucketPrunerPair* PX_RESTRICT p = findPair(payload, hashValue);
		if(p)
		{
			// Already present: stored core index / timestamp must match
			PX_ASSERT(p->mCoreIndex==coreIndex);
			PX_ASSERT(p->mTimeStamp==timeStamp);
			return p;	// Persistent pair
		}
	}

	// This is a new pair
	if(mNbActivePairs >= mHashSize)
	{
		// Get more entries
		mHashSize = PxNextPowerOfTwo(mNbActivePairs+1);
		mMask = mHashSize-1;

		reallocPairs();

		// Recompute hash value with new hash size (the mask changed, so the bucket index may differ)
		hashValue = PxComputeHash(payload) & mMask;
	}

	BucketPrunerPair* PX_RESTRICT p = &mActivePairs[mNbActivePairs];
	p->mData		= payload;
	p->mCoreIndex	= coreIndex;
	p->mTimeStamp	= timeStamp;
	// Link the new entry at the head of its bucket's list
	mNext[mNbActivePairs] = mHashTable[hashValue];
	mHashTable[hashValue] = mNbActivePairs++;
	return p;
}
///////////////////////////////////////////////////////////////////////////////
// Unlinks entry 'pairIndex' from its hash bucket, then fills the hole by moving
// the last active pair into the freed slot so mActivePairs stays dense.
// 'hashValue' must be the bucket of the removed payload.
void BucketPrunerMap::removePairInternal(const PrunerPayload& /*payload*/, PxU32 hashValue, PxU32 pairIndex)
{
	// Walk the hash table to fix mNext
	{
		PxU32 offset = mHashTable[hashValue];
		PX_ASSERT(offset!=INVALID_ID);

		PxU32 previous=INVALID_ID;
		while(offset!=pairIndex)
		{
			previous = offset;
			offset = mNext[offset];
		}

		// Let us go/jump us
		if(previous!=INVALID_ID)
		{
			PX_ASSERT(mNext[previous]==pairIndex);
			mNext[previous] = mNext[pairIndex];
		}
		// else we were the first
		else mHashTable[hashValue] = mNext[pairIndex];

		// we're now free to reuse mNext[pairIndex] without breaking the list
	}
#if PX_DEBUG
	mNext[pairIndex]=INVALID_ID;
#endif
	// Invalidate entry

	// Fill holes
	if(1)
	{
		// 1) Remove last pair
		const PxU32 lastPairIndex = mNbActivePairs-1;
		if(lastPairIndex==pairIndex)
		{
			// Removing the last entry: just shrink the array
			mNbActivePairs--;
		}
		else
		{
			// The last pair moves into the freed slot, so it must first be
			// unlinked from its own bucket...
			const BucketPrunerPair* last = &mActivePairs[lastPairIndex];
			const PxU32 lastHashValue = PxComputeHash(last->mData) & mMask;

			// Walk the hash table to fix mNext
			PxU32 offset = mHashTable[lastHashValue];
			PX_ASSERT(offset!=INVALID_ID);

			PxU32 previous=INVALID_ID;
			while(offset!=lastPairIndex)
			{
				previous = offset;
				offset = mNext[offset];
			}

			// Let us go/jump us
			if(previous!=INVALID_ID)
			{
				PX_ASSERT(mNext[previous]==lastPairIndex);
				mNext[previous] = mNext[lastPairIndex];
			}
			// else we were the first
			else mHashTable[lastHashValue] = mNext[lastPairIndex];
			// we're now free to reuse mNext[lastPairIndex] without breaking the list

#if PX_DEBUG
			mNext[lastPairIndex]=INVALID_ID;
#endif

			// Don't invalidate entry since we're going to shrink the array

			// 2) Re-insert in free slot (...then re-linked at the head of that bucket)
			mActivePairs[pairIndex] = mActivePairs[lastPairIndex];
#if PX_DEBUG
			PX_ASSERT(mNext[pairIndex]==INVALID_ID);
#endif
			mNext[pairIndex] = mHashTable[lastHashValue];
			mHashTable[lastHashValue] = pairIndex;

			mNbActivePairs--;
		}
	}
}
///////////////////////////////////////////////////////////////////////////////
// Removes the entry for 'payload', writing back its core index and timestamp.
// Returns false when the payload is not present. May shrink the table afterwards.
bool BucketPrunerMap::removePair(const PrunerPayload& payload, PxU32& coreIndex, PxU32& timeStamp)
{
	const PxU32 hashValue = PxComputeHash(payload) & mMask;
	const BucketPrunerPair* p = findPair(payload, hashValue);
	if(!p)
		return false;
	PX_ASSERT(p->mData==payload);

	coreIndex = p->mCoreIndex;
	timeStamp = p->mTimeStamp;

	removePairInternal(payload, hashValue, getPairIndex(p));

	shrinkMemory();
	return true;
}
///////////////////////////////////////////////////////////////////////////////
void BucketPrunerMap::shrinkMemory()
{
	// Shrinks the hash table when the active-pair count fits a smaller power-of-two
	// table, unless a user reservation (reserveMemory) pins the current size.
	const PxU32 correctHashSize = PxNextPowerOfTwo(mNbActivePairs);

	const bool alreadyRightSized = (mHashSize == correctHashSize);
	const bool pinnedByReservation = mReservedMemory && correctHashSize < mReservedMemory;
	if(alreadyRightSized || pinnedByReservation)
		return;

	// Reduce memory used
	mHashSize = correctHashSize;
	mMask = mHashSize-1;

	reallocPairs();
}
///////////////////////////////////////////////////////////////////////////////
// Fills 'nb' consecutive dwords starting at 'dest' with 'value'.
static PX_FORCE_INLINE void storeDwords(PxU32* dest, PxU32 nb, PxU32 value)
{
	for(PxU32 i=0; i<nb; i++)
		dest[i] = value;
}
// (Re)allocates the hash table, pair array and next-index array for the current
// mHashSize, then re-links every active pair since the mask (bucket index) changed.
void BucketPrunerMap::reallocPairs()
{
	MBP_FREE(mHashTable);
	mHashTable = reinterpret_cast<PxU32*>(MBP_ALLOC(mHashSize*sizeof(PxU32)));
	storeDwords(mHashTable, mHashSize, INVALID_ID);

	// Get some bytes for new entries
	BucketPrunerPair* newPairs = reinterpret_cast<BucketPrunerPair*>(MBP_ALLOC(mHashSize * sizeof(BucketPrunerPair)));
	PX_ASSERT(newPairs);
	PxU32* newNext = reinterpret_cast<PxU32*>(MBP_ALLOC(mHashSize * sizeof(PxU32)));
	PX_ASSERT(newNext);

	// Copy old data if needed
	if(mNbActivePairs)
		PxMemCopy(newPairs, mActivePairs, mNbActivePairs*sizeof(BucketPrunerPair));

	// Rebuild all bucket lists: the mask changed, so each pair may land in a new bucket.
	// ### check it's actually needed... probably only for pairs whose hash value was cut by the and
	// However it might not be needed to recompute them => only less efficient but still ok
	for(PxU32 i=0;i<mNbActivePairs;i++)
	{
		const PxU32 hashValue = PxComputeHash(mActivePairs[i].mData) & mMask;	// New hash value with new mask
		newNext[i] = mHashTable[hashValue];
		mHashTable[hashValue] = i;
	}

	// Delete old data
	MBP_FREE(mNext);
	MBP_FREE(mActivePairs);

	// Assign new pointer
	mActivePairs = newPairs;
	mNext = newNext;
}
///////////////////////////////////////////////////////////////////////////////
void BucketPrunerMap::reserveMemory(PxU32 memSize)
{
	// Pre-sizes the hash table to at least 'memSize' entries (rounded up to a
	// power of two) and records the reservation so shrinkMemory() won't go below it.
	if(!memSize)
		return;

	const PxU32 roundedSize = PxIsPowerOfTwo(memSize) ? memSize : PxNextPowerOfTwo(memSize);

	mHashSize = roundedSize;
	mMask = mHashSize-1;

	mReservedMemory = roundedSize;

	reallocPairs();
}
///////////////////////////////////////////////////////////////////////////////
#endif
| 87,715 | C++ | 31.130403 | 221 | 0.691581 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuIncrementalAABBPrunerCore.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_INCREMENTAL_AABB_PRUNER_CORE_H
#define GU_INCREMENTAL_AABB_PRUNER_CORE_H
#include "GuPruner.h"
#include "GuIncrementalAABBTree.h"
#include "GuPruningPool.h"
#include "GuAABBTreeUpdateMap.h"
#include "foundation/PxHashMap.h"
namespace physx
{
class PxRenderOutput;
namespace Gu
{
typedef PxHashMap<PoolIndex, IncrementalAABBTreeNode*> IncrementalPrunerMap;
// One of the two rotating trees used by IncrementalAABBPrunerCore, together with
// its per-tree timestamp and its pool-index -> tree-node mapping.
struct CoreTree
{
	PX_FORCE_INLINE CoreTree() : timeStamp(0), tree(NULL) {}

	PxU32					timeStamp;	// timestamp associated with this tree's objects
	IncrementalAABBTree*	tree;		// incremental AABB tree (NULL until created)
	IncrementalPrunerMap	mapping;	// pool index -> tree node
};
// Core of the incremental AABB pruner: maintains two incremental AABB trees
// (a "current" and a "last" one, swapped on timestamp change) over objects
// stored in an external PruningPool.
class IncrementalAABBPrunerCore : public PxUserAllocated
{
	public:
										IncrementalAABBPrunerCore(const PruningPool* pool);
										~IncrementalAABBPrunerCore();

				// Releases both trees and their mappings.
				void					release();

				// Object management; indices are pool indices into the external PruningPool.
				bool					addObject(const PoolIndex poolIndex, PxU32 timeStamp);
				bool					removeObject(const PoolIndex poolIndex, const PoolIndex poolRelocatedLastIndex, PxU32& timeStamp);

				// if we swap object from bucket pruner index with an index in the regular AABB pruner
				void					swapIndex(const PoolIndex poolIndex, const PoolIndex poolRelocatedLastIndex);

				bool					updateObject(const PoolIndex poolIndex);

				// Removes all objects carrying the given timestamp; returns how many were removed.
				PxU32					removeMarkedObjects(PxU32 timeStamp);

				// Scene queries. Each returns the traversal's continuation flag.
				bool					raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback&) const;
				bool					overlap(const ShapeData& queryVolume, PrunerOverlapCallback&) const;
				bool					sweep(const ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback&) const;

				void					getGlobalBounds(PxBounds3&)	const;

				void					shiftOrigin(const PxVec3& shift);

				void					visualize(PxRenderOutput& out, PxU32 color) const;

		PX_FORCE_INLINE void			timeStampChange()
										{
											// swap current and last tree
											mLastTree = (mLastTree + 1) % 2;
											mCurrentTree = (mCurrentTree + 1) % 2;
										}

				void					build() {}	// trees are maintained incrementally; nothing to do here

		PX_FORCE_INLINE	PxU32			getNbObjects()	const { return mAABBTree[0].mapping.size() + mAABBTree[1].mapping.size(); }

	private:
				void					updateMapping(IncrementalPrunerMap& mapping, const PoolIndex poolIndex, IncrementalAABBTreeNode* node);
				void					test(bool hierarchyCheck = true);	// debug consistency check

	private:
		static	const PxU32				NUM_TREES = 2;

				PxU32					mCurrentTree;
				PxU32					mLastTree;
				CoreTree				mAABBTree[NUM_TREES];
				const PruningPool*		mPool;			// Pruning pool from AABB pruner
				NodeList				mChangedLeaves;
};
}}
#endif
| 4,211 | C | 37.290909 | 126 | 0.725481 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuMTD.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuMTD.h"
#include "GuDistancePointSegment.h"
#include "GuDistanceSegmentSegment.h"
#include "GuDistanceSegmentBox.h"
#include "GuVecBox.h"
#include "GuVecCapsule.h"
#include "GuVecConvexHullNoScale.h"
#include "GuInternal.h"
#include "GuContactMethodImpl.h"
#include "GuBoxConversion.h"
#include "GuPCMShapeConvex.h"
#include "GuPCMContactGen.h"
#include "GuConvexMesh.h"
#include "GuGJK.h"
#include "GuSphere.h"
#include "geomutils/PxContactBuffer.h"
using namespace physx;
using namespace Gu;
static PX_FORCE_INLINE float validateDepth(float depth)
{
	// Penetration depth must be >= 0, but FPU round-off can produce tiny negative
	// values in the MTD functions; clamp those to zero. (A former assert against a
	// 1e-3 epsilon was removed: shape shrinking can legitimately exceed it.)
	return PxMax(depth, 0.0f);
}
///////////////////////////////////////////////////////////////////////////////
// PT: the function names should follow the order in which the PxGeometryTypes are listed,
// i.e. computeMTD_Type0Type1 with Type0<=Type1. This is to guarantee that the proper results
// (following the desired convention) are returned from the PxGeometryQuery-level call.
///////////////////////////////////////////////////////////////////////////////
static bool computeMTD_SphereSphere(PxVec3& mtd, PxF32& depth, const Sphere& sphere0, const Sphere& sphere1)
{
	// MTD between two spheres: axis from sphere1 to sphere0,
	// depth = radius sum - center distance.
	const PxVec3 centerDelta = sphere0.center - sphere1.center;
	const PxReal sqDist = centerDelta.magnitudeSquared();
	const PxReal combinedRadius = sphere0.radius + sphere1.radius;

	// Disjoint when the squared center distance exceeds the squared radius sum
	if(sqDist > combinedRadius*combinedRadius)
		return false;

	const PxF32 dist = manualNormalize(mtd, centerDelta, sqDist);

	depth = validateDepth(combinedRadius - dist);
	return true;
}
///////////////////////////////////////////////////////////////////////////////
static bool computeMTD_SphereCapsule(PxVec3& mtd, PxF32& depth, const Sphere& sphere, const Capsule& capsule)
{
	// MTD between a sphere and a capsule: the closest point on the capsule's inner
	// segment defines the axis; depth = radius sum - distance.
	const PxReal combinedRadius = sphere.radius + capsule.radius;

	PxReal t;
	const PxReal sqDist = distancePointSegmentSquared(capsule, sphere.center, &t);
	if(sqDist > combinedRadius*combinedRadius)
		return false;

	const PxVec3 dir = sphere.center - capsule.getPointAt(t);
	const PxF32 dist = manualNormalize(mtd, dir, dir.magnitudeSquared());

	depth = validateDepth(combinedRadius - dist);
	return true;
}
///////////////////////////////////////////////////////////////////////////////
//This version is ported 1:1 from novodex
// Sphere-vs-OBB contact, ported 1:1 from novodex. Returns true on contact
// (within 'contactDistance'). Outputs:
//   point      - contact point on the box surface (sphere center when embedded)
//   normal     - contact normal pointing from the box towards the sphere
//   separation - signed distance including the radius: negative when penetrating,
//                may be positive (up to contactDistance) when merely close
static PX_FORCE_INLINE bool ContactSphereBox(const PxVec3& sphereOrigin,
											 PxReal sphereRadius,
											 const PxVec3& boxExtents,
//											 const PxcCachedTransforms& boxCacheTransform,
											 const PxTransform32& boxTransform,
											 PxVec3& point,
											 PxVec3& normal,
											 PxReal& separation,
											 PxReal contactDistance)
{
	//returns true on contact
	const PxVec3 delta = sphereOrigin - boxTransform.p; // s1.center - s2.center;
	PxVec3 dRot = boxTransform.rotateInv(delta); //transform delta into OBB body coords.

	//check if delta is outside ABB - and clip the vector to the ABB.
	bool outside = false;

	if(dRot.x < -boxExtents.x)
	{
		outside = true;
		dRot.x = -boxExtents.x;
	}
	else if(dRot.x > boxExtents.x)
	{
		outside = true;
		dRot.x = boxExtents.x;
	}

	if(dRot.y < -boxExtents.y)
	{
		outside = true;
		dRot.y = -boxExtents.y;
	}
	else if(dRot.y > boxExtents.y)
	{
		outside = true;
		dRot.y = boxExtents.y;
	}

	if(dRot.z < -boxExtents.z)
	{
		outside = true;
		dRot.z =-boxExtents.z;
	}
	else if(dRot.z > boxExtents.z)
	{
		outside = true;
		dRot.z = boxExtents.z;
	}

	if(outside) //if clipping was done, sphere center is outside of box.
	{
		point = boxTransform.rotate(dRot); //get clipped delta back in world coords.
		normal = delta - point; //what we clipped away.

		const PxReal lenSquared = normal.magnitudeSquared();
		const PxReal inflatedDist = sphereRadius + contactDistance;
		if(lenSquared > inflatedDist * inflatedDist)
			return false; //disjoint

		//normalize to make it into the normal:
		// separation temporarily holds 1/len; normal*1/len is unit, then
		// separation*(len^2) yields len, i.e. the center-to-surface distance.
		separation = PxRecipSqrt(lenSquared);
		normal *= separation;
		separation *= lenSquared;
		//any plane that touches the sphere is tangential, so a vector from contact point to sphere center defines normal.
		//we could also use point here, which has same direction.
		//this is either a faceFace or a vertexFace contact depending on whether the box's face or vertex collides, but we did not distinguish.
		//We'll just use vertex face for now, this info isn't really being used anyway.
		//contact point is point on surface of cube closest to sphere center.
		point += boxTransform.p;
		separation -= sphereRadius;
		return true;
	}
	else
	{
		//center is in box, we definitely have a contact.
		PxVec3 locNorm; //local coords contact normal

		PxVec3 absdRot;
		absdRot = PxVec3(PxAbs(dRot.x), PxAbs(dRot.y), PxAbs(dRot.z));
		PxVec3 distToSurface = boxExtents - absdRot; //dist from embedded center to box surface along 3 dimensions.

		//find smallest element of distToSurface (cheapest way out of the box)
		if(distToSurface.y < distToSurface.x)
		{
			if(distToSurface.y < distToSurface.z)
			{
				//y
				locNorm = PxVec3(0.0f, dRot.y > 0.0f ? 1.0f : -1.0f, 0.0f);
				separation = -distToSurface.y;
			}
			else
			{
				//z
				locNorm = PxVec3(0.0f,0.0f, dRot.z > 0.0f ? 1.0f : -1.0f);
				separation = -distToSurface.z;
			}
		}
		else
		{
			if(distToSurface.x < distToSurface.z)
			{
				//x
				locNorm = PxVec3(dRot.x > 0.0f ? 1.0f : -1.0f, 0.0f, 0.0f);
				separation = -distToSurface.x;
			}
			else
			{
				//z
				locNorm = PxVec3(0.0f,0.0f, dRot.z > 0.0f ? 1.0f : -1.0f);
				separation = -distToSurface.z;
			}
		}
		//separation so far is just the embedding of the center point; we still have to push out all of the radius.
		point = sphereOrigin;
		normal = boxTransform.rotate(locNorm);
		separation -= sphereRadius;
		return true;
	}
}
static bool computeMTD_SphereBox(PxVec3& mtd, PxF32& depth, const Sphere& sphere, const Box& box)
{
	// Wraps the sphere-vs-box contact routine. ContactSphereBox returns a signed
	// separation (negative when penetrating), which we negate into a depth.
	const PxTransform32 boxPose(box.center, PxQuat(box.rot));

	PxVec3 contactPoint;
	PxReal separation;
	if(!ContactSphereBox(sphere.center, sphere.radius, box.extents, boxPose, contactPoint, mtd, separation, 0.0f))
		return false;

	depth = validateDepth(-separation);
	return true;
}
///////////////////////////////////////////////////////////////////////////////
static bool computeMTD_CapsuleCapsule(PxVec3& mtd, PxF32& depth, const Capsule& capsule0, const Capsule& capsule1)
{
	// MTD between two capsules: closest points between the two inner segments
	// define the axis; depth = radius sum - distance.
	PxReal t0, t1;
	const PxReal sqDist = distanceSegmentSegmentSquared(capsule0, capsule1, &t0, &t1);

	const PxReal combinedRadius = capsule0.radius + capsule1.radius;
	if(sqDist > combinedRadius*combinedRadius)
		return false;

	const PxVec3 dir = capsule0.getPointAt(t0) - capsule1.getPointAt(t1);
	const PxF32 dist = manualNormalize(mtd, dir, dir.magnitudeSquared());

	depth = validateDepth(combinedRadius - dist);
	return true;
}
///////////////////////////////////////////////////////////////////////////////
// Flips the MTD if needed so that it points from center1 towards center0.
static PX_FORCE_INLINE void reorderMTD(PxVec3& mtd, const PxVec3& center0, const PxVec3& center1)
{
	const PxVec3 witnessDir = center0 - center1;
	const bool pointsAway = mtd.dot(witnessDir) < 0.0f;
	if(pointsAway)
		mtd = -mtd;
}
// Projects the OBB onto 'axis': the interval is [c - r, c + r] where c is the
// projected center and r the sum of the projected half-extents.
static PX_FORCE_INLINE void projectBox(PxReal& min, PxReal& max, const PxVec3& axis, const Box& box)
{
	const PxReal projectedCenter = box.center.dot(axis);
	const PxReal projectedRadius =
				PxAbs(box.rot.column0.dot(axis)) * box.extents.x
			+	PxAbs(box.rot.column1.dot(axis)) * box.extents.y
			+	PxAbs(box.rot.column2.dot(axis)) * box.extents.z;

	min = projectedCenter - projectedRadius;
	max = projectedCenter + projectedRadius;
}
static bool PxcTestAxis(const PxVec3& axis, const Segment& segment, PxReal radius, const Box& box, PxReal& depth)
{
// Project capsule
PxReal min0 = segment.p0.dot(axis);
PxReal max0 = segment.p1.dot(axis);
if(min0>max0) PxSwap(min0, max0);
min0 -= radius;
max0 += radius;
// Project box
PxReal Min1, Max1;
projectBox(Min1, Max1, axis, box);
// Test projections
if(max0<Min1 || Max1<min0)
return false;
const PxReal d0 = max0 - Min1;
PX_ASSERT(d0>=0.0f);
const PxReal d1 = Max1 - min0;
PX_ASSERT(d1>=0.0f);
depth = physx::intrinsics::selectMin(d0, d1);
return true;
}
// SAT penetration test for a capsule (segment + radius) overlapping an OBB.
// Tested axes: the box's 3 face normals and the 3 cross products of the capsule
// axis with the box axes. On overlap, *t receives the clamped penetration depth
// and *pp the minimal-translation direction, oriented from the box towards the capsule.
static bool PxcCapsuleOBBOverlap3(const Segment& segment, PxReal radius, const Box& box, PxReal* t=NULL, PxVec3* pp=NULL)
{
	PxVec3 Sep(0.0f);
	PxReal PenDepth = PX_MAX_REAL;

	// Test normals
	for(PxU32 i=0;i<3;i++)
	{
		PxReal d;
		if(!PxcTestAxis(box.rot[i], segment, radius, box, d))
			return false;	// separating axis found => no overlap

		if(d<PenDepth)
		{
			PenDepth = d;
			Sep = box.rot[i];
		}
	}

	// Test edges
	PxVec3 CapsuleAxis(segment.p1 - segment.p0);
	CapsuleAxis = CapsuleAxis.getNormalized();
	for(PxU32 i=0;i<3;i++)
	{
		PxVec3 Cross = CapsuleAxis.cross(box.rot[i]);
		if(!isAlmostZero(Cross))	// skip near-parallel (degenerate) cross axes
		{
			Cross = Cross.getNormalized();
			PxReal d;
			if(!PxcTestAxis(Cross, segment, radius, box, d))
				return false;

			if(d<PenDepth)
			{
				PenDepth = d;
				Sep = Cross;
			}
		}
	}

	// Orient the minimal axis from the box towards the capsule
	reorderMTD(Sep, segment.computeCenter(), box.center);

	if(t)
		*t = validateDepth(PenDepth);
	if(pp)
		*pp = Sep;

	return true;
}
// MTD between a capsule and a box.
// - If the capsule's inner segment is outside the box (d2 != 0 and a usable
//   normal exists), use the segment-box distance: depth = radius - distance.
// - Otherwise (segment touches/intersects the box, or degenerate normal),
//   fall back to the SAT-based penetration routine.
static bool computeMTD_CapsuleBox(PxVec3& mtd, PxF32& depth, const Capsule& capsule, const Box& box)
{
	PxReal t;
	PxVec3 onBox;
	const PxReal d2 = distanceSegmentBoxSquared(capsule.p0, capsule.p1, box.center, box.extents, box.rot, &t, &onBox);
	if(d2 > capsule.radius*capsule.radius)
		return false;

	if(d2 != 0.0f)
	{
		// PT: the capsule segment doesn't intersect the box => distance-based version
		const PxVec3 onSegment = capsule.getPointAt(t);
		onBox = box.center + box.rot.transform(onBox);	// closest point on the box, to world space

		PxVec3 normal = onSegment - onBox;
		PxReal normalLen = normal.magnitude();

		if(normalLen != 0.0f)
		{
			normal *= 1.0f/normalLen;
			mtd = normal;
			depth = validateDepth(capsule.radius - PxSqrt(d2));
			return true;
		}
	}

	// PT: the capsule segment intersects the box => penetration-based version
	return PxcCapsuleOBBOverlap3(capsule, capsule.radius, box, &depth, &mtd);
}
///////////////////////////////////////////////////////////////////////////////
static bool PxcTestAxis(const PxVec3& axis, const Box& box0, const Box& box1, PxReal& depth)
{
// Project box
PxReal min0, max0;
projectBox(min0, max0, axis, box0);
// Project box
PxReal Min1, Max1;
projectBox(Min1, Max1, axis, box1);
// Test projections
if(max0<Min1 || Max1<min0)
return false;
const PxReal d0 = max0 - Min1;
PX_ASSERT(d0>=0.0f);
const PxReal d1 = Max1 - min0;
PX_ASSERT(d1>=0.0f);
depth = physx::intrinsics::selectMin(d0, d1);
return true;
}
// Tests one candidate axis and keeps it as the current MTD if its penetration
// is smaller than the best found so far. Returns false on a separating axis.
static PX_FORCE_INLINE bool testBoxBoxAxis(PxVec3& mtd, PxF32& depth, const PxVec3& axis, const Box& box0, const Box& box1)
{
	PxF32 axisDepth;
	if(!PxcTestAxis(axis, box0, box1, axisDepth))
		return false;	// separating axis found

	if(axisDepth<depth)
	{
		depth = axisDepth;
		mtd = axis;
	}
	return true;
}
// Full SAT box-vs-box MTD: 3 face normals per box plus the 9 edge-edge cross
// products, in that order. Returns false if any axis separates the boxes.
static bool computeMTD_BoxBox(PxVec3& _mtd, PxF32& _depth, const Box& box0, const Box& box1)
{
	PxVec3 mtd;
	PxF32 depth = PX_MAX_F32;

	// Face normals of box0 (rot[i] is column i of the rotation matrix)
	for(PxU32 i=0;i<3;i++)
	{
		if(!testBoxBoxAxis(mtd, depth, box0.rot[i], box0, box1))
			return false;
	}
	// Face normals of box1
	for(PxU32 i=0;i<3;i++)
	{
		if(!testBoxBoxAxis(mtd, depth, box1.rot[i], box0, box1))
			return false;
	}
	// Edge-edge cross products
	for(PxU32 j=0;j<3;j++)
	{
		for(PxU32 i=0;i<3;i++)
		{
			PxVec3 cross = box0.rot[i].cross(box1.rot[j]);
			if(!isAlmostZero(cross))	// skip near-parallel (degenerate) axes
			{
				cross = cross.getNormalized();
				if(!testBoxBoxAxis(mtd, depth, cross, box0, box1))
					return false;
			}
		}
	}

	// Orient the MTD from box0 towards box1, then flip for the caller's convention
	reorderMTD(mtd, box1.center, box0.center);

	_mtd = -mtd;
	_depth = validateDepth(depth);
	return true;
}
///////////////////////////////////////////////////////////////////////////////
using namespace physx::aos;
// Point-vs-convex distance query via GJK (the point is modeled as a
// zero-radius capsule).
// Returns true when the point is inside the convex (GJK contact); in that
// case sqDistance is set to 0 and normal_/closestPoint_ are left untouched.
// Returns false otherwise, outputting the squared distance plus the
// world-space separating normal and closest point on the convex.
bool pointConvexDistance(PxVec3& normal_, PxVec3& closestPoint_, PxReal& sqDistance, const PxVec3& pt, const ConvexMesh* convexMesh, const PxMeshScale& meshScale, const PxTransform32& convexPose)
{
	const PxTransform transform0(pt);

	using namespace aos;
	const Vec3V zeroV = V3Zero();
	Vec3V closA, closB, normalV;
	GjkStatus status;
	FloatV dist;
	{
		const ConvexHullData* hullData = &convexMesh->getHull();
		const Vec3V vScale = V3LoadU_SafeReadW(meshScale.scale);	// PT: safe because 'rotation' follows 'scale' in PxMeshScale
		const QuatV vQuat = QuatVLoadU(&meshScale.rotation.x);
		const ConvexHullV convexHull_(hullData, zeroV, vScale, vQuat, meshScale.isIdentity());

		// Move the query point into the convex's local space
		const PxMatTransformV aToB(convexPose.transformInv(transform0));

		//const CapsuleV capsule(zeroV, zeroV, FZero());//this is a point
		const CapsuleV capsule_(aToB.p, FZero());//this is a point
		const LocalConvex<CapsuleV> capsule(capsule_);
		const LocalConvex<ConvexHullV> convexHull(convexHull_);
		status = gjk<LocalConvex<CapsuleV>, LocalConvex<ConvexHullV> >(capsule, convexHull, aToB.p, FMax(), closA, closB, normalV, dist);
	}

	bool intersect = status == GJK_CONTACT;
	if(intersect)
	{
		sqDistance = 0.0f;
	}
	else
	{
		const FloatV sqDist = FMul(dist, dist);
		FStore(sqDist, &sqDistance);
		V3StoreU(normalV, normal_);
		V3StoreU(closB, closestPoint_);
		// GJK results are in the convex's local space: bring them back to world space
		normal_ = convexPose.rotate(normal_);
		closestPoint_ = convexPose.transform(closestPoint_);
	}
	return intersect;
}
// Sphere-vs-convex MTD. Distance-based (GJK) when the sphere center is
// outside the convex; otherwise pushes out along the hull face whose plane
// the center penetrates the least.
static bool computeMTD_SphereConvex(PxVec3& mtd, PxF32& depth, const Sphere& sphere, const PxConvexMeshGeometry& convexGeom, const PxTransform32& convexPose)
{
	const ConvexMesh* convexMesh = static_cast<const ConvexMesh*>(convexGeom.convexMesh);

	PxReal sqDist;
	PxVec3 unusedClosest;
	if(!pointConvexDistance(mtd, unusedClosest, sqDist, sphere.center, convexMesh, convexGeom.scale, convexPose))
	{
		// Center outside the convex: distance-based version
		if(sqDist > sphere.radius*sphere.radius)
			return false;

		depth = validateDepth(sphere.radius - PxSqrt(sqDist));
		mtd = -mtd;	// point from the convex towards the sphere
		return true;
	}

	// PT: if we reach this place, the sphere center touched the convex => switch to penetration-based code
	const PxU32 nbPolygons = convexMesh->getNbPolygonsFast();
	const HullPolygonData* polygons = convexMesh->getPolygons();
	const PxVec3 localCenter = convexPose.transformInv(sphere.center);

	// Least-penetrated face = largest (least negative) signed plane distance
	PxReal dmax = -PX_MAX_F32;
	for(PxU32 i=0;i<nbPolygons;i++)
	{
		const PxF32 d = polygons[i].mPlane.distance(localCenter);
		if(d>dmax)
		{
			dmax = d;
			mtd = convexPose.rotate(polygons[i].mPlane.n);
		}
	}
	depth = validateDepth(sphere.radius - dmax);
	return true;
}
///////////////////////////////////////////////////////////////////////////////
//ML : capsule will be in the local space of convexHullV
// Runs the PCM-based MTD computation for a capsule against a convex hull.
// The capsule is expected in the convex hull's local space (see caller).
static bool internalComputeMTD_CapsuleConvex(const CapsuleV& capsule, const bool idtScale, const ConvexHullV& convexHullV, const aos::PxTransformV& transf1,
	aos::FloatV& penetrationDepth, aos::Vec3V& normal)
{
	PolygonalData polyData;
	getPCMConvexData(convexHullV, idtScale, polyData);

	// Stack storage for the support-map object, placement-new'd below.
	// The no-scale variant is selected when the mesh scale is identity,
	// avoiding per-vertex scaling work in the support function.
	PX_ALIGN(16, PxU8 buff[sizeof(SupportLocalImpl<ConvexHullV>)]);
	SupportLocal* map = (idtScale ? static_cast<SupportLocal*>(PX_PLACEMENT_NEW(buff, SupportLocalImpl<ConvexHullNoScaleV>)(static_cast<const ConvexHullNoScaleV&>(convexHullV), transf1, convexHullV.vertex2Shape, convexHullV.shape2Vertex, idtScale)) :
		static_cast<SupportLocal*>(PX_PLACEMENT_NEW(buff, SupportLocalImpl<ConvexHullV>)(convexHullV, transf1, convexHullV.vertex2Shape, convexHullV.shape2Vertex, idtScale)));

	return computeMTD(capsule, polyData, map, penetrationDepth, normal);
}
// Capsule-vs-convex MTD wrapper: loads both shapes into SIMD (aos) form,
// expresses the capsule in the convex's local space, and delegates to
// internalComputeMTD_CapsuleConvex. Outputs are written only on contact.
static bool computeMTD_CapsuleConvex(PxVec3& mtd, PxF32& depth, const Capsule& capsule, const PxTransform32& capsulePose, const PxConvexMeshGeometry& convexGeom, const PxTransform32& convexPose)
{
	const FloatV capsuleHalfHeight = FLoad(capsule.length()*0.5f);
	const FloatV capsuleRadius = FLoad(capsule.radius);

	const Vec3V zeroV = V3Zero();

	// Convex mesh
	const ConvexMesh* convexMesh = static_cast<const ConvexMesh*>(convexGeom.convexMesh);
	const ConvexHullData* hull = &convexMesh->getHull();
	const Vec3V vScale = V3LoadU_SafeReadW(convexGeom.scale.scale);	// PT: safe because 'rotation' follows 'scale' in PxMeshScale
	const QuatV vQuat = QuatVLoadU(&convexGeom.scale.rotation.x);
	const ConvexHullV convexHullV(hull, zeroV, vScale, vQuat, convexGeom.scale.isIdentity());
	//~Convex mesh

	const QuatV q0 = QuatVLoadU(&capsulePose.q.x);
	const Vec3V p0 = V3LoadU(&capsulePose.p.x);

	const QuatV q1 = QuatVLoadU(&convexPose.q.x);
	const Vec3V p1 = V3LoadU(&convexPose.p.x);

	const PxTransformV transf0(p0, q0);
	const PxTransformV transf1(p1, q1);

	// Capsule pose relative to the convex (capsule in convex local space)
	const PxTransformV curRTrans(transf1.transformInv(transf0));
	const PxMatTransformV aToB(curRTrans);

	Vec3V normal = zeroV;
	FloatV penetrationDepth = FZero();

	// Capsule's local axis is X; build the SIMD capsule in convex space
	const CapsuleV capsuleV(aToB.p, aToB.rotate(V3Scale(V3UnitX(), capsuleHalfHeight)), capsuleRadius);

	const bool idtScale = convexGeom.scale.isIdentity();

	bool hasContacts = internalComputeMTD_CapsuleConvex(capsuleV, idtScale, convexHullV, transf1, penetrationDepth, normal);
	if(hasContacts)
	{
		FStore(penetrationDepth, &depth);
		depth = validateDepth(depth);
		V3StoreU(normal, mtd);
	}
	return hasContacts;
}
///////////////////////////////////////////////////////////////////////////////
// Runs the PCM-based polygonal MTD computation for a box against a convex
// hull. Both shapes are converted to PolygonalData + support maps first.
static bool internalComputeMTD_BoxConvex(const PxVec3 halfExtents, const BoxV& box, const bool idtScale, const ConvexHullV& convexHullV, const aos::PxTransformV& transf0, const aos::PxTransformV& transf1,
	aos::FloatV& penetrationDepth, aos::Vec3V& normal)
{
	// Polygonal description of the box (canonical PCM box topology)
	PolygonalData polyData0;
	PCMPolygonalBox polyBox0(halfExtents);
	polyBox0.getPolygonalData(&polyData0);
	polyData0.mPolygonVertexRefs = gPCMBoxPolygonData;

	// Polygonal description of the convex hull
	PolygonalData polyData1;
	getPCMConvexData(convexHullV, idtScale, polyData1);

	const Mat33V identity = M33Identity();	// box has no mesh scale
	SupportLocalImpl<BoxV> map0(box, transf0, identity, identity, true);

	// Stack storage for the convex's support map; the no-scale variant is
	// chosen when the mesh scale is identity.
	PX_ALIGN(16, PxU8 buff[sizeof(SupportLocalImpl<ConvexHullV>)]);
	SupportLocal* map1 = (idtScale ? static_cast<SupportLocal*>(PX_PLACEMENT_NEW(buff, SupportLocalImpl<ConvexHullNoScaleV>)(static_cast<const ConvexHullNoScaleV&>(convexHullV), transf1, convexHullV.vertex2Shape, convexHullV.shape2Vertex, idtScale)) :
		static_cast<SupportLocal*>(PX_PLACEMENT_NEW(buff, SupportLocalImpl<ConvexHullV>)(convexHullV, transf1, convexHullV.vertex2Shape, convexHullV.shape2Vertex, idtScale)));

	return computeMTD(polyData0, polyData1, &map0, map1, penetrationDepth, normal);
}
// Box-vs-convex MTD wrapper: loads both shapes into SIMD (aos) form and
// delegates to internalComputeMTD_BoxConvex. Outputs are written only when
// contacts are found.
static bool computeMTD_BoxConvex(PxVec3& mtd, PxF32& depth, const Box& box, const PxConvexMeshGeometry& convexGeom, const PxTransform32& convexPose)
{
	const Vec3V zeroV = V3Zero();
	const PxTransform boxPose = box.getTransform();
	const Vec3V boxExtents = V3LoadU(box.extents);
	const BoxV boxV(zeroV, boxExtents);	// box at origin; pose handled via transf0

	// Convex mesh
	const ConvexMesh* convexMesh = static_cast<const ConvexMesh*>(convexGeom.convexMesh);
	const ConvexHullData* hull = &convexMesh->getHull();
	const Vec3V vScale = V3LoadU_SafeReadW(convexGeom.scale.scale);	// PT: safe because 'rotation' follows 'scale' in PxMeshScale
	const QuatV vQuat = QuatVLoadU(&convexGeom.scale.rotation.x);
	const ConvexHullV convexHullV(hull, zeroV, vScale, vQuat, convexGeom.scale.isIdentity());
	//~Convex mesh

	const QuatV q0 = QuatVLoadU(&boxPose.q.x);
	const Vec3V p0 = V3LoadU(&boxPose.p.x);

	const QuatV q1 = QuatVLoadU(&convexPose.q.x);
	const Vec3V p1 = V3LoadU(&convexPose.p.x);

	const PxTransformV transf0(p0, q0);
	const PxTransformV transf1(p1, q1);

	Vec3V normal=zeroV;
	FloatV penetrationDepth=FZero();

	const bool idtScale = convexGeom.scale.isIdentity();

	bool hasContacts = internalComputeMTD_BoxConvex(box.extents, boxV, idtScale, convexHullV, transf0, transf1, penetrationDepth, normal);
	if(hasContacts)
	{
		FStore(penetrationDepth, &depth);
		depth = validateDepth(depth);
		V3StoreU(normal, mtd);
	}
	return hasContacts;
}
// Runs the PCM-based polygonal MTD computation for two convex hulls.
// Each hull gets its own stack-allocated support map; the no-scale variant
// is chosen per hull when its mesh scale is identity.
static bool internalComputeMTD_ConvexConvex(const bool idtScale0, const bool idtScale1, const ConvexHullV& convexHullV0, const ConvexHullV& convexHullV1, const aos::PxTransformV& transf0, const aos::PxTransformV& transf1,
	aos::FloatV& penetrationDepth, aos::Vec3V& normal)
{
	PolygonalData polyData0, polyData1;
	getPCMConvexData(convexHullV0, idtScale0, polyData0);
	getPCMConvexData(convexHullV1, idtScale1, polyData1);

	PX_ALIGN(16, PxU8 buff0[sizeof(SupportLocalImpl<ConvexHullV>)]);
	PX_ALIGN(16, PxU8 buff1[sizeof(SupportLocalImpl<ConvexHullV>)]);

	SupportLocal* map0 = (idtScale0 ? static_cast<SupportLocal*>(PX_PLACEMENT_NEW(buff0, SupportLocalImpl<ConvexHullNoScaleV>)(static_cast<const ConvexHullNoScaleV&>(convexHullV0), transf0, convexHullV0.vertex2Shape, convexHullV0.shape2Vertex, idtScale0)) :
		static_cast<SupportLocal*>(PX_PLACEMENT_NEW(buff0, SupportLocalImpl<ConvexHullV>)(convexHullV0, transf0, convexHullV0.vertex2Shape, convexHullV0.shape2Vertex, idtScale0)));
	SupportLocal* map1 = (idtScale1 ? static_cast<SupportLocal*>(PX_PLACEMENT_NEW(buff1, SupportLocalImpl<ConvexHullNoScaleV>)(static_cast<const ConvexHullNoScaleV&>(convexHullV1), transf1, convexHullV1.vertex2Shape, convexHullV1.shape2Vertex, idtScale1)) :
		static_cast<SupportLocal*>(PX_PLACEMENT_NEW(buff1, SupportLocalImpl<ConvexHullV>)(convexHullV1, transf1, convexHullV1.vertex2Shape, convexHullV1.shape2Vertex, idtScale1)));

	return computeMTD(polyData0, polyData1, map0, map1, penetrationDepth, normal);
}
///////////////////////////////////////////////////////////////////////////////
// Convex-vs-convex MTD wrapper: loads both hulls into SIMD (aos) form and
// delegates to internalComputeMTD_ConvexConvex. Outputs are written only
// when contacts are found.
static bool computeMTD_ConvexConvex(PxVec3& mtd, PxF32& depth, const PxConvexMeshGeometry& convexGeom0, const PxTransform32& convexPose0, const PxConvexMeshGeometry& convexGeom1, const PxTransform32& convexPose1)
{
	using namespace aos;

	const Vec3V zeroV = V3Zero();

	// Convex mesh
	const ConvexMesh* convexMesh0 = static_cast<const ConvexMesh*>(convexGeom0.convexMesh);
	const ConvexHullData* hull0 = &convexMesh0->getHull();
	const Vec3V vScale0 = V3LoadU_SafeReadW(convexGeom0.scale.scale);	// PT: safe because 'rotation' follows 'scale' in PxMeshScale
	const QuatV vQuat0 = QuatVLoadU(&convexGeom0.scale.rotation.x);
	const ConvexHullV convexHullV0(hull0, zeroV, vScale0, vQuat0, convexGeom0.scale.isIdentity());
	//~Convex mesh

	// Convex mesh
	const ConvexMesh* convexMesh1 = static_cast<const ConvexMesh*>(convexGeom1.convexMesh);
	const ConvexHullData* hull1 = &convexMesh1->getHull();
	const Vec3V vScale1 = V3LoadU_SafeReadW(convexGeom1.scale.scale);	// PT: safe because 'rotation' follows 'scale' in PxMeshScale
	const QuatV vQuat1 = QuatVLoadU(&convexGeom1.scale.rotation.x);
	const ConvexHullV convexHullV1(hull1, zeroV, vScale1, vQuat1, convexGeom1.scale.isIdentity());
	//~Convex mesh

	const QuatV q0 = QuatVLoadU(&convexPose0.q.x);
	const Vec3V p0 = V3LoadU(&convexPose0.p.x);

	const QuatV q1 = QuatVLoadU(&convexPose1.q.x);
	const Vec3V p1 = V3LoadU(&convexPose1.p.x);

	const PxTransformV transf0(p0, q0);
	const PxTransformV transf1(p1, q1);

	Vec3V normal = zeroV;
	FloatV penetrationDepth = FZero();

	const bool idtScale0 = convexGeom0.scale.isIdentity();
	const bool idtScale1 = convexGeom1.scale.isIdentity();

	bool hasContacts = internalComputeMTD_ConvexConvex(idtScale0, idtScale1, convexHullV0, convexHullV1, transf0, transf1, penetrationDepth, normal);
	if(hasContacts)
	{
		FStore(penetrationDepth, &depth);
		depth = validateDepth(depth);
		V3StoreU(normal, mtd);
	}
	return hasContacts;
}
///////////////////////////////////////////////////////////////////////////////
// Sphere-vs-plane MTD: compare the center's signed distance to the radius.
static bool computeMTD_SpherePlane(PxVec3& mtd, PxF32& depth, const Sphere& sphere, const PxPlane& plane)
{
	const PxReal dist = plane.distance(sphere.center);
	if(dist>sphere.radius)
		return false;	// sphere entirely on the positive side

	mtd = plane.n;
	depth = validateDepth(sphere.radius - dist);
	return true;
}
// Plane-vs-box MTD: the penetration depth is minus the most negative signed
// distance among the 8 box corners.
static bool computeMTD_PlaneBox(PxVec3& mtd, PxF32& depth, const PxPlane& plane, const Box& box)
{
	PxVec3 corners[8];
	box.computeBoxPoints(corners);

	PxReal dmin = plane.distance(corners[0]);
	for(PxU32 i=1;i<8;i++)
		dmin = physx::intrinsics::selectMin(dmin, plane.distance(corners[i]));

	if(dmin>0.0f)
		return false;	// all corners above the plane

	mtd = -plane.n;
	depth = validateDepth(-dmin);
	return true;
}
// Plane-vs-capsule MTD: deepest point is the nearest segment endpoint minus
// the capsule radius.
static bool computeMTD_PlaneCapsule(PxVec3& mtd, PxF32& depth, const PxPlane& plane, const Capsule& capsule)
{
	const PxReal dist0 = plane.distance(capsule.p0);
	const PxReal dist1 = plane.distance(capsule.p1);
	const PxReal deepest = physx::intrinsics::selectMin(dist0, dist1) - capsule.radius;

	if(deepest>0.0f)
		return false;	// capsule entirely above the plane

	mtd = -plane.n;
	depth = validateDepth(-deepest);
	return true;
}
// Plane-vs-convex MTD: the penetration depth is minus the most negative
// signed distance among the convex's world-space vertices.
static bool computeMTD_PlaneConvex(PxVec3& mtd, PxF32& depth, const PxPlane& plane, const PxConvexMeshGeometry& convexGeom, const PxTransform32& convexPose)
{
	const ConvexMesh* convexMesh = static_cast<const ConvexMesh*>(convexGeom.convexMesh);
	const PxU32 nbVerts = convexMesh->getNbVerts();
	const PxVec3* PX_RESTRICT verts = convexMesh->getVerts();

	PxReal dmin = plane.distance(convexPose.transform(verts[0]));
	for(PxU32 i=1;i<nbVerts;i++)
		dmin = physx::intrinsics::selectMin(dmin, plane.distance(convexPose.transform(verts[i])));

	if(dmin>0.0f)
		return false;	// all vertices above the plane

	mtd = -plane.n;
	depth = validateDepth(-dmin);
	return true;
}
///////////////////////////////////////////////////////////////////////////////
// Derives a single MTD (direction + depth) from a set of contact points.
// Per-axis min/max depenetration vectors are accumulated and averaged so
// that separations pushing in opposing directions get equalized.
// Returns false when there are no contacts or the combined separation
// direction is degenerate.
static bool processContacts(PxVec3& mtd, PxF32& depth, const PxU32 nbContacts, const PxContactPoint* contacts)
{
	if(!nbContacts)
		return false;

	PxVec3 mn(0.0f), mx(0.0f);
	for(PxU32 i=0; i<nbContacts; i++)
	{
		const PxContactPoint& ct = contacts[i];
		const PxVec3 depenetration = ct.separation * ct.normal;
		mn = mn.minimum(depenetration);
		mx = mx.maximum(depenetration);
	}

	// even if we are already moving in separation direction, we should still depenetrate
	// so no dot velocity test

	// here we attempt to equalize the separations pushing in opposing directions along each axis
	PxVec3 mn1, mx1;
	mn1.x = (mn.x == 0.0f) ? mx.x : mn.x;
	mn1.y = (mn.y == 0.0f) ? mx.y : mn.y;
	mn1.z = (mn.z == 0.0f) ? mx.z : mn.z;
	mx1.x = (mx.x == 0.0f) ? mn.x : mx.x;
	mx1.y = (mx.y == 0.0f) ? mn.y : mx.y;
	mx1.z = (mx.z == 0.0f) ? mn.z : mx.z;

	const PxVec3 sepDir((mn1 + mx1)*0.5f);

	// Compute the length once and reuse it for both the direction and the
	// depth (avoids the extra square roots of getNormalized() + magnitude()).
	const PxReal lenSq = sepDir.magnitudeSquared();
	if(lenSq < 1e-10f)
		return false;

	const PxReal len = PxSqrt(lenSq);
	mtd = -sepDir * (1.0f/len);
	depth = len;
	return true;
}
// Sphere-vs-triangle-mesh MTD: reuse the regular contact generation and
// derive the MTD from the resulting contact set.
static bool computeMTD_SphereMesh(PxVec3& mtd, PxF32& depth, const Sphere& sphere, const PxTriangleMeshGeometry& meshGeom, const PxTransform32& meshPose)
{
	Cache cache;
	PxContactBuffer buffer;
	buffer.reset();

	const bool hasContacts = contactSphereMesh(PxSphereGeometry(sphere.radius), meshGeom, PxTransform32(sphere.center), meshPose, NarrowPhaseParams(0.0f, 0.0f, 1.0f), cache, buffer, NULL);
	return hasContacts && processContacts(mtd, depth, buffer.count, buffer.contacts);
}
// Capsule-vs-triangle-mesh MTD: express the capsule as geometry + pose,
// reuse the regular contact generation and derive the MTD from the contacts.
static bool computeMTD_CapsuleMesh(PxVec3& mtd, PxF32& depth, const Capsule& capsule, const PxTriangleMeshGeometry& meshGeom, const PxTransform32& meshPose)
{
	PxReal halfHeight;
	const PxTransform32 capsulePose(PxTransformFromSegment(capsule.p0, capsule.p1, &halfHeight));

	Cache cache;
	PxContactBuffer buffer;
	buffer.reset();

	const bool hasContacts = contactCapsuleMesh(PxCapsuleGeometry(capsule.radius, halfHeight), meshGeom, capsulePose, meshPose, NarrowPhaseParams(0.0f, 0.0f, 1.0f), cache, buffer, NULL);
	return hasContacts && processContacts(mtd, depth, buffer.count, buffer.contacts);
}
// Box-vs-triangle-mesh MTD via the regular contact generation path.
static bool computeMTD_BoxMesh(PxVec3& mtd, PxF32& depth, const Box& box, const PxTriangleMeshGeometry& meshGeom, const PxTransform32& meshPose)
{
	const PxTransform32 boxPose(box.center, PxQuat(box.rot));

	Cache cache;
	PxContactBuffer buffer;
	buffer.reset();

	const bool hasContacts = contactBoxMesh(PxBoxGeometry(box.extents), meshGeom, boxPose, meshPose, NarrowPhaseParams(0.0f, 0.0f, 1.0f), cache, buffer, NULL);
	return hasContacts && processContacts(mtd, depth, buffer.count, buffer.contacts);
}
// Convex-vs-triangle-mesh MTD via the regular contact generation path.
static bool computeMTD_ConvexMesh(PxVec3& mtd, PxF32& depth, const PxConvexMeshGeometry& convexGeom, const PxTransform32& convexPose, const PxTriangleMeshGeometry& meshGeom, const PxTransform32& meshPose)
{
	Cache cache;
	PxContactBuffer buffer;
	buffer.reset();

	const bool hasContacts = contactConvexMesh(convexGeom, meshGeom, convexPose, meshPose, NarrowPhaseParams(0.0f, 0.0f, 1.0f), cache, buffer, NULL);
	return hasContacts && processContacts(mtd, depth, buffer.count, buffer.contacts);
}
// Sphere-vs-heightfield MTD via the regular contact generation path.
static bool computeMTD_SphereHeightField(PxVec3& mtd, PxF32& depth, const Sphere& sphere, const PxHeightFieldGeometry& meshGeom, const PxTransform32& meshPose)
{
	const PxTransform32 spherePose(sphere.center);

	Cache cache;
	PxContactBuffer buffer;
	buffer.reset();

	const bool hasContacts = contactSphereHeightfield(PxSphereGeometry(sphere.radius), meshGeom, spherePose, meshPose, NarrowPhaseParams(0.0f, 0.0f, 1.0f), cache, buffer, NULL);
	return hasContacts && processContacts(mtd, depth, buffer.count, buffer.contacts);
}
// Capsule-vs-heightfield MTD via the regular contact generation path.
static bool computeMTD_CapsuleHeightField(PxVec3& mtd, PxF32& depth, const Capsule& capsule, const PxHeightFieldGeometry& meshGeom, const PxTransform32& meshPose)
{
	PxReal halfHeight;
	const PxTransform32 capsulePose(PxTransformFromSegment(capsule.p0, capsule.p1, &halfHeight));

	Cache cache;
	PxContactBuffer buffer;
	buffer.reset();

	const bool hasContacts = contactCapsuleHeightfield(PxCapsuleGeometry(capsule.radius, halfHeight), meshGeom, capsulePose, meshPose, NarrowPhaseParams(0.0f, 0.0f, 1.0f), cache, buffer, NULL);
	return hasContacts && processContacts(mtd, depth, buffer.count, buffer.contacts);
}
// Box-vs-heightfield MTD via the regular contact generation path.
static bool computeMTD_BoxHeightField(PxVec3& mtd, PxF32& depth, const Box& box, const PxHeightFieldGeometry& meshGeom, const PxTransform32& meshPose)
{
	const PxTransform32 boxPose(box.center, PxQuat(box.rot));

	Cache cache;
	PxContactBuffer buffer;
	buffer.reset();

	const bool hasContacts = contactBoxHeightfield(PxBoxGeometry(box.extents), meshGeom, boxPose, meshPose, NarrowPhaseParams(0.0f, 0.0f, 1.0f), cache, buffer, NULL);
	return hasContacts && processContacts(mtd, depth, buffer.count, buffer.contacts);
}
// Convex-vs-heightfield MTD via the regular contact generation path.
static bool computeMTD_ConvexHeightField(PxVec3& mtd, PxF32& depth, const PxConvexMeshGeometry& convexGeom, const PxTransform32& convexPose, const PxHeightFieldGeometry& meshGeom, const PxTransform32& meshPose)
{
	Cache cache;
	PxContactBuffer buffer;
	buffer.reset();

	const bool hasContacts = contactConvexHeightfield(convexGeom, meshGeom, convexPose, meshPose, NarrowPhaseParams(0.0f, 0.0f, 1.0f), cache, buffer, NULL);
	return hasContacts && processContacts(mtd, depth, buffer.count, buffer.contacts);
}
// Custom-geometry MTD: delegate contact generation to the user callbacks,
// then derive the MTD from the returned contact set.
static bool computeMTD_CustomGeometry(PxVec3& mtd, PxF32& depth, const PxCustomGeometry& geom0, const PxTransform32& pose0, const PxGeometry& geom1, const PxTransform32& pose1)
{
	Cache cache;
	PxContactBuffer buffer;
	buffer.reset();

	const bool hasContacts = geom0.callbacks->generateContacts(geom0, geom1, pose0, pose1, FLT_EPSILON, FLT_EPSILON, 1.0f, buffer);
	return hasContacts && processContacts(mtd, depth, buffer.count, buffer.contacts);
}
// Fallback entry for geometry pairs with no MTD implementation.
// Asserts in debug builds and reports "no penetration" in release.
static bool GeomMTDCallback_NotSupported(GU_MTD_FUNC_PARAMS)
{
	PX_ALWAYS_ASSERT_MESSAGE("NOT SUPPORTED");
	PX_UNUSED(mtd);		PX_UNUSED(depth);
	PX_UNUSED(geom0);	PX_UNUSED(geom1);
	PX_UNUSED(pose0);	PX_UNUSED(pose1);
	return false;
}
// Dispatch shim: sphere vs sphere.
static bool GeomMTDCallback_SphereSphere(GU_MTD_FUNC_PARAMS)
{
	PX_ASSERT(geom0.getType()==PxGeometryType::eSPHERE);
	PX_ASSERT(geom1.getType()==PxGeometryType::eSPHERE);

	const PxSphereGeometry& sg0 = static_cast<const PxSphereGeometry&>(geom0);
	const PxSphereGeometry& sg1 = static_cast<const PxSphereGeometry&>(geom1);

	const Sphere sphere0(pose0.p, sg0.radius);
	const Sphere sphere1(pose1.p, sg1.radius);
	return computeMTD_SphereSphere(mtd, depth, sphere0, sphere1);
}
// Dispatch shim: sphere vs plane.
static bool GeomMTDCallback_SpherePlane(GU_MTD_FUNC_PARAMS)
{
	PX_ASSERT(geom0.getType()==PxGeometryType::eSPHERE);
	PX_ASSERT(geom1.getType()==PxGeometryType::ePLANE);
	PX_UNUSED(geom1);	// the plane is fully defined by pose1

	const PxSphereGeometry& sg = static_cast<const PxSphereGeometry&>(geom0);

	const Sphere sphere(pose0.p, sg.radius);
	return computeMTD_SpherePlane(mtd, depth, sphere, getPlane(pose1));
}
// Dispatch shim: sphere vs capsule.
static bool GeomMTDCallback_SphereCapsule(GU_MTD_FUNC_PARAMS)
{
	PX_ASSERT(geom0.getType()==PxGeometryType::eSPHERE);
	PX_ASSERT(geom1.getType()==PxGeometryType::eCAPSULE);

	const PxSphereGeometry& sg = static_cast<const PxSphereGeometry&>(geom0);
	const PxCapsuleGeometry& cg = static_cast<const PxCapsuleGeometry&>(geom1);

	// Build a Gu::Capsule (world-space segment + radius) from the geometry
	Capsule capsule;
	getCapsuleSegment(pose1, cg, capsule);
	capsule.radius = cg.radius;

	const Sphere sphere(pose0.p, sg.radius);
	return computeMTD_SphereCapsule(mtd, depth, sphere, capsule);
}
// Dispatch shim: sphere vs box.
static bool GeomMTDCallback_SphereBox(GU_MTD_FUNC_PARAMS)
{
	PX_ASSERT(geom0.getType()==PxGeometryType::eSPHERE);
	PX_ASSERT(geom1.getType()==PxGeometryType::eBOX);

	const PxSphereGeometry& sg = static_cast<const PxSphereGeometry&>(geom0);
	const PxBoxGeometry& bg = static_cast<const PxBoxGeometry&>(geom1);

	// World-space OBB from geometry + pose
	Box worldBox;
	buildFrom(worldBox, pose1.p, bg.halfExtents, pose1.q);

	const Sphere sphere(pose0.p, sg.radius);
	return computeMTD_SphereBox(mtd, depth, sphere, worldBox);
}
// Dispatch shim: sphere vs convex mesh.
static bool GeomMTDCallback_SphereConvex(GU_MTD_FUNC_PARAMS)
{
	PX_ASSERT(geom0.getType()==PxGeometryType::eSPHERE);
	PX_ASSERT(geom1.getType()==PxGeometryType::eCONVEXMESH);

	const PxSphereGeometry& sg = static_cast<const PxSphereGeometry&>(geom0);
	const PxConvexMeshGeometry& cg = static_cast<const PxConvexMeshGeometry&>(geom1);

	const Sphere sphere(pose0.p, sg.radius);
	return computeMTD_SphereConvex(mtd, depth, sphere, cg, pose1);
}
// Dispatch shim: sphere vs triangle mesh.
static bool GeomMTDCallback_SphereMesh(GU_MTD_FUNC_PARAMS)
{
	PX_ASSERT(geom0.getType()==PxGeometryType::eSPHERE);
	PX_ASSERT(geom1.getType()==PxGeometryType::eTRIANGLEMESH);

	const PxSphereGeometry& sg = static_cast<const PxSphereGeometry&>(geom0);
	const PxTriangleMeshGeometry& mg = static_cast<const PxTriangleMeshGeometry&>(geom1);

	const Sphere sphere(pose0.p, sg.radius);
	return computeMTD_SphereMesh(mtd, depth, sphere, mg, pose1);
}
// Dispatch shim: plane vs capsule.
static bool GeomMTDCallback_PlaneCapsule(GU_MTD_FUNC_PARAMS)
{
	PX_ASSERT(geom0.getType()==PxGeometryType::ePLANE);
	PX_ASSERT(geom1.getType()==PxGeometryType::eCAPSULE);
	PX_UNUSED(geom0);	// the plane is fully defined by pose0

	const PxCapsuleGeometry& cg = static_cast<const PxCapsuleGeometry&>(geom1);

	Capsule capsule;
	getCapsuleSegment(pose1, cg, capsule);
	capsule.radius = cg.radius;

	return computeMTD_PlaneCapsule(mtd, depth, getPlane(pose0), capsule);
}
// Dispatch shim: plane vs box.
static bool GeomMTDCallback_PlaneBox(GU_MTD_FUNC_PARAMS)
{
	PX_ASSERT(geom0.getType()==PxGeometryType::ePLANE);
	PX_ASSERT(geom1.getType()==PxGeometryType::eBOX);
	PX_UNUSED(geom0);	// the plane is fully defined by pose0

	const PxBoxGeometry& bg = static_cast<const PxBoxGeometry&>(geom1);

	Box worldBox;
	buildFrom(worldBox, pose1.p, bg.halfExtents, pose1.q);

	return computeMTD_PlaneBox(mtd, depth, getPlane(pose0), worldBox);
}
// Dispatch shim: plane vs convex mesh.
static bool GeomMTDCallback_PlaneConvex(GU_MTD_FUNC_PARAMS)
{
	PX_ASSERT(geom0.getType()==PxGeometryType::ePLANE);
	PX_ASSERT(geom1.getType()==PxGeometryType::eCONVEXMESH);
	PX_UNUSED(geom0);	// the plane is fully defined by pose0

	const PxConvexMeshGeometry& cg = static_cast<const PxConvexMeshGeometry&>(geom1);

	return computeMTD_PlaneConvex(mtd, depth, getPlane(pose0), cg, pose1);
}
// Dispatch shim: capsule vs capsule.
static bool GeomMTDCallback_CapsuleCapsule(GU_MTD_FUNC_PARAMS)
{
	PX_ASSERT(geom0.getType()==PxGeometryType::eCAPSULE);
	PX_ASSERT(geom1.getType()==PxGeometryType::eCAPSULE);

	const PxCapsuleGeometry& cg0 = static_cast<const PxCapsuleGeometry&>(geom0);
	const PxCapsuleGeometry& cg1 = static_cast<const PxCapsuleGeometry&>(geom1);

	// Build both world-space capsules (segment + radius)
	Capsule capsule0;
	getCapsuleSegment(pose0, cg0, capsule0);
	capsule0.radius = cg0.radius;

	Capsule capsule1;
	getCapsuleSegment(pose1, cg1, capsule1);
	capsule1.radius = cg1.radius;

	return computeMTD_CapsuleCapsule(mtd, depth, capsule0, capsule1);
}
// Dispatch shim: capsule vs box.
static bool GeomMTDCallback_CapsuleBox(GU_MTD_FUNC_PARAMS)
{
	PX_ASSERT(geom0.getType()==PxGeometryType::eCAPSULE);
	PX_ASSERT(geom1.getType()==PxGeometryType::eBOX);

	const PxCapsuleGeometry& cg = static_cast<const PxCapsuleGeometry&>(geom0);
	const PxBoxGeometry& bg = static_cast<const PxBoxGeometry&>(geom1);

	Capsule capsule;
	getCapsuleSegment(pose0, cg, capsule);
	capsule.radius = cg.radius;

	Box worldBox;
	buildFrom(worldBox, pose1.p, bg.halfExtents, pose1.q);

	return computeMTD_CapsuleBox(mtd, depth, capsule, worldBox);
}
// Dispatch shim: capsule vs convex mesh.
static bool GeomMTDCallback_CapsuleConvex(GU_MTD_FUNC_PARAMS)
{
	PX_ASSERT(geom0.getType()==PxGeometryType::eCAPSULE);
	PX_ASSERT(geom1.getType()==PxGeometryType::eCONVEXMESH);

	const PxCapsuleGeometry& cg = static_cast<const PxCapsuleGeometry&>(geom0);
	const PxConvexMeshGeometry& cvg = static_cast<const PxConvexMeshGeometry&>(geom1);

	Capsule capsule;
	getCapsuleSegment(pose0, cg, capsule);
	capsule.radius = cg.radius;

	return computeMTD_CapsuleConvex(mtd, depth, capsule, pose0, cvg, pose1);
}
// Dispatch shim: capsule vs triangle mesh.
static bool GeomMTDCallback_CapsuleMesh(GU_MTD_FUNC_PARAMS)
{
	PX_ASSERT(geom0.getType()==PxGeometryType::eCAPSULE);
	PX_ASSERT(geom1.getType()==PxGeometryType::eTRIANGLEMESH);

	const PxCapsuleGeometry& cg = static_cast<const PxCapsuleGeometry&>(geom0);
	const PxTriangleMeshGeometry& mg = static_cast<const PxTriangleMeshGeometry&>(geom1);

	Capsule capsule;
	getCapsuleSegment(pose0, cg, capsule);
	capsule.radius = cg.radius;

	return computeMTD_CapsuleMesh(mtd, depth, capsule, mg, pose1);
}
// Dispatch shim: box vs box.
static bool GeomMTDCallback_BoxBox(GU_MTD_FUNC_PARAMS)
{
	PX_ASSERT(geom0.getType()==PxGeometryType::eBOX);
	PX_ASSERT(geom1.getType()==PxGeometryType::eBOX);

	const PxBoxGeometry& bg0 = static_cast<const PxBoxGeometry&>(geom0);
	const PxBoxGeometry& bg1 = static_cast<const PxBoxGeometry&>(geom1);

	// World-space OBBs from geometry + pose
	Box worldBox0;
	buildFrom(worldBox0, pose0.p, bg0.halfExtents, pose0.q);

	Box worldBox1;
	buildFrom(worldBox1, pose1.p, bg1.halfExtents, pose1.q);

	return computeMTD_BoxBox(mtd, depth, worldBox0, worldBox1);
}
// Dispatch shim: box vs convex mesh.
static bool GeomMTDCallback_BoxConvex(GU_MTD_FUNC_PARAMS)
{
	PX_ASSERT(geom0.getType()==PxGeometryType::eBOX);
	PX_ASSERT(geom1.getType()==PxGeometryType::eCONVEXMESH);

	const PxBoxGeometry& bg = static_cast<const PxBoxGeometry&>(geom0);
	const PxConvexMeshGeometry& cg = static_cast<const PxConvexMeshGeometry&>(geom1);

	Box worldBox;
	buildFrom(worldBox, pose0.p, bg.halfExtents, pose0.q);

	return computeMTD_BoxConvex(mtd, depth, worldBox, cg, pose1);
}
// Dispatch shim: box vs triangle mesh.
static bool GeomMTDCallback_BoxMesh(GU_MTD_FUNC_PARAMS)
{
	PX_ASSERT(geom0.getType()==PxGeometryType::eBOX);
	PX_ASSERT(geom1.getType()==PxGeometryType::eTRIANGLEMESH);

	const PxBoxGeometry& bg = static_cast<const PxBoxGeometry&>(geom0);
	const PxTriangleMeshGeometry& mg = static_cast<const PxTriangleMeshGeometry&>(geom1);

	Box worldBox;
	buildFrom(worldBox, pose0.p, bg.halfExtents, pose0.q);

	return computeMTD_BoxMesh(mtd, depth, worldBox, mg, pose1);
}
// Dispatch shim: convex mesh vs convex mesh.
static bool GeomMTDCallback_ConvexConvex(GU_MTD_FUNC_PARAMS)
{
	PX_ASSERT(geom0.getType()==PxGeometryType::eCONVEXMESH);
	PX_ASSERT(geom1.getType()==PxGeometryType::eCONVEXMESH);

	const PxConvexMeshGeometry& cg0 = static_cast<const PxConvexMeshGeometry&>(geom0);
	const PxConvexMeshGeometry& cg1 = static_cast<const PxConvexMeshGeometry&>(geom1);

	return computeMTD_ConvexConvex(mtd, depth, cg0, pose0, cg1, pose1);
}
// Dispatch shim: convex mesh vs triangle mesh.
static bool GeomMTDCallback_ConvexMesh(GU_MTD_FUNC_PARAMS)
{
	PX_ASSERT(geom0.getType()==PxGeometryType::eCONVEXMESH);
	PX_ASSERT(geom1.getType()==PxGeometryType::eTRIANGLEMESH);

	const PxConvexMeshGeometry& cg = static_cast<const PxConvexMeshGeometry&>(geom0);
	const PxTriangleMeshGeometry& mg = static_cast<const PxTriangleMeshGeometry&>(geom1);

	return computeMTD_ConvexMesh(mtd, depth, cg, pose0, mg, pose1);
}
// Dispatch shim: sphere vs heightfield.
static bool GeomMTDCallback_SphereHeightField(GU_MTD_FUNC_PARAMS)
{
	PX_ASSERT(geom0.getType()==PxGeometryType::eSPHERE);
	PX_ASSERT(geom1.getType()==PxGeometryType::eHEIGHTFIELD);

	const PxSphereGeometry& sg = static_cast<const PxSphereGeometry&>(geom0);
	const PxHeightFieldGeometry& hfg = static_cast<const PxHeightFieldGeometry&>(geom1);

	const Sphere sphere(pose0.p, sg.radius);
	return computeMTD_SphereHeightField(mtd, depth, sphere, hfg, pose1);
}
// Dispatch shim: capsule vs heightfield.
static bool GeomMTDCallback_CapsuleHeightField(GU_MTD_FUNC_PARAMS)
{
	PX_ASSERT(geom0.getType()==PxGeometryType::eCAPSULE);
	PX_ASSERT(geom1.getType()==PxGeometryType::eHEIGHTFIELD);

	const PxCapsuleGeometry& cg = static_cast<const PxCapsuleGeometry&>(geom0);
	const PxHeightFieldGeometry& hfg = static_cast<const PxHeightFieldGeometry&>(geom1);

	Capsule capsule;
	getCapsuleSegment(pose0, cg, capsule);
	capsule.radius = cg.radius;

	return computeMTD_CapsuleHeightField(mtd, depth, capsule, hfg, pose1);
}
// Dispatch shim: box vs heightfield.
static bool GeomMTDCallback_BoxHeightField(GU_MTD_FUNC_PARAMS)
{
	PX_ASSERT(geom0.getType()==PxGeometryType::eBOX);
	PX_ASSERT(geom1.getType()==PxGeometryType::eHEIGHTFIELD);

	const PxBoxGeometry& bg = static_cast<const PxBoxGeometry&>(geom0);
	const PxHeightFieldGeometry& hfg = static_cast<const PxHeightFieldGeometry&>(geom1);

	Box worldBox;
	buildFrom(worldBox, pose0.p, bg.halfExtents, pose0.q);

	return computeMTD_BoxHeightField(mtd, depth, worldBox, hfg, pose1);
}
// Dispatch shim: convex mesh vs heightfield.
static bool GeomMTDCallback_ConvexHeightField(GU_MTD_FUNC_PARAMS)
{
	PX_ASSERT(geom0.getType()==PxGeometryType::eCONVEXMESH);
	PX_ASSERT(geom1.getType()==PxGeometryType::eHEIGHTFIELD);

	const PxConvexMeshGeometry& cg = static_cast<const PxConvexMeshGeometry&>(geom0);
	const PxHeightFieldGeometry& hfg = static_cast<const PxHeightFieldGeometry&>(geom1);

	return computeMTD_ConvexHeightField(mtd, depth, cg, pose0, hfg, pose1);
}
// Dispatch shim: custom geometry (first operand) vs any geometry.
static bool GeomMTDCallback_CustomGeometryGeometry(GU_MTD_FUNC_PARAMS)
{
	PX_ASSERT(geom0.getType() == PxGeometryType::eCUSTOM);

	const PxCustomGeometry& customGeom = static_cast<const PxCustomGeometry&>(geom0);
	return computeMTD_CustomGeometry(mtd, depth, customGeom, pose0, geom1, pose1);
}
// Dispatch shim: any geometry vs custom geometry (second operand).
// Swaps the operands for the custom-geometry path, then flips the MTD back.
static bool GeomMTDCallback_GeometryCustomGeometry(GU_MTD_FUNC_PARAMS)
{
	PX_ASSERT(geom1.getType() == PxGeometryType::eCUSTOM);

	const PxCustomGeometry& customGeom = static_cast<const PxCustomGeometry&>(geom1);
	if(!computeMTD_CustomGeometry(mtd, depth, customGeom, pose1, geom0, pose0))
		return false;

	mtd = -mtd;	// result was computed with swapped operands
	return true;
}
// MTD dispatch table, indexed by [geom0 type][geom1 type].
// Only the upper-right triangle is populated; entries below the diagonal are NULL
// (presumably the dispatcher swaps the operands for those pairs - confirm in the caller).
// In the eCUSTOM column, geom1 is the custom geometry, so those slots must use
// GeomMTDCallback_GeometryCustomGeometry (which asserts geom1 is eCUSTOM).
Gu::GeomMTDFunc gGeomMTDMethodTable[][PxGeometryType::eGEOMETRY_COUNT] =
{
	//PxGeometryType::eSPHERE
	{
		GeomMTDCallback_SphereSphere,			//PxGeometryType::eSPHERE
		GeomMTDCallback_SpherePlane,			//PxGeometryType::ePLANE
		GeomMTDCallback_SphereCapsule,			//PxGeometryType::eCAPSULE
		GeomMTDCallback_SphereBox,				//PxGeometryType::eBOX
		GeomMTDCallback_SphereConvex,			//PxGeometryType::eCONVEXMESH
		GeomMTDCallback_NotSupported,			//PxGeometryType::ePARTICLESYSTEM
		GeomMTDCallback_NotSupported,			//PxGeometryType::eTETRAHEDRONMESH
		GeomMTDCallback_SphereMesh,				//PxGeometryType::eTRIANGLEMESH
		GeomMTDCallback_SphereHeightField,		//PxGeometryType::eHEIGHTFIELD
		GeomMTDCallback_NotSupported,			//PxGeometryType::eHAIRSYSTEM
		GeomMTDCallback_GeometryCustomGeometry,	//PxGeometryType::eCUSTOM
	},

	//PxGeometryType::ePLANE
	{
		0,										//PxGeometryType::eSPHERE
		GeomMTDCallback_NotSupported,			//PxGeometryType::ePLANE
		GeomMTDCallback_PlaneCapsule,			//PxGeometryType::eCAPSULE
		GeomMTDCallback_PlaneBox,				//PxGeometryType::eBOX
		GeomMTDCallback_PlaneConvex,			//PxGeometryType::eCONVEXMESH
		GeomMTDCallback_NotSupported,			//PxGeometryType::ePARTICLESYSTEM
		GeomMTDCallback_NotSupported,			//PxGeometryType::eTETRAHEDRONMESH
		GeomMTDCallback_NotSupported,			//PxGeometryType::eTRIANGLEMESH
		GeomMTDCallback_NotSupported,			//PxGeometryType::eHEIGHTFIELD
		GeomMTDCallback_NotSupported,			//PxGeometryType::eHAIRSYSTEM
		GeomMTDCallback_GeometryCustomGeometry,	//PxGeometryType::eCUSTOM
	},

	//PxGeometryType::eCAPSULE
	{
		0,										//PxGeometryType::eSPHERE
		0,										//PxGeometryType::ePLANE
		GeomMTDCallback_CapsuleCapsule,			//PxGeometryType::eCAPSULE
		GeomMTDCallback_CapsuleBox,				//PxGeometryType::eBOX
		GeomMTDCallback_CapsuleConvex,			//PxGeometryType::eCONVEXMESH
		GeomMTDCallback_NotSupported,			//PxGeometryType::ePARTICLESYSTEM
		GeomMTDCallback_NotSupported,			//PxGeometryType::eTETRAHEDRONMESH
		GeomMTDCallback_CapsuleMesh,			//PxGeometryType::eTRIANGLEMESH
		GeomMTDCallback_CapsuleHeightField,		//PxGeometryType::eHEIGHTFIELD
		GeomMTDCallback_NotSupported,			//PxGeometryType::eHAIRSYSTEM
		GeomMTDCallback_GeometryCustomGeometry,	//PxGeometryType::eCUSTOM
	},

	//PxGeometryType::eBOX
	{
		0,										//PxGeometryType::eSPHERE
		0,										//PxGeometryType::ePLANE
		0,										//PxGeometryType::eCAPSULE
		GeomMTDCallback_BoxBox,					//PxGeometryType::eBOX
		GeomMTDCallback_BoxConvex,				//PxGeometryType::eCONVEXMESH
		GeomMTDCallback_NotSupported,			//PxGeometryType::ePARTICLESYSTEM
		GeomMTDCallback_NotSupported,			//PxGeometryType::eTETRAHEDRONMESH
		GeomMTDCallback_BoxMesh,				//PxGeometryType::eTRIANGLEMESH
		GeomMTDCallback_BoxHeightField,			//PxGeometryType::eHEIGHTFIELD
		GeomMTDCallback_NotSupported,			//PxGeometryType::eHAIRSYSTEM
		GeomMTDCallback_GeometryCustomGeometry,	//PxGeometryType::eCUSTOM
	},

	//PxGeometryType::eCONVEXMESH
	{
		0,										//PxGeometryType::eSPHERE
		0,										//PxGeometryType::ePLANE
		0,										//PxGeometryType::eCAPSULE
		0,										//PxGeometryType::eBOX
		GeomMTDCallback_ConvexConvex,			//PxGeometryType::eCONVEXMESH
		GeomMTDCallback_NotSupported,			//PxGeometryType::ePARTICLESYSTEM
		GeomMTDCallback_NotSupported,			//PxGeometryType::eTETRAHEDRONMESH
		GeomMTDCallback_ConvexMesh,				//PxGeometryType::eTRIANGLEMESH
		GeomMTDCallback_ConvexHeightField,		//PxGeometryType::eHEIGHTFIELD
		GeomMTDCallback_NotSupported,			//PxGeometryType::eHAIRSYSTEM
		GeomMTDCallback_GeometryCustomGeometry,	//PxGeometryType::eCUSTOM
	},

	//PxGeometryType::ePARTICLESYSTEM
	{
		0,										//PxGeometryType::eSPHERE
		0,										//PxGeometryType::ePLANE
		0,										//PxGeometryType::eCAPSULE
		0,										//PxGeometryType::eBOX
		0,										//PxGeometryType::eCONVEXMESH
		GeomMTDCallback_NotSupported,			//PxGeometryType::ePARTICLESYSTEM
		GeomMTDCallback_NotSupported,			//PxGeometryType::eTETRAHEDRONMESH
		GeomMTDCallback_NotSupported,			//PxGeometryType::eTRIANGLEMESH
		GeomMTDCallback_NotSupported,			//PxGeometryType::eHEIGHTFIELD
		GeomMTDCallback_NotSupported,			//PxGeometryType::eHAIRSYSTEM
		GeomMTDCallback_NotSupported,			//PxGeometryType::eCUSTOM
	},

	//PxGeometryType::eTETRAHEDRONMESH
	{
		0,										//PxGeometryType::eSPHERE
		0,										//PxGeometryType::ePLANE
		0,										//PxGeometryType::eCAPSULE
		0,										//PxGeometryType::eBOX
		0,										//PxGeometryType::eCONVEXMESH
		0,										//PxGeometryType::ePARTICLESYSTEM
		GeomMTDCallback_NotSupported,			//PxGeometryType::eTETRAHEDRONMESH
		GeomMTDCallback_NotSupported,			//PxGeometryType::eTRIANGLEMESH
		GeomMTDCallback_NotSupported,			//PxGeometryType::eHEIGHTFIELD
		GeomMTDCallback_NotSupported,			//PxGeometryType::eHAIRSYSTEM
		GeomMTDCallback_NotSupported,			//PxGeometryType::eCUSTOM
	},

	//PxGeometryType::eTRIANGLEMESH
	{
		0,										//PxGeometryType::eSPHERE
		0,										//PxGeometryType::ePLANE
		0,										//PxGeometryType::eCAPSULE
		0,										//PxGeometryType::eBOX
		0,										//PxGeometryType::eCONVEXMESH
		0,										//PxGeometryType::ePARTICLESYSTEM
		0,										//PxGeometryType::eTETRAHEDRONMESH
		GeomMTDCallback_NotSupported,			//PxGeometryType::eTRIANGLEMESH
		GeomMTDCallback_NotSupported,			//PxGeometryType::eHEIGHTFIELD
		GeomMTDCallback_NotSupported,			//PxGeometryType::eHAIRSYSTEM
		GeomMTDCallback_GeometryCustomGeometry,	//PxGeometryType::eCUSTOM
	},

	//PxGeometryType::eHEIGHTFIELD
	{
		0,										//PxGeometryType::eSPHERE
		0,										//PxGeometryType::ePLANE
		0,										//PxGeometryType::eCAPSULE
		0,										//PxGeometryType::eBOX
		0,										//PxGeometryType::eCONVEXMESH
		0,										//PxGeometryType::ePARTICLESYSTEM
		0,										//PxGeometryType::eTETRAHEDRONMESH
		0,										//PxGeometryType::eTRIANGLEMESH
		GeomMTDCallback_NotSupported,			//PxGeometryType::eHEIGHTFIELD
		GeomMTDCallback_NotSupported,			//PxGeometryType::eHAIRSYSTEM
		GeomMTDCallback_GeometryCustomGeometry,	//PxGeometryType::eCUSTOM
	},

	//PxGeometryType::eHAIRSYSTEM
	{
		0,										//PxGeometryType::eSPHERE
		0,										//PxGeometryType::ePLANE
		0,										//PxGeometryType::eCAPSULE
		0,										//PxGeometryType::eBOX
		0,										//PxGeometryType::eCONVEXMESH
		0,										//PxGeometryType::ePARTICLESYSTEM
		0,										//PxGeometryType::eTETRAHEDRONMESH
		0,										//PxGeometryType::eTRIANGLEMESH
		0,										//PxGeometryType::eHEIGHTFIELD
		GeomMTDCallback_NotSupported,			//PxGeometryType::eHAIRSYSTEM
		// Fixed: in this slot geom0 is the hair system and geom1 the custom geometry,
		// so the Geometry-vs-CustomGeometry wrapper must be used. The previous entry
		// (GeomMTDCallback_CustomGeometryGeometry) asserts geom0.getType()==eCUSTOM,
		// which cannot hold here.
		GeomMTDCallback_GeometryCustomGeometry,	//PxGeometryType::eCUSTOM
	},

	//PxGeometryType::eCUSTOM
	{
		0,										//PxGeometryType::eSPHERE
		0,										//PxGeometryType::ePLANE
		0,										//PxGeometryType::eCAPSULE
		0,										//PxGeometryType::eBOX
		0,										//PxGeometryType::eCONVEXMESH
		0,										//PxGeometryType::ePARTICLESYSTEM
		0,										//PxGeometryType::eTETRAHEDRONMESH
		0,										//PxGeometryType::eTRIANGLEMESH
		0,										//PxGeometryType::eHEIGHTFIELD
		0,										//PxGeometryType::eHAIRSYSTEM
		GeomMTDCallback_CustomGeometryGeometry,	//PxGeometryType::eCUSTOM
	},
};
PX_COMPILE_TIME_ASSERT(sizeof(gGeomMTDMethodTable) / sizeof(gGeomMTDMethodTable[0]) == PxGeometryType::eGEOMETRY_COUNT);
| 51,608 | C++ | 34.276145 | 255 | 0.733239 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuMeshFactory.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_MESH_FACTORY_H
#define GU_MESH_FACTORY_H
#include "foundation/PxIO.h"
#include "foundation/PxHashSet.h"
#include "foundation/PxUserAllocated.h"
#include "geometry/PxTriangleMesh.h"
#include "geometry/PxTetrahedronMesh.h"
#include "geometry/PxConvexMesh.h"
#include "geometry/PxHeightField.h"
#include "geometry/PxBVH.h"
#include "PxPhysXConfig.h"
#include "foundation/PxMutex.h"
#include "foundation/PxArray.h"
// PT: added for platforms that compile the onRefCountZero template immediately
#include "CmUtils.h"
#include "foundation/PxFoundation.h"
namespace physx
{
namespace Gu
{
class ConvexMesh;
class HeightField;
class TriangleMesh;
class TriangleMeshData;
class SoftBodyMesh;
class SoftBodyMeshData;
class TetrahedronMesh;
class TetrahedronMeshData;
class BVH;
struct ConvexHullInitData;
class BVHData;
/**
\brief Listener interface notified by MeshFactory.

Implementations are registered through MeshFactory::addFactoryListener() and removed
through MeshFactory::removeFactoryListener().
*/
class MeshFactoryListener
{
protected:
	virtual ~MeshFactoryListener(){}
public:
	// Called when a mesh object's buffer is released. 'type' is the object's
	// concrete PxType. NOTE(review): 'object' may already be destroyed when this
	// fires (see onRefCountZero below) - treat it as an identifier only.
	virtual void onMeshFactoryBufferRelease(const PxBase* object, PxType type) = 0;
#if PX_SUPPORT_OMNI_PVD
	// Optional hooks used when Omni PVD support is compiled in; default to no-ops.
	virtual void onObjectAdd(const PxBase*) {}
	virtual void onObjectRemove(const PxBase*) {}
#endif
};
#if PX_VC
#pragma warning(push)
#pragma warning( disable : 4251 ) // class needs to have dll-interface to be used by clients of class
#endif
/**
\brief Factory and registry for mesh-type objects: triangle meshes, tetrahedron
meshes, softbody meshes, convex meshes, heightfields and BVHs.

Objects can be created either by deserializing from a PxInputStream or from
already-cooked in-memory data. Created objects are tracked in per-type hash sets,
guarded by mTrackingMutex. Registered MeshFactoryListener instances are notified
when tracked objects are released.
*/
class PX_PHYSX_COMMON_API MeshFactory : public PxUserAllocated
{
	PX_NOCOPY(MeshFactory)
public:
											MeshFactory();
protected:
	virtual									~MeshFactory();

public:
	void									release();

	// Triangle meshes
	// 'lock' controls whether the tracking mutex is taken while registering.
	void									addTriangleMesh(Gu::TriangleMesh* np, bool lock=true);
	PxTriangleMesh*							createTriangleMesh(PxInputStream& stream);
	PxTriangleMesh*							createTriangleMesh(void* triangleMeshData);
	bool									removeTriangleMesh(PxTriangleMesh&);
	PxU32									getNbTriangleMeshes()	const;
	PxU32									getTriangleMeshes(PxTriangleMesh** userBuffer, PxU32 bufferSize, PxU32 startIndex)	const;

	// Tetrahedron meshes
	void									addTetrahedronMesh(Gu::TetrahedronMesh* np, bool lock = true);
	PxTetrahedronMesh*						createTetrahedronMesh(PxInputStream& stream);
	PxTetrahedronMesh*						createTetrahedronMesh(void* tetrahedronMeshData);
	bool									removeTetrahedronMesh(PxTetrahedronMesh&);
	PxU32									getNbTetrahedronMeshes()	const;
	PxU32									getTetrahedronMeshes(PxTetrahedronMesh** userBuffer, PxU32 bufferSize, PxU32 startIndex)	const;

	// SoftBody meshes
	void									addSoftBodyMesh(Gu::SoftBodyMesh* np, bool lock = true);
	PxSoftBodyMesh*							createSoftBodyMesh(PxInputStream& stream);
	PxSoftBodyMesh*							createSoftBodyMesh(void* tetrahedronMeshData);
	bool									removeSoftBodyMesh(PxSoftBodyMesh&);
	PxU32									getNbSoftBodyMeshes()	const;
	PxU32									getSoftBodyMeshes(PxSoftBodyMesh** userBuffer, PxU32 bufferSize, PxU32 startIndex)	const;

	// Convexes
	void									addConvexMesh(Gu::ConvexMesh* np, bool lock=true);
	PxConvexMesh*							createConvexMesh(PxInputStream&);
	PxConvexMesh*							createConvexMesh(void* convexMeshData);
	bool									removeConvexMesh(PxConvexMesh&);
	PxU32									getNbConvexMeshes()	const;
	PxU32									getConvexMeshes(PxConvexMesh** userBuffer, PxU32 bufferSize, PxU32 startIndex)	const;

	// Heightfields
	void									addHeightField(Gu::HeightField* np, bool lock=true);
	PxHeightField*							createHeightField(void* heightFieldMeshData);
	PxHeightField*							createHeightField(PxInputStream&);
	bool									removeHeightField(PxHeightField&);
	PxU32									getNbHeightFields()	const;
	PxU32									getHeightFields(PxHeightField** userBuffer, PxU32 bufferSize, PxU32 startIndex)	const;

	// BVH
	void									addBVH(Gu::BVH* np, bool lock=true);
	PxBVH*									createBVH(PxInputStream&);
	PxBVH*									createBVH(void* bvhData);
	bool									removeBVH(PxBVH&);
	PxU32									getNbBVHs()	const;
	PxU32									getBVHs(PxBVH** userBuffer, PxU32 bufferSize, PxU32 startIndex)	const;

	// Listener management and notification.
	void									addFactoryListener(MeshFactoryListener& listener);
	void									removeFactoryListener(MeshFactoryListener& listener);
	void									notifyFactoryListener(const PxBase*, PxType typeID);

	// Removes the object from whichever per-type set it lives in; returns false
	// if it was not tracked by this factory.
	bool									remove(PxBase&);

protected:
	// Internal creation paths operating on already-cooked data.
	PxTriangleMesh*							createTriangleMesh(Gu::TriangleMeshData& data);
	PxTetrahedronMesh*						createTetrahedronMesh(Gu::TetrahedronMeshData& data);
	PxSoftBodyMesh*							createSoftBodyMesh(Gu::SoftBodyMeshData& data);
	PxConvexMesh*							createConvexMesh(Gu::ConvexHullInitData& data);
	PxBVH*									createBVH(Gu::BVHData& data);

	// Guards the per-type tracking sets below; mutable so const getters can lock.
	mutable PxMutex							mTrackingMutex;
private:
	PxCoalescedHashSet<Gu::TriangleMesh*>	mTriangleMeshes;
	PxCoalescedHashSet<Gu::TetrahedronMesh*> mTetrahedronMeshes;
	PxCoalescedHashSet<Gu::SoftBodyMesh*>	mSoftBodyMeshes;
	PxCoalescedHashSet<Gu::ConvexMesh*>		mConvexMeshes;
	PxCoalescedHashSet<Gu::HeightField*>	mHeightFields;
	PxCoalescedHashSet<Gu::BVH*>			mBVHs;
	PxArray<MeshFactoryListener*>			mFactoryListeners;

#if PX_SUPPORT_OMNI_PVD
protected:
	// Omni PVD instrumentation: forwards add/remove events to the listeners.
	void									notifyListenersAdd(const PxBase*);
	void									notifyListenersRemove(const PxBase*);
#endif
};
#if PX_VC
#pragma warning(pop)
#endif
// Shared ref-count-zero handler for factory-tracked objects.
// If a factory is given, the object is deleted only when 'cndt' is already true or
// the factory still tracks it (mf->remove succeeds); otherwise an error is reported
// and nothing is deleted, which protects against double deletion.
// NOTE(review): 'object' is already destroyed when notifyFactoryListener() runs -
// the type is captured beforehand and the pointer is passed for identification
// only; listeners must not dereference it.
template<typename T>
PX_INLINE void onRefCountZero(T* object, Gu::MeshFactory* mf, bool cndt, const char* errorMsg)
{
	if(mf)
	{
		if(cndt || mf->remove(*object))
		{
			const PxType type = object->getConcreteType();	// grab the type before the object goes away
			Cm::deletePxBase(object);
			mf->notifyFactoryListener(object, type);
			return;
		}

		// PT: if we reach this point, we didn't find the mesh in the Physics object => don't delete!
		// This prevents deleting the object twice.
		PxGetFoundation().error(PxErrorCode::eINVALID_OPERATION, PX_FL, errorMsg);
	}
	else
		Cm::deletePxBase(object);
}
}
}
#endif
| 7,371 | C | 35.86 | 109 | 0.737485 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuOverlapTests.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuOverlapTests.h"
#include "GuIntersectionBoxBox.h"
#include "GuIntersectionSphereBox.h"
#include "GuDistancePointSegment.h"
#include "GuDistanceSegmentBox.h"
#include "GuDistanceSegmentSegment.h"
#include "GuSphere.h"
#include "GuBoxConversion.h"
#include "GuInternal.h"
#include "GuVecCapsule.h"
#include "GuVecConvexHull.h"
#include "GuVecBox.h"
#include "GuConvexMesh.h"
#include "GuHillClimbing.h"
#include "GuGJK.h"
#include "geometry/PxSphereGeometry.h"
#include "geometry/PxCustomGeometry.h"
#include "CmMatrix34.h"
using namespace physx;
using namespace Cm;
using namespace Gu;
// PT: TODO: why don't we use ShapeData for overlaps?
//returns the maximal vertex in shape space
// PT: this function should be removed. We already have 2 different project hull functions in PxcShapeConvex & GuGJKObjectSupport, this one looks like a weird mix of both!
// Projects the hull's vertices onto 'localDir', writing the [minimum, maximum]
// projection interval and returning the vertex achieving the maximum, mapped back
// through 'vert2ShapeSkew'.
static PxVec3 projectHull_(	const ConvexHullData& hull,
							float& minimum, float& maximum,
							const PxVec3& localDir,	// expected to be normalized
							const PxMat33& vert2ShapeSkew)
{
	PX_ASSERT(localDir.isNormalized());

	//use property that x|My == Mx|y for symmetric M to avoid having to transform vertices.
	const PxVec3 vertexSpaceDir = vert2ShapeSkew * localDir;

	const PxVec3* Verts = hull.getHullVertices();
	const PxVec3* bestVert = NULL;

	if(!hull.mBigConvexRawData)	// Brute-force, local space. Experiments show break-even point is around 32 verts.
	{
		// Linear scan over all hull vertices, tracking min and max projections.
		PxU32 NbVerts = hull.mNbHullVertices;
		float min_ = PX_MAX_F32;
		float max_ = -PX_MAX_F32;
		while(NbVerts--)
		{
			const float dp = (*Verts).dot(vertexSpaceDir);
			min_ = physx::intrinsics::selectMin(min_, dp);
			if(dp > max_)	{ max_ = dp; bestVert = Verts; }
			Verts++;
		}
		minimum = min_;
		maximum = max_;

		PX_ASSERT(bestVert != NULL);

		return vert2ShapeSkew * *bestVert;
	}
	else //*/if(1)	// This version is better for objects with a lot of vertices
	{
		// Seed the extreme-vertex search from the precomputed cubemap samples, then
		// refine with localSearch (hill climbing, see GuHillClimbing.h) along
		// -vertexSpaceDir and +vertexSpaceDir respectively.
		const PxU32 Offset = ComputeCubemapNearestOffset(vertexSpaceDir, hull.mBigConvexRawData->mSubdiv);
		PxU32 MinID = hull.mBigConvexRawData->mSamples[Offset];
		PxU32 MaxID = hull.mBigConvexRawData->getSamples2()[Offset];

		localSearch(MinID, -vertexSpaceDir, Verts, hull.mBigConvexRawData);
		localSearch(MaxID, vertexSpaceDir, Verts, hull.mBigConvexRawData);

		minimum = (Verts[MinID].dot(vertexSpaceDir));
		maximum = (Verts[MaxID].dot(vertexSpaceDir));
		PX_ASSERT(maximum >= minimum);

		return vert2ShapeSkew * Verts[MaxID];
	}
}
// GJK overlap query between a sphere and a (possibly scaled) convex mesh, evaluated
// in the convex mesh's local space. The trailing unnamed PxVec3* (cached separating
// axis passed by the callers) is currently unused.
static bool intersectSphereConvex(const PxTransform& sphereTransform, float radius, const ConvexMesh& mesh, const PxMeshScale& meshScale, const PxTransform& convexGlobalPose,
								  PxVec3*)
{
	using namespace aos;

	const Vec3V zeroV = V3Zero();
	const ConvexHullData* hullData = &mesh.getHull();
	const FloatV sphereRadius = FLoad(radius);
	const Vec3V vScale = V3LoadU_SafeReadW(meshScale.scale);	// PT: safe because 'rotation' follows 'scale' in PxMeshScale
	const QuatV vQuat = QuatVLoadU(&meshScale.rotation.x);

	// Express the sphere in the convex mesh's local frame.
	const PxMatTransformV aToB(convexGlobalPose.transformInv(sphereTransform));
	const ConvexHullV convexHull(hullData, zeroV, vScale, vQuat, meshScale.isIdentity());
	// The sphere is modeled as a point-capsule (center + radius) for GJK.
	const CapsuleV capsule(aToB.p, sphereRadius);

	Vec3V contactA, contactB, normal;
	FloatV dist;
	const LocalConvex<CapsuleV> convexA(capsule);
	const LocalConvex<ConvexHullV> convexB(convexHull);
	// Start GJK along the vector between the two shape centers.
	const Vec3V initialSearchDir = V3Sub(capsule.getCenter(), convexHull.getCenter());

	GjkStatus status = gjk(convexA, convexB, initialSearchDir, FZero(), contactA, contactB, normal, dist);
	return status == GJK_CONTACT;
}
// GJK overlap query between a capsule and a (possibly scaled) convex mesh, evaluated
// in the convex mesh's local space. The trailing unnamed PxVec3* (cached separating
// axis passed by the callers) is currently unused.
static bool intersectCapsuleConvex(	const PxCapsuleGeometry& capsGeom, const PxTransform& capsGlobalPose,
									const ConvexMesh& mesh, const PxMeshScale& meshScale, const PxTransform& convexGlobalPose,
									PxVec3*)
{
	using namespace aos;

	const Vec3V zeroV = V3Zero();
	const ConvexHullData* hull = &mesh.getHull();

	const FloatV capsuleHalfHeight = FLoad(capsGeom.halfHeight);
	const FloatV capsuleRadius = FLoad(capsGeom.radius);

	const Vec3V vScale = V3LoadU_SafeReadW(meshScale.scale);	// PT: safe because 'rotation' follows 'scale' in PxMeshScale
	const QuatV vQuat = QuatVLoadU(&meshScale.rotation.x);

	// Express the capsule in the convex mesh's local frame. The capsule's segment
	// runs along its local X axis, hence the scaled V3UnitX() half-height vector.
	const PxMatTransformV aToB(convexGlobalPose.transformInv(capsGlobalPose));
	const ConvexHullV convexHull(hull, zeroV, vScale, vQuat, meshScale.isIdentity());
	const CapsuleV capsule(aToB.p, aToB.rotate(V3Scale(V3UnitX(), capsuleHalfHeight)), capsuleRadius);

	Vec3V contactA, contactB, normal;
	FloatV dist;
	const LocalConvex<CapsuleV> convexA(capsule);
	const LocalConvex<ConvexHullV> convexB(convexHull);
	// Start GJK along the vector between the two shape centers.
	const Vec3V initialSearchDir = V3Sub(capsule.getCenter(), convexHull.getCenter());

	GjkStatus status = gjk(convexA, convexB, initialSearchDir, FZero(), contactA, contactB, normal, dist);
	return status == GJK_CONTACT;
}
// GJK overlap query between a box and a (possibly scaled) convex mesh, evaluated in
// the convex mesh's local space. The trailing unnamed PxVec3* (cached separating
// axis passed by the callers) is currently unused.
static bool intersectBoxConvex(const PxBoxGeometry& boxGeom, const PxTransform& boxGlobalPose,
							   const ConvexMesh& mesh, const PxMeshScale& meshScale, const PxTransform& convexGlobalPose,
							   PxVec3*)
{
	// AP: see archived non-GJK version in //sw/physx/dev/pterdiman/graveyard/contactConvexBox.cpp
	using namespace aos;

	const Vec3V zeroV = V3Zero();
	const ConvexHullData* hull = &mesh.getHull();

	const Vec3V vScale = V3LoadU_SafeReadW(meshScale.scale);	// PT: safe because 'rotation' follows 'scale' in PxMeshScale
	const QuatV vQuat = QuatVLoadU(&meshScale.rotation.x);
	const Vec3V boxExtents = V3LoadU(boxGeom.halfExtents);

	// Transform taking the box's frame into the convex mesh's frame; the box itself
	// is kept at the origin and related to the hull via RelativeConvex below.
	const PxMatTransformV aToB(convexGlobalPose.transformInv(boxGlobalPose));
	const ConvexHullV convexHull(hull, zeroV, vScale, vQuat, meshScale.isIdentity());
	const BoxV box(zeroV, boxExtents);

	Vec3V contactA, contactB, normal;
	FloatV dist;
	const RelativeConvex<BoxV> convexA(box, aToB);
	const LocalConvex<ConvexHullV> convexB(convexHull);

	GjkStatus status = gjk(convexA, convexB, aToB.p, FZero(), contactA, contactB, normal, dist);
	//PX_PRINTF("BOX status = %i, overlap = %i, PxVec3(%f, %f, %f)\n", status, overlap, boxGlobalPose.p.x, boxGlobalPose.p.y, boxGlobalPose.p.z);
	return status == GJK_CONTACT;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Returns the cached separating axis when the cache recorded a previous overlap,
// NULL otherwise (no cache, or last state was disjoint).
static PX_FORCE_INLINE PxVec3* getCachedAxis(TriggerCache* cache)
{
	if(!cache || cache->state != TRIGGER_OVERLAP)
		return NULL;
	return &cache->dir;
}
// Records the overlap result in the trigger cache (if any) and passes it through.
static PX_FORCE_INLINE bool updateTriggerCache(bool overlap, TriggerCache* cache)
{
	if(cache)
		cache->state = overlap ? TRIGGER_OVERLAP : TRIGGER_DISJOINT;
	return overlap;
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Sphere-vs-shape
// Sphere-vs-sphere: overlap when the center distance does not exceed the radii sum.
static bool GeomOverlapCallback_SphereSphere(GU_OVERLAP_FUNC_PARAMS)
{
	PX_ASSERT(geom0.getType()==PxGeometryType::eSPHERE);
	PX_ASSERT(geom1.getType()==PxGeometryType::eSPHERE);
	PX_UNUSED(cache);
	PX_UNUSED(threadContext);

	const PxSphereGeometry& sphere0 = static_cast<const PxSphereGeometry&>(geom0);
	const PxSphereGeometry& sphere1 = static_cast<const PxSphereGeometry&>(geom1);

	const PxVec3 centerDelta = pose1.p - pose0.p;
	const PxReal radiusSum = sphere0.radius + sphere1.radius;

	// PT: objects are defined as closed, so we return 'true' in case of equality
	return centerDelta.magnitudeSquared() <= radiusSum*radiusSum;
}
// Sphere-vs-plane: overlap when the signed distance from the sphere center to the
// plane is within the radius.
static bool GeomOverlapCallback_SpherePlane(GU_OVERLAP_FUNC_PARAMS)
{
	PX_ASSERT(geom0.getType()==PxGeometryType::eSPHERE);
	PX_ASSERT(geom1.getType()==PxGeometryType::ePLANE);
	PX_UNUSED(cache);
	PX_UNUSED(geom1);
	PX_UNUSED(threadContext);

	const PxSphereGeometry& sphere = static_cast<const PxSphereGeometry&>(geom0);

	const PxPlane worldPlane = getPlane(pose1);

	// PT: objects are defined as closed, so we return 'true' in case of equality
	return worldPlane.distance(pose0.p) <= sphere.radius;
}
// Sphere-vs-capsule: squared distance from the sphere center to the capsule's inner
// segment, compared against the squared sum of radii. Computed relative to the
// capsule frame.
static bool GeomOverlapCallback_SphereCapsule(GU_OVERLAP_FUNC_PARAMS)
{
	PX_ASSERT(geom0.getType()==PxGeometryType::eSPHERE);
	PX_ASSERT(geom1.getType()==PxGeometryType::eCAPSULE);
	PX_UNUSED(cache);
	PX_UNUSED(threadContext);

	const PxSphereGeometry& sphere = static_cast<const PxSphereGeometry&>(geom0);
	const PxCapsuleGeometry& capsule = static_cast<const PxCapsuleGeometry&>(geom1);

	// PT: TODO: remove this useless conversion
	const PxVec3 halfHeightVec = getCapsuleHalfHeightVector(pose1, capsule);

	const PxReal inflatedRadius = sphere.radius + capsule.radius;

	// PT: objects are defined as closed, so we return 'true' in case of equality
	return distancePointSegmentSquared(halfHeightVec, -halfHeightVec, pose0.p - pose1.p) <= inflatedRadius*inflatedRadius;
}
// Sphere-vs-box: convert the PxBoxGeometry/pose pair to a Gu::Box and run the
// dedicated sphere-OBB test.
static bool GeomOverlapCallback_SphereBox(GU_OVERLAP_FUNC_PARAMS)
{
	PX_ASSERT(geom0.getType()==PxGeometryType::eSPHERE);
	PX_ASSERT(geom1.getType()==PxGeometryType::eBOX);
	PX_UNUSED(cache);
	PX_UNUSED(threadContext);

	const PxSphereGeometry& sphere = static_cast<const PxSphereGeometry&>(geom0);
	const PxBoxGeometry& box = static_cast<const PxBoxGeometry&>(geom1);

	// PT: TODO: remove this useless conversion
	Box worldBox;
	buildFrom(worldBox, pose1.p, box.halfExtents, pose1.q);

	return intersectSphereBox(Sphere(pose0.p, sphere.radius), worldBox);
}
// Sphere-vs-convex, with trigger-cache support: the separating axis is seeded from
// the cache when the previous query overlapped, and stored back on overlap.
static bool GeomOverlapCallback_SphereConvex(GU_OVERLAP_FUNC_PARAMS)
{
	PX_ASSERT(geom0.getType()==PxGeometryType::eSPHERE);
	PX_ASSERT(geom1.getType()==PxGeometryType::eCONVEXMESH);
	PX_UNUSED(threadContext);

	const PxSphereGeometry& sphereGeom = static_cast<const PxSphereGeometry&>(geom0);
	const PxConvexMeshGeometry& convexGeom = static_cast<const PxConvexMeshGeometry&>(geom1);

	ConvexMesh* cm = static_cast<ConvexMesh*>(convexGeom.convexMesh);

	// Warm-start axis: cached value when available, arbitrary +Z otherwise.
	PxVec3 cachedSepAxis;
	PxVec3* tmp = getCachedAxis(cache);
	if(tmp)
		cachedSepAxis = *tmp;
	else
		cachedSepAxis = PxVec3(0,0,1.f);

	const bool overlap = intersectSphereConvex(pose0, sphereGeom.radius,
		*cm,
		convexGeom.scale, pose1,
		&cachedSepAxis);

	// NOTE(review): intersectSphereConvex() currently ignores its axis parameter,
	// so the axis written back here is unchanged.
	if(cache && overlap)
		cache->dir = cachedSepAxis;

	return updateTriggerCache(overlap, cache);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Plane-vs-shape
// Plane-vs-capsule: since the plane is infinite, the test reduces to two
// sphere-vs-plane checks against the capsule's end spheres.
static bool GeomOverlapCallback_PlaneCapsule(GU_OVERLAP_FUNC_PARAMS)
{
	PX_ASSERT(geom0.getType()==PxGeometryType::ePLANE);
	PX_ASSERT(geom1.getType()==PxGeometryType::eCAPSULE);
	PX_UNUSED(threadContext);
	PX_UNUSED(cache);
	PX_UNUSED(geom0);

//	const PxPlaneGeometry& planeGeom = static_cast<const PxPlaneGeometry&>(geom0);
	const PxCapsuleGeometry& capsuleGeom = static_cast<const PxCapsuleGeometry&>(geom1);

	// PT: TODO: remove this useless conversion
	Capsule capsule;
	getCapsule(capsule, capsuleGeom, pose1);

	const PxPlane plane = getPlane(pose0);

	// PT: objects are defined as closed, so we return 'true' in case of equality
	return plane.distance(capsule.p0) <= capsule.radius || plane.distance(capsule.p1) <= capsule.radius;
}
/*static bool intersectPlaneBox(const PxPlane& plane, const Box& box)
{
PxVec3 pts[8];
box.computeBoxPoints(pts);
for(PxU32 i=0;i<8;i++)
{
if(plane.distance(pts[i]) <= 0.0f) // PT: objects are defined as closed, so we return 'true' in case of equality
return true;
}
return false;
}*/
// Plane-vs-box: tests all 8 box corners against the plane, reporting overlap as
// soon as one corner lies on or below the plane.
static bool GeomOverlapCallback_PlaneBox(GU_OVERLAP_FUNC_PARAMS)
{
	PX_ASSERT(geom0.getType()==PxGeometryType::ePLANE);
	PX_ASSERT(geom1.getType()==PxGeometryType::eBOX);
	PX_UNUSED(threadContext);
	PX_UNUSED(cache);
	PX_UNUSED(geom0);

//	const PxPlaneGeometry& planeGeom = static_cast<const PxPlaneGeometry&>(geom0);
	const PxBoxGeometry& boxGeom = static_cast<const PxBoxGeometry&>(geom1);

	// I currently use the same code as for contact generation but maybe we could do something faster (in theory testing
	// only 2 pts is enough).

	const Matrix34FromTransform absPose(pose1);
	const PxPlane worldPlane = getPlane(pose0);

	// Enumerate the 8 corner sign combinations (+/-1 per axis).
	for(int vx=-1; vx<=1; vx+=2)
		for(int vy=-1; vy<=1; vy+=2)
			for(int vz=-1; vz<=1; vz+=2)
			{
				const PxVec3 v = absPose.transform(PxVec3(PxReal(vx),PxReal(vy),PxReal(vz)).multiply(boxGeom.halfExtents));

				if(worldPlane.distance(v) <= 0.0f)	// PT: objects are defined as closed, so we return 'true' in case of equality
					return true;
			}
	return false;
}
// Plane-vs-convex: project the hull onto the plane normal (in the convex's shape
// space); overlap iff the projection interval's minimum reaches the plane.
static bool GeomOverlapCallback_PlaneConvex(GU_OVERLAP_FUNC_PARAMS)
{
	PX_ASSERT(geom0.getType()==PxGeometryType::ePLANE);
	PX_ASSERT(geom1.getType()==PxGeometryType::eCONVEXMESH);
	PX_UNUSED(threadContext);
	PX_UNUSED(cache);
	PX_UNUSED(geom0);

//	const PxPlaneGeometry& planeGeom = static_cast<const PxPlaneGeometry&>(geom0);
	const PxConvexMeshGeometry& convexGeom = static_cast<const PxConvexMeshGeometry&>(geom1);

	ConvexMesh* cm = static_cast<ConvexMesh*>(convexGeom.convexMesh);

	//find plane normal in shape space of convex:
	// PT:: tag: scalar transform*transform
	const PxTransform plane2convex = pose1.getInverse().transform(pose0);

	const PxPlane shapeSpacePlane = getPlane(plane2convex);

	PxReal minimum, maximum;
	projectHull_(cm->getHull(), minimum, maximum, shapeSpacePlane.n, toMat33(convexGeom.scale));

	return (minimum <= -shapeSpacePlane.d);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Capsule-vs-shape
// Capsule-vs-capsule: squared distance between the two inner segments compared
// against the squared sum of radii.
static bool GeomOverlapCallback_CapsuleCapsule(GU_OVERLAP_FUNC_PARAMS)
{
	PX_ASSERT(geom0.getType()==PxGeometryType::eCAPSULE);
	PX_ASSERT(geom1.getType()==PxGeometryType::eCAPSULE);
	PX_UNUSED(cache);
	PX_UNUSED(threadContext);

	const PxCapsuleGeometry& capsule0 = static_cast<const PxCapsuleGeometry&>(geom0);
	const PxCapsuleGeometry& capsule1 = static_cast<const PxCapsuleGeometry&>(geom1);

	// PT: move computation to local space for improved accuracy
	const PxVec3 delta = pose1.p - pose0.p;

	// PT: TODO: remove this useless conversion
	const PxVec3 halfHeight0 = getCapsuleHalfHeightVector(pose0, capsule0);
	const PxVec3 halfHeight1 = getCapsuleHalfHeightVector(pose1, capsule1);

	const PxReal squareDist = distanceSegmentSegmentSquared(-halfHeight0, halfHeight0*2.0f,
															delta-halfHeight1, halfHeight1*2.0f);
	const PxReal radiusSum = capsule0.radius + capsule1.radius;

	// PT: objects are defined as closed, so we return 'true' in case of equality
	return squareDist <= radiusSum*radiusSum;
}
// Capsule-vs-box: squared distance from the capsule's inner segment to the box
// compared against the squared capsule radius.
static bool GeomOverlapCallback_CapsuleBox(GU_OVERLAP_FUNC_PARAMS)
{
	PX_ASSERT(geom0.getType()==PxGeometryType::eCAPSULE);
	PX_ASSERT(geom1.getType()==PxGeometryType::eBOX);
	PX_UNUSED(cache);
	PX_UNUSED(threadContext);

	const PxCapsuleGeometry& capsuleGeom = static_cast<const PxCapsuleGeometry&>(geom0);
	const PxBoxGeometry& boxGeom = static_cast<const PxBoxGeometry&>(geom1);

	// PT: move computation to local space for improved accuracy
	const PxVec3 delta = pose1.p - pose0.p;

	// PT: TODO: remove this useless conversion
	const PxVec3 capsuleHalfHeightVector = getCapsuleHalfHeightVector(pose0, capsuleGeom);

	// PT: TODO: remove this useless conversion
	const PxMat33Padded obbRot(pose1.q);

	// PT: objects are defined as closed, so we return 'true' in case of equality
	return distanceSegmentBoxSquared(capsuleHalfHeightVector, -capsuleHalfHeightVector, delta, boxGeom.halfExtents, obbRot) <= capsuleGeom.radius*capsuleGeom.radius;
}
// Capsule-vs-convex, with trigger-cache support: same warm-start/cache-update
// scheme as GeomOverlapCallback_SphereConvex.
static bool GeomOverlapCallback_CapsuleConvex(GU_OVERLAP_FUNC_PARAMS)
{
	PX_ASSERT(geom0.getType()==PxGeometryType::eCAPSULE);
	PX_ASSERT(geom1.getType()==PxGeometryType::eCONVEXMESH);
	PX_UNUSED(threadContext);

	const PxCapsuleGeometry& capsGeom = static_cast<const PxCapsuleGeometry&>(geom0);
	const PxConvexMeshGeometry& convexGeom = static_cast<const PxConvexMeshGeometry&>(geom1);

	ConvexMesh* cm = static_cast<ConvexMesh*>(convexGeom.convexMesh);

	// Warm-start axis: cached value when available, arbitrary +Z otherwise.
	PxVec3 cachedSepAxis;
	PxVec3* tmp = getCachedAxis(cache);
	if(tmp)
		cachedSepAxis = *tmp;
	else
		cachedSepAxis = PxVec3(0,0,1.0f);

	const bool overlap = intersectCapsuleConvex(capsGeom, pose0, *cm, convexGeom.scale, pose1, &cachedSepAxis);

	// NOTE(review): intersectCapsuleConvex() currently ignores its axis parameter,
	// so the axis written back here is unchanged.
	if(cache && overlap)
		cache->dir = cachedSepAxis;

	return updateTriggerCache(overlap, cache);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Box-vs-shape
// Box-vs-box: delegates to the OBB-vs-OBB intersection routine.
static bool GeomOverlapCallback_BoxBox(GU_OVERLAP_FUNC_PARAMS)
{
	PX_ASSERT(geom0.getType()==PxGeometryType::eBOX);
	PX_ASSERT(geom1.getType()==PxGeometryType::eBOX);
	PX_UNUSED(cache);
	PX_UNUSED(threadContext);

	const PxBoxGeometry& box0 = static_cast<const PxBoxGeometry&>(geom0);
	const PxBoxGeometry& box1 = static_cast<const PxBoxGeometry&>(geom1);

	// PT: TODO: remove this useless conversion
	const PxMat33Padded rot0(pose0.q);
	const PxMat33Padded rot1(pose1.q);

	return intersectOBBOBB(box0.halfExtents, pose0.p, rot0, box1.halfExtents, pose1.p, rot1, true);
}
// Box-vs-convex, with trigger-cache support: same warm-start/cache-update scheme
// as GeomOverlapCallback_SphereConvex.
static bool GeomOverlapCallback_BoxConvex(GU_OVERLAP_FUNC_PARAMS)
{
	PX_ASSERT(geom0.getType()==PxGeometryType::eBOX);
	PX_ASSERT(geom1.getType()==PxGeometryType::eCONVEXMESH);
	PX_UNUSED(threadContext);

	const PxBoxGeometry& boxGeom = static_cast<const PxBoxGeometry&>(geom0);
	const PxConvexMeshGeometry& convexGeom = static_cast<const PxConvexMeshGeometry&>(geom1);

	ConvexMesh* cm = static_cast<ConvexMesh*>(convexGeom.convexMesh);

	// Warm-start axis: cached value when available, arbitrary +Z otherwise.
	PxVec3 cachedSepAxis;
	PxVec3* tmp = getCachedAxis(cache);
	if(tmp)
		cachedSepAxis = *tmp;
	else
		cachedSepAxis = PxVec3(0.0f, 0.0f, 1.0f);

	const bool overlap = intersectBoxConvex(boxGeom, pose0, *cm, convexGeom.scale, pose1, &cachedSepAxis);

	// NOTE(review): intersectBoxConvex() currently ignores its axis parameter,
	// so the axis written back here is unchanged.
	if(cache && overlap)
		cache->dir = cachedSepAxis;

	return updateTriggerCache(overlap, cache);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Convex-vs-shape
// Convex-vs-convex GJK overlap test. Both hulls are loaded with their mesh scales;
// hull0 is related to hull1's frame via the aToB relative transform.
static bool GeomOverlapCallback_ConvexConvex(GU_OVERLAP_FUNC_PARAMS)
{
	using namespace aos;

	PX_ASSERT(geom0.getType()==PxGeometryType::eCONVEXMESH);
	PX_ASSERT(geom1.getType()==PxGeometryType::eCONVEXMESH);
	PX_UNUSED(threadContext);

	const Vec3V zeroV = V3Zero();
	const PxConvexMeshGeometry& convexGeom0 = static_cast<const PxConvexMeshGeometry&>(geom0);
	const PxConvexMeshGeometry& convexGeom1 = static_cast<const PxConvexMeshGeometry&>(geom1);

	const ConvexMesh* cm0 = static_cast<ConvexMesh*>(convexGeom0.convexMesh);
	const ConvexMesh* cm1 = static_cast<ConvexMesh*>(convexGeom1.convexMesh);

	bool overlap;
	{
		const ConvexHullData* hullData0 = &cm0->getHull();
		const ConvexHullData* hullData1 = &cm1->getHull();

		// Load both mesh scales and both poses into SIMD registers.
		const Vec3V vScale0 = V3LoadU_SafeReadW(convexGeom0.scale.scale);	// PT: safe because 'rotation' follows 'scale' in PxMeshScale
		const QuatV vQuat0 = QuatVLoadU(&convexGeom0.scale.rotation.x);
		const Vec3V vScale1 = V3LoadU_SafeReadW(convexGeom1.scale.scale);	// PT: safe because 'rotation' follows 'scale' in PxMeshScale
		const QuatV vQuat1 = QuatVLoadU(&convexGeom1.scale.rotation.x);

		const QuatV q0 = QuatVLoadU(&pose0.q.x);
		const Vec3V p0 = V3LoadU(&pose0.p.x);
		const QuatV q1 = QuatVLoadU(&pose1.q.x);
		const Vec3V p1 = V3LoadU(&pose1.p.x);

		const PxTransformV transf0(p0, q0);
		const PxTransformV transf1(p1, q1);

		// Transform taking hull0's frame into hull1's frame.
		const PxMatTransformV aToB(transf1.transformInv(transf0));

		const ConvexHullV convexHull0(hullData0, zeroV, vScale0, vQuat0, convexGeom0.scale.isIdentity());
		const ConvexHullV convexHull1(hullData1, zeroV, vScale1, vQuat1, convexGeom1.scale.isIdentity());

		Vec3V contactA, contactB, normal;
		FloatV dist;
		const RelativeConvex<ConvexHullV> convexA(convexHull0, aToB);
		const LocalConvex<ConvexHullV> convexB(convexHull1);

		GjkStatus status = gjk(convexA, convexB, aToB.p, FZero(), contactA, contactB, normal, dist);
		overlap = (status == GJK_CONTACT);
	}

	return updateTriggerCache(overlap, cache);
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Fallback entry for geometry-type pairs with no overlap implementation.
// Asserts in debug builds; reports "no overlap" in release builds.
static bool GeomOverlapCallback_NotSupported(GU_OVERLAP_FUNC_PARAMS)
{
	PX_ALWAYS_ASSERT_MESSAGE("NOT SUPPORTED");
	PX_UNUSED(geom0);
	PX_UNUSED(geom1);
	PX_UNUSED(pose0);
	PX_UNUSED(pose1);
	PX_UNUSED(cache);
	PX_UNUSED(threadContext);
	return false;
}
bool GeomOverlapCallback_SphereMesh (GU_OVERLAP_FUNC_PARAMS);
bool GeomOverlapCallback_CapsuleMesh (GU_OVERLAP_FUNC_PARAMS);
bool GeomOverlapCallback_BoxMesh (GU_OVERLAP_FUNC_PARAMS);
bool GeomOverlapCallback_ConvexMesh (GU_OVERLAP_FUNC_PARAMS);
bool GeomOverlapCallback_MeshMesh (GU_OVERLAP_FUNC_PARAMS);
bool GeomOverlapCallback_SphereHeightfield (GU_OVERLAP_FUNC_PARAMS);
bool GeomOverlapCallback_CapsuleHeightfield (GU_OVERLAP_FUNC_PARAMS);
bool GeomOverlapCallback_BoxHeightfield (GU_OVERLAP_FUNC_PARAMS);
bool GeomOverlapCallback_ConvexHeightfield (GU_OVERLAP_FUNC_PARAMS);
// Overlap test involving at least one custom geometry: forward the query to the
// user-supplied callbacks, with the custom geometry always passed first.
static bool GeomOverlapCallback_CustomGeometry(GU_OVERLAP_FUNC_PARAMS)
{
	PX_UNUSED(cache);

	if(geom0.getType() == PxGeometryType::eCUSTOM)
	{
		const PxCustomGeometry& custom = static_cast<const PxCustomGeometry&>(geom0);
		return custom.callbacks->overlap(geom0, pose0, geom1, pose1, threadContext);
	}

	if(geom1.getType() == PxGeometryType::eCUSTOM)
	{
		const PxCustomGeometry& custom = static_cast<const PxCustomGeometry&>(geom1);
		return custom.callbacks->overlap(geom1, pose1, geom0, pose0, threadContext);
	}

	return false;
}
// Dispatch table mapping a (type0, type1) geometry pair to its overlap function.
// The table is lower-triangular: entries where type0 > type1 are null (0) — the
// dispatcher is expected to swap operands so that type0 <= type1 before lookup.
GeomOverlapTable gGeomOverlapMethodTable[] =
{
	//PxGeometryType::eSPHERE
	{
		GeomOverlapCallback_SphereSphere,		//PxGeometryType::eSPHERE
		GeomOverlapCallback_SpherePlane,		//PxGeometryType::ePLANE
		GeomOverlapCallback_SphereCapsule,		//PxGeometryType::eCAPSULE
		GeomOverlapCallback_SphereBox,			//PxGeometryType::eBOX
		GeomOverlapCallback_SphereConvex,		//PxGeometryType::eCONVEXMESH
		GeomOverlapCallback_NotSupported,		//PxGeometryType::ePARTICLESYSTEM
		GeomOverlapCallback_NotSupported,		//PxGeometryType::eTETRAHEDRONMESH
		GeomOverlapCallback_SphereMesh,			//PxGeometryType::eTRIANGLEMESH
		GeomOverlapCallback_SphereHeightfield,	//PxGeometryType::eHEIGHTFIELD
		GeomOverlapCallback_NotSupported,		//PxGeometryType::eHAIRSYSTEM
		GeomOverlapCallback_CustomGeometry,		//PxGeometryType::eCUSTOM
	},
	//PxGeometryType::ePLANE
	{
		0,										//PxGeometryType::eSPHERE
		GeomOverlapCallback_NotSupported,		//PxGeometryType::ePLANE
		GeomOverlapCallback_PlaneCapsule,		//PxGeometryType::eCAPSULE
		GeomOverlapCallback_PlaneBox,			//PxGeometryType::eBOX
		GeomOverlapCallback_PlaneConvex,		//PxGeometryType::eCONVEXMESH
		GeomOverlapCallback_NotSupported,		//PxGeometryType::ePARTICLESYSTEM
		GeomOverlapCallback_NotSupported,		//PxGeometryType::eTETRAHEDRONMESH
		GeomOverlapCallback_NotSupported,		//PxGeometryType::eTRIANGLEMESH
		GeomOverlapCallback_NotSupported,		//PxGeometryType::eHEIGHTFIELD
		GeomOverlapCallback_NotSupported,		//PxGeometryType::eHAIRSYSTEM
		GeomOverlapCallback_CustomGeometry,		//PxGeometryType::eCUSTOM
	},
	//PxGeometryType::eCAPSULE
	{
		0,										//PxGeometryType::eSPHERE
		0,										//PxGeometryType::ePLANE
		GeomOverlapCallback_CapsuleCapsule,		//PxGeometryType::eCAPSULE
		GeomOverlapCallback_CapsuleBox,			//PxGeometryType::eBOX
		GeomOverlapCallback_CapsuleConvex,		//PxGeometryType::eCONVEXMESH
		GeomOverlapCallback_NotSupported,		//PxGeometryType::ePARTICLESYSTEM
		GeomOverlapCallback_NotSupported,		//PxGeometryType::eTETRAHEDRONMESH
		GeomOverlapCallback_CapsuleMesh,		//PxGeometryType::eTRIANGLEMESH
		GeomOverlapCallback_CapsuleHeightfield,	//PxGeometryType::eHEIGHTFIELD
		GeomOverlapCallback_NotSupported,		//PxGeometryType::eHAIRSYSTEM
		GeomOverlapCallback_CustomGeometry,		//PxGeometryType::eCUSTOM
	},
	//PxGeometryType::eBOX
	{
		0,										//PxGeometryType::eSPHERE
		0,										//PxGeometryType::ePLANE
		0,										//PxGeometryType::eCAPSULE
		GeomOverlapCallback_BoxBox,				//PxGeometryType::eBOX
		GeomOverlapCallback_BoxConvex,			//PxGeometryType::eCONVEXMESH
		GeomOverlapCallback_NotSupported,		//PxGeometryType::ePARTICLESYSTEM
		GeomOverlapCallback_NotSupported,		//PxGeometryType::eTETRAHEDRONMESH
		GeomOverlapCallback_BoxMesh,			//PxGeometryType::eTRIANGLEMESH
		GeomOverlapCallback_BoxHeightfield,		//PxGeometryType::eHEIGHTFIELD
		GeomOverlapCallback_NotSupported,		//PxGeometryType::eHAIRSYSTEM
		GeomOverlapCallback_CustomGeometry,		//PxGeometryType::eCUSTOM
	},
	//PxGeometryType::eCONVEXMESH
	{
		0,										//PxGeometryType::eSPHERE
		0,										//PxGeometryType::ePLANE
		0,										//PxGeometryType::eCAPSULE
		0,										//PxGeometryType::eBOX
		GeomOverlapCallback_ConvexConvex,		//PxGeometryType::eCONVEXMESH
		GeomOverlapCallback_NotSupported,		//PxGeometryType::ePARTICLESYSTEM
		GeomOverlapCallback_NotSupported,		//PxGeometryType::eTETRAHEDRONMESH
		GeomOverlapCallback_ConvexMesh,			//PxGeometryType::eTRIANGLEMESH		//not used: mesh always uses swept method for midphase.
		GeomOverlapCallback_ConvexHeightfield,	//PxGeometryType::eHEIGHTFIELD		//TODO: make HF midphase that will mask this
		GeomOverlapCallback_NotSupported,		//PxGeometryType::eHAIRSYSTEM
		GeomOverlapCallback_CustomGeometry,		//PxGeometryType::eCUSTOM
	},
	//PxGeometryType::ePARTICLESYSTEM
	{
		0,										//PxGeometryType::eSPHERE
		0,										//PxGeometryType::ePLANE
		0,										//PxGeometryType::eCAPSULE
		0,										//PxGeometryType::eBOX
		0,										//PxGeometryType::eCONVEXMESH
		GeomOverlapCallback_NotSupported,		//PxGeometryType::ePARTICLESYSTEM
		GeomOverlapCallback_NotSupported,		//PxGeometryType::eTETRAHEDRONMESH
		GeomOverlapCallback_NotSupported,		//PxGeometryType::eTRIANGLEMESH
		GeomOverlapCallback_NotSupported,		//PxGeometryType::eHEIGHTFIELD
		GeomOverlapCallback_NotSupported,		//PxGeometryType::eHAIRSYSTEM
		GeomOverlapCallback_NotSupported,		//PxGeometryType::eCUSTOM
	},
	//PxGeometryType::eTETRAHEDRONMESH
	{
		0,										//PxGeometryType::eSPHERE
		0,										//PxGeometryType::ePLANE
		0,										//PxGeometryType::eCAPSULE
		0,										//PxGeometryType::eBOX
		0,										//PxGeometryType::eCONVEXMESH
		0,										//PxGeometryType::ePARTICLESYSTEM
		GeomOverlapCallback_NotSupported,		//PxGeometryType::eTETRAHEDRONMESH
		GeomOverlapCallback_NotSupported,		//PxGeometryType::eTRIANGLEMESH
		GeomOverlapCallback_NotSupported,		//PxGeometryType::eHEIGHTFIELD
		GeomOverlapCallback_NotSupported,		//PxGeometryType::eHAIRSYSTEM
		GeomOverlapCallback_NotSupported,		//PxGeometryType::eCUSTOM
	},
	//PxGeometryType::eTRIANGLEMESH
	{
		0,										//PxGeometryType::eSPHERE
		0,										//PxGeometryType::ePLANE
		0,										//PxGeometryType::eCAPSULE
		0,										//PxGeometryType::eBOX
		0,										//PxGeometryType::eCONVEXMESH
		0,										//PxGeometryType::ePARTICLESYSTEM
		0,										//PxGeometryType::eTETRAHEDRONMESH
		GeomOverlapCallback_MeshMesh,			//PxGeometryType::eTRIANGLEMESH
		GeomOverlapCallback_NotSupported,		//PxGeometryType::eHEIGHTFIELD
		GeomOverlapCallback_NotSupported,		//PxGeometryType::eHAIRSYSTEM
		GeomOverlapCallback_CustomGeometry,		//PxGeometryType::eCUSTOM
	},
	//PxGeometryType::eHEIGHTFIELD
	{
		0,										//PxGeometryType::eSPHERE
		0,										//PxGeometryType::ePLANE
		0,										//PxGeometryType::eCAPSULE
		0,										//PxGeometryType::eBOX
		0,										//PxGeometryType::eCONVEXMESH
		0,										//PxGeometryType::ePARTICLESYSTEM
		0,										//PxGeometryType::eTETRAHEDRONMESH
		0,										//PxGeometryType::eTRIANGLEMESH
		GeomOverlapCallback_NotSupported,		//PxGeometryType::eHEIGHTFIELD
		GeomOverlapCallback_NotSupported,		//PxGeometryType::eHAIRSYSTEM
		GeomOverlapCallback_CustomGeometry,		//PxGeometryType::eCUSTOM
	},
	//PxGeometryType::eHAIRSYSTEM
	{
		0,										//PxGeometryType::eSPHERE
		0,										//PxGeometryType::ePLANE
		0,										//PxGeometryType::eCAPSULE
		0,										//PxGeometryType::eBOX
		0,										//PxGeometryType::eCONVEXMESH
		0,										//PxGeometryType::ePARTICLESYSTEM
		0,										//PxGeometryType::eTETRAHEDRONMESH
		0,										//PxGeometryType::eTRIANGLEMESH
		0,										//PxGeometryType::eHEIGHTFIELD
		GeomOverlapCallback_NotSupported,		//PxGeometryType::eHAIRSYSTEM
		GeomOverlapCallback_CustomGeometry,		//PxGeometryType::eCUSTOM
	},
	//PxGeometryType::eCUSTOM
	{
		0,										//PxGeometryType::eSPHERE
		0,										//PxGeometryType::ePLANE
		0,										//PxGeometryType::eCAPSULE
		0,										//PxGeometryType::eBOX
		0,										//PxGeometryType::eCONVEXMESH
		0,										//PxGeometryType::ePARTICLESYSTEM
		0,										//PxGeometryType::eTETRAHEDRONMESH
		0,										//PxGeometryType::eTRIANGLEMESH
		0,										//PxGeometryType::eHEIGHTFIELD
		0,										//PxGeometryType::eHAIRSYSTEM
		GeomOverlapCallback_CustomGeometry,		//PxGeometryType::eCUSTOM
	},
};
// One row per geometry type — keep the table in sync with PxGeometryType.
PX_COMPILE_TIME_ASSERT(sizeof(gGeomOverlapMethodTable) / sizeof(gGeomOverlapMethodTable[0]) == PxGeometryType::eGEOMETRY_COUNT);
// Returns the shared dispatch table of geometry-pair overlap functions.
const GeomOverlapTable* Gu::getOverlapFuncTable()
{
	return gGeomOverlapMethodTable;
}
| 30,560 | C++ | 37.489924 | 191 | 0.732919 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuMetaData.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxIO.h"
#include "common/PxMetaData.h"
#include "GuHeightField.h"
#include "GuConvexMeshData.h"
#include "GuBigConvexData2.h"
#include "GuConvexMesh.h"
#include "GuTriangleMesh.h"
#include "GuTriangleMeshBV4.h"
#include "GuTriangleMeshRTree.h"
#include "foundation/PxIntrinsics.h"
using namespace physx;
using namespace Cm;
using namespace Gu;
///////////////////////////////////////////////////////////////////////////////
// Registers binary-serialization metadata for Valency (big-convex adjacency record:
// per-vertex neighbor count and offset into the adjacency array).
static void getBinaryMetaData_Valency(PxOutputStream& stream)
{
	PX_DEF_BIN_METADATA_CLASS(stream, Valency)
	PX_DEF_BIN_METADATA_ITEM(stream, Valency, PxU16, mCount, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, Valency, PxU16, mOffset, 0)
}
// Registers binary-serialization metadata for BigConvexRawData.
// Pointer-typed members (samples, valencies, adjacent verts) are serialized as
// extra data by the owning BigConvexData, so they are tagged ePTR here.
static void getBinaryMetaData_BigConvexRawData(PxOutputStream& stream)
{
	PX_DEF_BIN_METADATA_CLASS(stream, BigConvexRawData)
	PX_DEF_BIN_METADATA_ITEM(stream, BigConvexRawData, PxU16, mSubdiv, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, BigConvexRawData, PxU16, mNbSamples, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, BigConvexRawData, PxU8, mSamples, PxMetaDataFlag::ePTR)
	PX_DEF_BIN_METADATA_ITEM(stream, BigConvexRawData, PxU32, mNbVerts, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, BigConvexRawData, PxU32, mNbAdjVerts, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, BigConvexRawData, Valency, mValencies, PxMetaDataFlag::ePTR)
	PX_DEF_BIN_METADATA_ITEM(stream, BigConvexRawData, PxU8, mAdjacentVerts, PxMetaDataFlag::ePTR)
}
// Registers binary-serialization metadata for SDF (signed distance field),
// including its embedded Dim3 helper struct.
void SDF::getBinaryMetaData(PxOutputStream& stream)
{
	PX_DEF_BIN_METADATA_CLASS(stream, Dim3)
	PX_DEF_BIN_METADATA_ITEM(stream, Dim3, PxU32, x, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, Dim3, PxU32, y, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, Dim3, PxU32, z, 0)
	PX_DEF_BIN_METADATA_CLASS(stream, SDF)
	PX_DEF_BIN_METADATA_ITEM(stream, SDF, PxVec3, mMeshLower, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, SDF, PxReal, mSpacing, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, SDF, Dim3, mDims, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, SDF, PxU32, mNumSdfs, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, SDF, PxReal, mSdf, PxMetaDataFlag::ePTR)
	PX_DEF_BIN_METADATA_ITEM(stream, SDF, PxU32, mSubgridSize, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, SDF, PxU32, mNumStartSlots, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, SDF, PxU32, mSubgridStartSlots, PxMetaDataFlag::ePTR)
	PX_DEF_BIN_METADATA_ITEM(stream, SDF, PxU32, mNumSubgridSdfs, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, SDF, PxU8, mSubgridSdf, PxMetaDataFlag::ePTR)
	PX_DEF_BIN_METADATA_ITEM(stream, SDF, Dim3, mSdfSubgrids3DTexBlockDim, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, SDF, PxReal, mSubgridsMinSdfValue, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, SDF, PxReal, mSubgridsMaxSdfValue, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, SDF, PxU32, mBytesPerSparsePixel, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, SDF, bool, mOwnsMemory, 0)
}
// Registers binary-serialization metadata for BigConvexData, including the
// trailing extra-data blocks (support samples and valency/adjacency tables).
void BigConvexData::getBinaryMetaData(PxOutputStream& stream)
{
	// Register the dependent record types first.
	getBinaryMetaData_Valency(stream);
	getBinaryMetaData_BigConvexRawData(stream);
	PX_DEF_BIN_METADATA_CLASS(stream, BigConvexData)
	PX_DEF_BIN_METADATA_ITEM(stream, BigConvexData, BigConvexRawData, mData, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, BigConvexData, void, mVBuffer, PxMetaDataFlag::ePTR)
	//------ Extra-data ------
	// mData.mSamples
	// PT: can't use one array of PxU16 since we don't want to flip those bytes during conversion.
	// PT: We only align the first array for DE1340, but the second one shouldn't be aligned since
	// both are written as one unique block of memory.
	PX_DEF_BIN_METADATA_EXTRA_ARRAY(stream, BigConvexData, PxU8, mData.mNbSamples, PX_SERIAL_ALIGN, 0)
	PX_DEF_BIN_METADATA_EXTRA_ARRAY(stream, BigConvexData, PxU8, mData.mNbSamples, 0, 0)
	// mData.mValencies
	// PT: same here, we must only align the first array
	PX_DEF_BIN_METADATA_EXTRA_ARRAY(stream, BigConvexData, Valency, mData.mNbVerts, PX_SERIAL_ALIGN, 0)
	PX_DEF_BIN_METADATA_EXTRA_ALIGN(stream, BigConvexData, PX_SERIAL_ALIGN)
	PX_DEF_BIN_METADATA_EXTRA_ARRAY(stream, BigConvexData, PxU8, mData.mNbAdjVerts, 0, 0)
}
// Registers binary-serialization metadata for InternalObjectsData
// (internal bounding radius/extents cached inside a convex hull).
static void getBinaryMetaData_InternalObjectsData(PxOutputStream& stream)
{
	PX_DEF_BIN_METADATA_CLASS(stream, InternalObjectsData)
	PX_DEF_BIN_METADATA_ITEM(stream, InternalObjectsData, PxReal, mRadius, 0)
	PX_DEF_BIN_METADATA_ITEMS_AUTO(stream, InternalObjectsData, PxReal, mExtents, 0)
}
// Registers binary-serialization metadata for HullPolygonData
// (per-polygon plane equation plus vertex-reference bookkeeping).
static void getBinaryMetaData_HullPolygonData(PxOutputStream& stream)
{
	PX_DEF_BIN_METADATA_CLASS(stream, HullPolygonData)
	PX_DEF_BIN_METADATA_ITEMS_AUTO(stream, HullPolygonData, PxReal, mPlane, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, HullPolygonData, PxU16, mVRef8, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, HullPolygonData, PxU8, mNbVerts, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, HullPolygonData, PxU8, mMinIndex, 0)
}
// Registers binary-serialization metadata for ConvexHullData.
static void getBinaryMetaData_ConvexHullData(PxOutputStream& stream)
{
	PX_DEF_BIN_METADATA_CLASS(stream, ConvexHullData)
	PX_DEF_BIN_METADATA_ITEM(stream, ConvexHullData, PxBounds3, mAABB, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, ConvexHullData, PxVec3, mCenterOfMass, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, ConvexHullData, HullPolygonData, mPolygons, PxMetaDataFlag::ePTR)
	PX_DEF_BIN_METADATA_ITEM(stream, ConvexHullData, BigConvexRawData, mBigConvexRawData, PxMetaDataFlag::ePTR)
	PX_DEF_BIN_METADATA_ITEM(stream, ConvexHullData, SDF, mSdfData, PxMetaDataFlag::ePTR)
	//ML: the most significant bit of mNbEdges is used to indicate whether we have grb data or not. However, we don't support grb data
	//in serialization so we have to mask the most significant bit and force the contact gen run on CPU code path
	PX_DEF_BIN_METADATA_ITEM(stream, ConvexHullData, PxU16, mNbEdges, PxMetaDataFlag::eCOUNT_MASK_MSB)
	PX_DEF_BIN_METADATA_ITEM(stream, ConvexHullData, PxU8, mNbHullVertices, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, ConvexHullData, PxU8, mNbPolygons, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, ConvexHullData, InternalObjectsData, mInternal, 0)
}
// Registers binary-serialization metadata for Gu::ConvexMesh: dependent record
// types first, then the class layout, then the trailing extra-data blocks that
// follow the object in the serialized stream.
void Gu::ConvexMesh::getBinaryMetaData(PxOutputStream& stream)
{
	getBinaryMetaData_InternalObjectsData(stream);
	getBinaryMetaData_HullPolygonData(stream);
	getBinaryMetaData_ConvexHullData(stream);
	SDF::getBinaryMetaData(stream);
	BigConvexData::getBinaryMetaData(stream);
	PX_DEF_BIN_METADATA_VCLASS(stream,ConvexMesh)
	PX_DEF_BIN_METADATA_BASE_CLASS(stream,ConvexMesh, PxBase)
	//
	PX_DEF_BIN_METADATA_ITEM(stream, ConvexMesh, ConvexHullData, mHullData, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, ConvexMesh, PxU32, mNb, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, ConvexMesh, SDF, mSdfData, PxMetaDataFlag::ePTR)
	PX_DEF_BIN_METADATA_ITEM(stream, ConvexMesh, BigConvexData, mBigConvexData, PxMetaDataFlag::ePTR)
	PX_DEF_BIN_METADATA_ITEM(stream, ConvexMesh, PxReal, mMass, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, ConvexMesh, PxMat33, mInertia, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, ConvexMesh, GuMeshFactory, mMeshFactory, PxMetaDataFlag::ePTR)
	//------ Extra-data ------
	// mHullData.mPolygons (Gu::HullPolygonData, PxVec3, PxU8*2, PxU8)
	// PT: we only align the first array since the other ones are contained within it
	// NOTE(review): the repeated mNbEdges / mNbHullVertices arrays below describe
	// several distinct PxU8 sub-buffers written back-to-back in one block — they are
	// intentional duplicates, not copy-paste errors.
	PX_DEF_BIN_METADATA_EXTRA_ARRAY(stream, Gu::ConvexMesh, HullPolygonData, mHullData.mNbPolygons, PX_SERIAL_ALIGN, 0)
	PX_DEF_BIN_METADATA_EXTRA_ARRAY(stream, Gu::ConvexMesh, PxVec3, mHullData.mNbHullVertices, 0, 0)
	PX_DEF_BIN_METADATA_EXTRA_ARRAY(stream, Gu::ConvexMesh, PxU8, mHullData.mNbEdges, 0, PxMetaDataFlag::eCOUNT_MASK_MSB)
	PX_DEF_BIN_METADATA_EXTRA_ARRAY(stream, Gu::ConvexMesh, PxU8, mHullData.mNbEdges, 0, PxMetaDataFlag::eCOUNT_MASK_MSB)
	PX_DEF_BIN_METADATA_EXTRA_ARRAY(stream, Gu::ConvexMesh, PxU8, mHullData.mNbHullVertices, 0, 0)
	PX_DEF_BIN_METADATA_EXTRA_ARRAY(stream, Gu::ConvexMesh, PxU8, mHullData.mNbHullVertices, 0, 0)
	PX_DEF_BIN_METADATA_EXTRA_ARRAY(stream, Gu::ConvexMesh, PxU8, mHullData.mNbHullVertices, 0, 0)
	PX_DEF_BIN_METADATA_EXTRA_ARRAY(stream, Gu::ConvexMesh, PxU8, mNb, 0, PxMetaDataFlag::eCOUNT_MASK_MSB)
	PX_DEF_BIN_METADATA_EXTRA_ALIGN(stream, ConvexMesh, 4)
	//mSdfData this is currently broken
	//PX_DEF_BIN_METADATA_EXTRA_ITEMS(stream, Gu::SDF, PxReal, mSdf, mNumSdfs, 0, PX_SERIAL_ALIGN)
	//PX_DEF_BIN_METADATA_EXTRA_ITEMS(stream, Gu::SDF, PxU32, mSubgridStartSlots, mNumStartSlots, 0, PX_SERIAL_ALIGN)
	//PX_DEF_BIN_METADATA_EXTRA_ITEMS(stream, Gu::SDF, PxU8, mSubgridSdf, mNumSubgridSdfs, 0, PX_SERIAL_ALIGN)
	// mBigConvexData
	PX_DEF_BIN_METADATA_EXTRA_ITEM(stream, Gu::ConvexMesh, BigConvexData, mBigConvexData, PX_SERIAL_ALIGN)
}
///////////////////////////////////////////////////////////////////////////////
// Registers binary-serialization metadata for PxHeightFieldSample
// (height plus two packed material indices per sample).
static void getBinaryMetaData_PxHeightFieldSample(PxOutputStream& stream)
{
	PX_DEF_BIN_METADATA_CLASS(stream, PxHeightFieldSample)
	PX_DEF_BIN_METADATA_ITEM(stream, PxHeightFieldSample, PxI16, height, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, PxHeightFieldSample, PxBitAndByte, materialIndex0, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, PxHeightFieldSample, PxBitAndByte, materialIndex1, 0)
	PX_DEF_BIN_METADATA_TYPEDEF(stream, PxBitAndByte, PxU8)
}
// Registers binary-serialization metadata for HeightFieldData.
static void getBinaryMetaData_HeightFieldData(PxOutputStream& stream)
{
	PX_DEF_BIN_METADATA_TYPEDEF(stream, PxHeightFieldFlags, PxU16)
	PX_DEF_BIN_METADATA_TYPEDEF(stream, PxHeightFieldFormat::Enum, PxU32)
	PX_DEF_BIN_METADATA_CLASS(stream, HeightFieldData)
	PX_DEF_BIN_METADATA_ITEM(stream, HeightFieldData, PxBounds3, mAABB, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, HeightFieldData, PxU32, rows, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, HeightFieldData, PxU32, columns, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, HeightFieldData, PxU32, rowLimit, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, HeightFieldData, PxU32, colLimit, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, HeightFieldData, PxU32, nbColumns, 0)
	// The sample buffer is serialized as extra data by Gu::HeightField.
	PX_DEF_BIN_METADATA_ITEM(stream, HeightFieldData, PxHeightFieldSample, samples, PxMetaDataFlag::ePTR)
	PX_DEF_BIN_METADATA_ITEM(stream, HeightFieldData, PxReal, convexEdgeThreshold, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, HeightFieldData, PxHeightFieldFlags, flags, 0)
#ifdef EXPLICIT_PADDING_METADATA
	PX_DEF_BIN_METADATA_ITEM(stream, HeightFieldData, PxU16, paddAfterFlags, PxMetaDataFlag::ePADDING)
#endif
	PX_DEF_BIN_METADATA_ITEM(stream, HeightFieldData, PxHeightFieldFormat::Enum, format, 0)
}
// Registers binary-serialization metadata for Gu::HeightField and its
// trailing sample array.
void Gu::HeightField::getBinaryMetaData(PxOutputStream& stream)
{
	getBinaryMetaData_PxHeightFieldSample(stream);
	getBinaryMetaData_HeightFieldData(stream);
	PX_DEF_BIN_METADATA_TYPEDEF(stream, PxMaterialTableIndex, PxU16)
	PX_DEF_BIN_METADATA_VCLASS(stream, HeightField)
	PX_DEF_BIN_METADATA_BASE_CLASS(stream, HeightField, PxBase)
	PX_DEF_BIN_METADATA_ITEM(stream, HeightField, HeightFieldData, mData, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, HeightField, PxU32, mSampleStride, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, HeightField, PxU32, mNbSamples, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, HeightField, PxReal, mMinHeight, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, HeightField, PxReal, mMaxHeight, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, HeightField, PxU32, mModifyCount, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, HeightField, GuMeshFactory, mMeshFactory, PxMetaDataFlag::ePTR)
	//------ Extra-data ------
	// mData.samples
	PX_DEF_BIN_METADATA_EXTRA_ARRAY(stream, HeightField, PxHeightFieldSample, mNbSamples, PX_SERIAL_ALIGN, 0)	// PT: ### try to remove mNbSamples later
}
///////////////////////////////////////////////////////////////////////////////
// Registers binary-serialization metadata for RTreePage
// (SoA page of RTREE_N child AABBs plus child pointers/indices).
static void getBinaryMetaData_RTreePage(PxOutputStream& stream)
{
	PX_DEF_BIN_METADATA_CLASS(stream, RTreePage)
	PX_DEF_BIN_METADATA_ITEMS(stream, RTreePage, PxReal, minx, 0, RTREE_N)
	PX_DEF_BIN_METADATA_ITEMS(stream, RTreePage, PxReal, miny, 0, RTREE_N)
	PX_DEF_BIN_METADATA_ITEMS(stream, RTreePage, PxReal, minz, 0, RTREE_N)
	PX_DEF_BIN_METADATA_ITEMS(stream, RTreePage, PxReal, maxx, 0, RTREE_N)
	PX_DEF_BIN_METADATA_ITEMS(stream, RTreePage, PxReal, maxy, 0, RTREE_N)
	PX_DEF_BIN_METADATA_ITEMS(stream, RTreePage, PxReal, maxz, 0, RTREE_N)
	PX_DEF_BIN_METADATA_ITEMS(stream, RTreePage, PxU32, ptrs, 0, RTREE_N)
}
// Registers binary-serialization metadata for RTree and its page array.
void RTree::getBinaryMetaData(PxOutputStream& stream)
{
	getBinaryMetaData_RTreePage(stream);
	PX_DEF_BIN_METADATA_CLASS(stream, RTree)
	PX_DEF_BIN_METADATA_ITEM(stream, RTree, PxVec4, mBoundsMin, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, RTree, PxVec4, mBoundsMax, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, RTree, PxVec4, mInvDiagonal, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, RTree, PxVec4, mDiagonalScaler, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, RTree, PxU32, mPageSize, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, RTree, PxU32, mNumRootPages, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, RTree, PxU32, mNumLevels, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, RTree, PxU32, mTotalNodes, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, RTree, PxU32, mTotalPages, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, RTree, PxU32, mFlags, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, RTree, RTreePage, mPages, PxMetaDataFlag::ePTR)
	//------ Extra-data ------
	// mPages
	// NOTE(review): 128 matches the page alignment used when the tree is built — presumably
	// required for aligned SIMD loads; confirm against the RTree serialization code.
	PX_DEF_BIN_METADATA_EXTRA_ARRAY(stream,RTree, RTreePage, mTotalPages, 128, 0)
}
///////////////////////////////////////////////////////////////////////////////
// Registers binary-serialization metadata for SourceMeshBase
// (shared vertex/remap data for BV4 midphase meshes).
void SourceMeshBase::getBinaryMetaData(PxOutputStream& stream)
{
	PX_DEF_BIN_METADATA_VCLASS(stream, SourceMeshBase)
	PX_DEF_BIN_METADATA_ITEM(stream, SourceMeshBase, PxU32, mNbVerts, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, SourceMeshBase, PxVec3, mVerts, PxMetaDataFlag::ePTR)
	PX_DEF_BIN_METADATA_ITEM(stream, SourceMeshBase, PxU32, mType, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, SourceMeshBase, PxU32, mRemap, PxMetaDataFlag::ePTR)
}
// Registers binary-serialization metadata for SourceMesh (triangle layer on
// top of SourceMeshBase; 32- and 16-bit index buffers are stored as pointers).
void SourceMesh::getBinaryMetaData(PxOutputStream& stream)
{
	// SourceMesh
	PX_DEF_BIN_METADATA_VCLASS(stream, SourceMesh)
	PX_DEF_BIN_METADATA_BASE_CLASS(stream, SourceMesh, SourceMeshBase)
	PX_DEF_BIN_METADATA_ITEM(stream, SourceMesh, PxU32, mNbTris, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, SourceMesh, void, mTriangles32, PxMetaDataFlag::ePTR)
	PX_DEF_BIN_METADATA_ITEM(stream, SourceMesh, void, mTriangles16, PxMetaDataFlag::ePTR)
}
// Registers binary-serialization metadata for the quantized BV4 node
// (QuantizedAABB: per-axis 16-bit extents/center, plus packed node data).
static void getBinaryMetaData_BVDataPackedQ(PxOutputStream& stream)
{
	PX_DEF_BIN_METADATA_CLASS(stream, QuantizedAABB)
	PX_DEF_BIN_METADATA_ITEM(stream, QuantizedAABB, PxU16, mData[0].mExtents, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, QuantizedAABB, PxI16, mData[0].mCenter, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, QuantizedAABB, PxU16, mData[1].mExtents, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, QuantizedAABB, PxI16, mData[1].mCenter, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, QuantizedAABB, PxU16, mData[2].mExtents, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, QuantizedAABB, PxI16, mData[2].mCenter, 0)
	PX_DEF_BIN_METADATA_CLASS(stream, BVDataPackedQ)
	PX_DEF_BIN_METADATA_ITEM(stream, BVDataPackedQ, QuantizedAABB, mAABB, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, BVDataPackedQ, PxU32, mData, 0)
}
// Registers binary-serialization metadata for the non-quantized BV4 node
// (full-precision center/extents AABB plus packed node data).
static void getBinaryMetaData_BVDataPackedNQ(PxOutputStream& stream)
{
	PX_DEF_BIN_METADATA_CLASS(stream, CenterExtents)
	PX_DEF_BIN_METADATA_ITEM(stream, CenterExtents, PxVec3, mCenter, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, CenterExtents, PxVec3, mExtents, 0)
	PX_DEF_BIN_METADATA_CLASS(stream, BVDataPackedNQ)
	PX_DEF_BIN_METADATA_ITEM(stream, BVDataPackedNQ, CenterExtents, mAABB, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, BVDataPackedNQ, PxU32, mData, 0)
}
// Registers binary-serialization metadata for BV4Tree. The node array is
// written as extra data in either quantized or non-quantized form, selected
// at runtime by the mQuantized flag (the eCONTROL_FLIP entry covers the
// opposite case of the same control variable).
void BV4Tree::getBinaryMetaData(PxOutputStream& stream)
{
	getBinaryMetaData_BVDataPackedQ(stream);
	getBinaryMetaData_BVDataPackedNQ(stream);
	PX_DEF_BIN_METADATA_CLASS(stream, LocalBounds)
	PX_DEF_BIN_METADATA_ITEM(stream, LocalBounds, PxVec3, mCenter, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, LocalBounds, float, mExtentsMagnitude, 0)
	PX_DEF_BIN_METADATA_CLASS(stream, BV4Tree)
	PX_DEF_BIN_METADATA_ITEM(stream, BV4Tree, void, mMeshInterface, PxMetaDataFlag::ePTR)
	PX_DEF_BIN_METADATA_ITEM(stream, BV4Tree, LocalBounds, mLocalBounds, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, BV4Tree, PxU32, mNbNodes, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, BV4Tree, void, mNodes, PxMetaDataFlag::ePTR)
	PX_DEF_BIN_METADATA_ITEM(stream, BV4Tree, PxU32, mInitData, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, BV4Tree, PxVec3, mCenterOrMinCoeff, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, BV4Tree, PxVec3, mExtentsOrMaxCoeff, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, BV4Tree, bool, mUserAllocated, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, BV4Tree, bool, mQuantized, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, BV4Tree, bool, mIsEdgeSet, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, BV4Tree, bool, mPadding, PxMetaDataFlag::ePADDING)
	//------ Extra-data ------
	// PX_DEF_BIN_METADATA_EXTRA_ARRAY(stream, BV4Tree, BVDataPackedQ, mNbNodes, 16, 0)
	PX_DEF_BIN_METADATA_EXTRA_ITEMS(stream, BV4Tree, BVDataPackedQ, mQuantized, mNbNodes, PxMetaDataFlag::Enum(0), PX_SERIAL_ALIGN)
	PX_DEF_BIN_METADATA_EXTRA_ITEMS(stream, BV4Tree, BVDataPackedNQ, mQuantized, mNbNodes, PxMetaDataFlag::eCONTROL_FLIP, PX_SERIAL_ALIGN)
}
///////////////////////////////////////////////////////////////////////////////
// Registers binary-serialization metadata for Gu::TriangleMesh (common base of
// the RTree and BV4 mesh classes): class layout first, then the extra-data
// buffers (vertices, indices, per-triangle attributes) that trail the object.
void Gu::TriangleMesh::getBinaryMetaData(PxOutputStream& stream)
{
	SDF::getBinaryMetaData(stream);
	PX_DEF_BIN_METADATA_VCLASS(stream, TriangleMesh)
	PX_DEF_BIN_METADATA_BASE_CLASS(stream, TriangleMesh, PxBase)
	PX_DEF_BIN_METADATA_ITEM(stream, TriangleMesh, PxU32, mNbVertices, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, TriangleMesh, PxU32, mNbTriangles, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, TriangleMesh, PxVec3, mVertices, PxMetaDataFlag::ePTR)
	PX_DEF_BIN_METADATA_ITEM(stream, TriangleMesh, void, mTriangles, PxMetaDataFlag::ePTR)
	PX_DEF_BIN_METADATA_ITEM(stream, TriangleMesh, PxBounds3, mAABB, 0)	// PT: warning, this is actually a CenterExtents
	PX_DEF_BIN_METADATA_ITEM(stream, TriangleMesh, PxU8, mExtraTrigData, PxMetaDataFlag::ePTR)
	PX_DEF_BIN_METADATA_ITEM(stream, TriangleMesh, PxReal, mGeomEpsilon, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, TriangleMesh, PxU8, mFlags, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, TriangleMesh, PxU16, mMaterialIndices, PxMetaDataFlag::ePTR)
	PX_DEF_BIN_METADATA_ITEM(stream, TriangleMesh, PxU32, mFaceRemap, PxMetaDataFlag::ePTR)
	PX_DEF_BIN_METADATA_ITEM(stream, TriangleMesh, PxU32, mAdjacencies, PxMetaDataFlag::ePTR)
	PX_DEF_BIN_METADATA_ITEM(stream, TriangleMesh, GuMeshFactory, mMeshFactory, PxMetaDataFlag::ePTR)
	PX_DEF_BIN_METADATA_ITEM(stream, TriangleMesh, void, mEdgeList, PxMetaDataFlag::ePTR)
	PX_DEF_BIN_METADATA_ITEM(stream, TriangleMesh, PxReal, mMass, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, TriangleMesh, PxMat33, mInertia, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, TriangleMesh, PxVec3, mLocalCenterOfMass, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, TriangleMesh, void, mGRB_triIndices, PxMetaDataFlag::ePTR)
	PX_DEF_BIN_METADATA_ITEM(stream, TriangleMesh, void, mGRB_triAdjacencies, PxMetaDataFlag::ePTR)
	PX_DEF_BIN_METADATA_ITEM(stream, TriangleMesh, PxU32, mGRB_faceRemap, PxMetaDataFlag::ePTR)
	PX_DEF_BIN_METADATA_ITEM(stream, TriangleMesh, PxU32, mGRB_faceRemapInverse, PxMetaDataFlag::ePTR)
	PX_DEF_BIN_METADATA_ITEM(stream, TriangleMesh, Gu::BV32Tree, mGRB_BV32Tree, PxMetaDataFlag::ePTR)
	PX_DEF_BIN_METADATA_ITEM(stream, TriangleMesh, SDF, mSdfData, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, TriangleMesh, PxU32, mAccumulatedTrianglesRef, PxMetaDataFlag::ePTR)
	PX_DEF_BIN_METADATA_ITEM(stream, TriangleMesh, PxU32, mTrianglesReferences, PxMetaDataFlag::ePTR)
	PX_DEF_BIN_METADATA_ITEM(stream, TriangleMesh, PxU32, mNbTrianglesReferences, 0)
	//------ Extra-data ------
	// mVertices
	PX_DEF_BIN_METADATA_EXTRA_ITEMS(stream, TriangleMesh, PxVec3, mVertices, mNbVertices, 0, PX_SERIAL_ALIGN)
	// mTriangles
	// PT: quite tricky here: we exported either an array of PxU16s or an array of PxU32s. We trick the converter by
	// pretending we exported both, with the same control variable (m16BitIndices) but opposed control flags. Also there's
	// no way to capture "mNumTriangles*3" using the macros, so we just pretend we exported 3 buffers instead of 1.
	// But since in reality it's all the same buffer, only the first one is declared as aligned.
	PX_DEF_BIN_METADATA_EXTRA_ITEMS_MASKED_CONTROL(stream, TriangleMesh, PxU16, mFlags, PxTriangleMeshFlag::e16_BIT_INDICES, mNbTriangles, 0, PX_SERIAL_ALIGN)
	PX_DEF_BIN_METADATA_EXTRA_ITEMS_MASKED_CONTROL(stream, TriangleMesh, PxU16, mFlags, PxTriangleMeshFlag::e16_BIT_INDICES, mNbTriangles, 0, 0)
	PX_DEF_BIN_METADATA_EXTRA_ITEMS_MASKED_CONTROL(stream, TriangleMesh, PxU16, mFlags, PxTriangleMeshFlag::e16_BIT_INDICES, mNbTriangles, 0, 0)
	PX_DEF_BIN_METADATA_EXTRA_ITEMS_MASKED_CONTROL(stream, TriangleMesh, PxU32, mFlags, PxTriangleMeshFlag::e16_BIT_INDICES, mNbTriangles, PxMetaDataFlag::eCONTROL_FLIP, PX_SERIAL_ALIGN)
	PX_DEF_BIN_METADATA_EXTRA_ITEMS_MASKED_CONTROL(stream, TriangleMesh, PxU32, mFlags, PxTriangleMeshFlag::e16_BIT_INDICES, mNbTriangles, PxMetaDataFlag::eCONTROL_FLIP, 0)
	PX_DEF_BIN_METADATA_EXTRA_ITEMS_MASKED_CONTROL(stream, TriangleMesh, PxU32, mFlags, PxTriangleMeshFlag::e16_BIT_INDICES, mNbTriangles, PxMetaDataFlag::eCONTROL_FLIP, 0)
	// mExtraTrigData
	PX_DEF_BIN_METADATA_EXTRA_ITEMS(stream, TriangleMesh, PxU8, mExtraTrigData, mNbTriangles, 0, PX_SERIAL_ALIGN)
	// mMaterialIndices
	PX_DEF_BIN_METADATA_EXTRA_ITEMS(stream, TriangleMesh, PxU16, mMaterialIndices, mNbTriangles, 0, PX_SERIAL_ALIGN)
	// mFaceRemap
	PX_DEF_BIN_METADATA_EXTRA_ITEMS(stream, TriangleMesh, PxU32, mFaceRemap, mNbTriangles, 0, PX_SERIAL_ALIGN)
	// mAdjacencies
	// NOTE(review): three entries because the buffer holds mNbTriangles*3 entries and the
	// macros cannot express the *3 factor — same trick as the index buffers above.
	PX_DEF_BIN_METADATA_EXTRA_ITEMS(stream, TriangleMesh, PxU32, mAdjacencies, mNbTriangles, 0, PX_SERIAL_ALIGN)
	PX_DEF_BIN_METADATA_EXTRA_ITEMS(stream, TriangleMesh, PxU32, mAdjacencies, mNbTriangles, 0, 0)
	PX_DEF_BIN_METADATA_EXTRA_ITEMS(stream, TriangleMesh, PxU32, mAdjacencies, mNbTriangles, 0, 0)
	// GPU data missing!
	// mSdf, this is currently broken
	//PX_DEF_BIN_METADATA_EXTRA_ITEMS(stream, TriangleMesh, PxReal, mSdfData.mSdf, mSdfData.mNumSdfs, 0, PX_SERIAL_ALIGN)
	//PX_DEF_BIN_METADATA_EXTRA_ITEMS(stream, TriangleMesh, PxU32, mSdfData.mSubgridStartSlots, mSdfData.mNumStartSlots, 0, PX_SERIAL_ALIGN)
	//PX_DEF_BIN_METADATA_EXTRA_ITEMS(stream, TriangleMesh, PxU8, mSdfData.mSubgridSdf, mSdfData.mNumSubgridSdfs, 0, PX_SERIAL_ALIGN)
	// mAccumulatedTrianglesRef
	// PX_DEF_BIN_METADATA_EXTRA_ITEMS(stream, TriangleMesh, PxU32, mAccumulatedTrianglesRef, mNbTrianglesReferences, 0, PX_SERIAL_ALIGN)
	// mTrianglesReferences
	// PX_DEF_BIN_METADATA_EXTRA_ITEMS(stream, TriangleMesh, PxU32, mTrianglesReferences, mNbTrianglesReferences, 0, PX_SERIAL_ALIGN)
#ifdef EXPLICIT_PADDING_METADATA
	PX_DEF_BIN_METADATA_ITEMS_AUTO(stream, TriangleMesh, PxU32, mPaddingFromInternalMesh, PxMetaDataFlag::ePADDING)
#endif
}
// Registers binary-serialization metadata for the RTree-based triangle-mesh midphase.
// \param stream output stream receiving the metadata records
void Gu::RTreeTriangleMesh::getBinaryMetaData(PxOutputStream& stream)
{
	// Emit the embedded RTree type's metadata first so the member entry below can reference it.
	RTree::getBinaryMetaData(stream);
	// Virtual-class record, then base class and the single extra member.
	PX_DEF_BIN_METADATA_VCLASS(stream, RTreeTriangleMesh)
	PX_DEF_BIN_METADATA_BASE_CLASS(stream, RTreeTriangleMesh, TriangleMesh)
	PX_DEF_BIN_METADATA_ITEM(stream, RTreeTriangleMesh, RTree, mRTree, 0)
}
// Registers binary-serialization metadata for the BV4-based triangle-mesh midphase.
// \param stream output stream receiving the metadata records
void Gu::BV4TriangleMesh::getBinaryMetaData(PxOutputStream& stream)
{
	// Emit the metadata of the embedded types first (SourceMeshBase before SourceMesh,
	// matching the inheritance order) so the member entries below can reference them.
	SourceMeshBase::getBinaryMetaData(stream);
	SourceMesh::getBinaryMetaData(stream);
	BV4Tree::getBinaryMetaData(stream);
	PX_DEF_BIN_METADATA_VCLASS(stream, BV4TriangleMesh)
	PX_DEF_BIN_METADATA_BASE_CLASS(stream, BV4TriangleMesh, TriangleMesh)
	PX_DEF_BIN_METADATA_ITEM(stream, BV4TriangleMesh, SourceMesh, mMeshInterface, 0)
	PX_DEF_BIN_METADATA_ITEM(stream, BV4TriangleMesh, BV4Tree, mBV4Tree, 0)
}
///////////////////////////////////////////////////////////////////////////////
| 25,089 | C++ | 50.519507 | 183 | 0.75009 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuQuerySystem.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuQuerySystem.h"
#include "GuBounds.h"
#include "GuBVH.h"
#include "foundation/PxAlloca.h"
#include "common/PxProfileZone.h"
using namespace physx;
using namespace Gu;
///////////////////////////////////////////////////////////////////////////////
bool contains(PxArray<PxU32>& pruners, PxU32 index)
{
const PxU32 nb = pruners.size();
for(PxU32 i=0;i<nb;i++)
{
if(pruners[i]==index)
return true;
}
return false;
}
///////////////////////////////////////////////////////////////////////////////
// Wraps a pruner together with the bookkeeping the query system needs:
// delayed-update (dirty) list, static/dynamic object counts, and a flag telling
// whether a static object is among the pending updates.
QuerySystem::PrunerExt::PrunerExt(Pruner* pruner, PxU32 preallocated) :
	mPruner			(pruner),
	mDirtyList		("QuerySystem::PrunerExt::mDirtyList"),
	mNbStatic		(0),
	mNbDynamic		(0),
	mDirtyStatic	(false)
{
	// Honor the preallocation hint when a pruner was actually provided.
	if(pruner && preallocated)
	{
		pruner->preallocate(preallocated);
	}
}
QuerySystem::PrunerExt::~PrunerExt()
{
	// The extension owns the pruner passed to its constructor and releases it here.
	PX_DELETE(mPruner);
}
// Releases memory that is not currently needed. Called from QuerySystem::flushMemory().
void QuerySystem::PrunerExt::flushMemory()
{
	// Only drop the dirty list's storage when it holds no pending updates;
	// resetting a non-empty list would lose the delayed updates.
	if(!mDirtyList.size())
		mDirtyList.reset();

	// PT: TODO: flush bitmap here

	// PT: TODO: flush pruner here?
}
// PT: ok things became more complicated than before here. We'd like to delay the update of *both* the transform and the bounds,
// since immediately updating only one of them doesn't make much sense (it invalidates the pruner's data structure anyway). When both
// are delayed it gives users the ability to query the pruners *without* committing the changes, i.e. they can query the old snapshot
// for as long as they please (i.e. a raycast wouldn't automatically trigger a structure update).
//
// Now the problem is that we need to store (at least) the transform until the update actually happens, and the initial code didn't
// support this. We also want to do this in an efficient way, which of course makes things more difficult.
//
// A naive version would simply use a per-pruner hashmap between the PrunerHandle and its data. Might be slower than before.
//
// Another version could build on the initial bitmap-based solution and use arrays of transforms/bounds as companions to the array
// of PrunerHandle (or we could mix all that data in a single structure). The issue with this is that two consecutive updates on the
// same object wouldn't work anymore: the second call would check the bitmap, see that the bit is set already, and skip the work.
// We'd need to update the cached data instead, i.e. we'd need a mapping between the PrunerHandle and its position in mDirtyList.
// And we don't have that.
//
// A potential way to fix this could be to allow the same PrunerHandle to appear multiple times in mDirtyList, with the assumption
// that users will not update the same object multiple times very often (...). The way it would work:
// - during "add", dirtyMap is set, handle/transform/bounds are pushed to mDirtyList.
// - during "remove", dirtyMap is reset *and that's it*. We don't bother purging mDirtyList (i.e. we kill the current O(n) search there)
// - during "process" we use dirtyMap to validate the update. If bit is cleared, ignore mDirtyList entry. Duplicate entries work as long
// as mDirtyList is processed in linear order. One issue is that the current mDirtyList is also passed to the pruner as-is for the
// update, so we'd need to rebuild a separate array for that and/or make sure all pruners accept duplicate entries in that array.
// Deep down that specific rabbit hole we'll actually find the recently discovered issue regarding the mToRefit array...
//
// Bit tricky. This is only for user-updates anyway (as opposed to sim updates) so this probably doesn't need ultimate perf? Note however
// that we "remove from dirty list" when an object is removed, which happens all the time with or without user updates (e.g. streaming etc).
static const bool gUseOldCode = false;
// Records a delayed (batched) user update for one object. The new pose/bounds are cached
// and applied later in processDirtyList(). In the new code path (gUseOldCode==false) the
// same handle may legally appear several times in the list — see the design discussion
// above this function.
// \param handle     pruner handle of the updated object
// \param dynamic    non-zero for dynamic objects; a static update sets mDirtyStatic
// \param transform  new pose, applied during processDirtyList()
// \param userBounds optional caller-provided bounds; NULL means "recompute from geometry"
void QuerySystem::PrunerExt::addToDirtyList(PrunerHandle handle, PxU32 dynamic, const PxTransform& transform, const PxBounds3* userBounds)
{
	PxBitMap& dirtyMap = mDirtyMap;
	{
		// Grow the bitmap so it can index 'handle'. Doubling (with a 1024 floor) keeps resizes rare.
		if(dirtyMap.size() <= handle)
		{
			PxU32 size = PxMax<PxU32>(dirtyMap.size()*2, 1024);
			const PxU32 minSize = handle+1;
			if(minSize>size)
				size = minSize*2;
			dirtyMap.resize(size);
			PX_ASSERT(handle<dirtyMap.size());
			PX_ASSERT(!dirtyMap.test(handle));
		}
	}
	if(gUseOldCode)
	{
		// Old path: each handle appears at most once in the list.
		if(!dirtyMap.test(handle))
		{
			dirtyMap.set(handle);
			mDirtyList.pushBack(handle);
		}
	}
	else
	{
		// New path: mDirtyList and mDirtyData grow in lockstep — entry i of one
		// matches entry i of the other (relied upon by processDirtyList()).
		dirtyMap.set(handle);
		mDirtyList.pushBack(handle);
		Data& d = mDirtyData.insert();
		d.mPose = transform;
		if(userBounds)
			d.mBounds = *userBounds;
		else
			d.mBounds.setEmpty();	// empty bounds = marker meaning "recompute from geometry"
	}
	// A pending static update must invalidate the static timestamp at process time.
	if(!dynamic)
		mDirtyStatic = true;
}
// Cancels any pending delayed update for 'handle' (called when the object is removed).
// In the new code path only the bitmap bit is cleared; stale mDirtyList entries are
// ignored later by processDirtyList(), avoiding an O(n) search here.
void QuerySystem::PrunerExt::removeFromDirtyList(PrunerHandle handle)
{
	PxBitMap& dirtyMap = mDirtyMap;
	if(gUseOldCode)
	{
		if(dirtyMap.boundedTest(handle))
		{
			dirtyMap.reset(handle);
			mDirtyList.findAndReplaceWithLast(handle);
		}
	}
	else
	{
		dirtyMap.boundedReset(handle);
	}

	// PT: if we remove the object that made us set mDirtyStatic to true, tough luck,
	// we don't bother fixing that bool here. It's going to potentially cause an
	// unnecessary update of the character controller's caches, which is not a big deal.
}
// Applies all delayed updates recorded via addToDirtyList() and commits them to the
// pruner in one batch.
// \param adapter   gives access to an object's geometry from its payload
// \param inflation bounds scale factor (caller passes 1 + mInflation)
// \return true if a *static* object was among the updates — caller must then
//         invalidate the static timestamp
bool QuerySystem::PrunerExt::processDirtyList(const Adapter& adapter, float inflation)
{
	const PxU32 numDirtyList = mDirtyList.size();
	if(!numDirtyList)
		return false;
	if(gUseOldCode)
	{
		const PrunerHandle* const prunerHandles = mDirtyList.begin();
		for(PxU32 i=0; i<numDirtyList; i++)
		{
			const PrunerHandle handle = prunerHandles[i];
			mDirtyMap.reset(handle);
			// PT: we compute the new bounds and store them directly in the pruner structure to avoid copies. We delay the updateObjects() call
			// to take advantage of batching.
			PrunerPayloadData payloadData;
			const PrunerPayload& pp = mPruner->getPayloadData(handle, &payloadData);
			computeBounds(*payloadData.mBounds, adapter.getGeometry(pp), *payloadData.mTransform, 0.0f, inflation);
		}
		// PT: batch update happens after the loop instead of once per loop iteration
		mPruner->updateObjects(prunerHandles, numDirtyList);
		mDirtyList.clear();
	}
	else
	{
		// PT: TODO: this stuff is not 100% satisfying, since we do allow the same object to be updated multiple times.
		// Would be nice to revisit & improve at some point.

		// The valid handles are compacted in place at the front of mDirtyList; entries
		// whose bitmap bit was cleared (object removed after being dirtied) are skipped.
		PrunerHandle* prunerHandles = mDirtyList.begin();
		PxU32 nbValid = 0;
		for(PxU32 i=0; i<numDirtyList; i++)
		{
			const PrunerHandle handle = prunerHandles[i];
			if(mDirtyMap.test(handle))
			{
				// PT: we compute the new bounds and store them directly in the pruner structure to avoid copies. We delay the updateObjects() call
				// to take advantage of batching.
				PrunerPayloadData payloadData;
				const PrunerPayload& pp = mPruner->getPayloadData(handle, &payloadData);
				// Apply the cached pose; empty cached bounds mean "recompute from geometry"
				// (see addToDirtyList), otherwise use the user-provided bounds as-is.
				*payloadData.mTransform = mDirtyData[i].mPose;
				if(mDirtyData[i].mBounds.isEmpty())
					computeBounds(*payloadData.mBounds, adapter.getGeometry(pp), mDirtyData[i].mPose, 0.0f, inflation);
				else
					*payloadData.mBounds = mDirtyData[i].mBounds;
				prunerHandles[nbValid++] = handle;
			}
			else
			{
				// PT: if not set, object has been added to the list then removed
			}
		}
		// PT: batch update happens after the loop instead of once per loop iteration
		mPruner->updateObjects(prunerHandles, nbValid);

		// PT: have to reset the bits *after* the above loop now. Unclear if clearing the
		// whole map would be faster ("it depends" I guess).
		// (Resetting earlier would break duplicate-handle entries processed above.)
		while(nbValid--)
		{
			const PrunerHandle handle = *prunerHandles++;
			mDirtyMap.reset(handle);
		}
		mDirtyList.clear();
		mDirtyData.clear();
	}
	// Report-and-reset the "a static object was dirtied" flag.
	const bool ret = mDirtyStatic;
	mDirtyStatic = false;
	return ret;
}
///////////////////////////////////////////////////////////////////////////////
// Validates a user-provided pruner index and returns the matching entry.
// Reports an error and returns NULL when the index is out of range or the slot
// has been freed by removePruner().
static PX_FORCE_INLINE QuerySystem::PrunerExt* checkPrunerIndex(PxU32 prunerIndex, const PxArray<QuerySystem::PrunerExt*>& prunerExt)
{
	if(prunerIndex < prunerExt.size())
	{
		QuerySystem::PrunerExt* entry = prunerExt[prunerIndex];
		if(entry)
			return entry;
	}

	PxGetFoundation().error(PxErrorCode::eINVALID_PARAMETER, PX_FL, "Invalid pruner index");
	return NULL;
}
// \param contextID         profiling context id forwarded to PX_PROFILE_ZONE
// \param inflation         relative bounds inflation; computed bounds use a (1 + inflation) scale
// \param adapter           gives access to an object's geometry from its payload (stored by reference)
// \param usesTreeOfPruners when true, a BVH over all pruners is rebuilt after each commit
QuerySystem::QuerySystem(PxU64 contextID, float inflation, const Adapter& adapter, bool usesTreeOfPruners) :
	mAdapter				(adapter),
	mTreeOfPruners			(NULL),
	mContextID				(contextID),
	mStaticTimestamp		(0),
	mInflation				(inflation),
	mPrunerNeedsUpdating	(false),
	mTimestampNeedsUpdating	(false),
	mUsesTreeOfPruners		(usesTreeOfPruners)
	//mBatchUserUpdates		(batchUserUpdates)
{
}
QuerySystem::~QuerySystem()
{
	// Release the optional top-level BVH, then every remaining pruner wrapper.
	PX_DELETE(mTreeOfPruners);

	const PxU32 nbPruners = mPrunerExt.size();
	for(PxU32 index=0; index<nbPruners; index++)
	{
		// Slots can be NULL when a pruner was removed earlier; PX_DELETE handles that.
		PrunerExt* entry = mPrunerExt[index];
		PX_DELETE(entry);
	}
}
// Registers a pruner (ownership transfers to the query system) and returns its index.
// Slots freed by removePruner() are recycled before the array grows.
PxU32 QuerySystem::addPruner(Pruner* pruner, PxU32 preallocated)
{
	PrunerExt* entry = PX_NEW(PrunerExt)(pruner, preallocated);

	if(mFreePruners.size())
	{
		// Reuse a previously freed slot.
		const PxU32 recycledIndex = mFreePruners.popBack();
		mPrunerExt[recycledIndex] = entry;
		return recycledIndex;
	}

	// No free slot available: append a new one.
	const PxU32 newIndex = mPrunerExt.size();
	mPrunerExt.pushBack(entry);
	return newIndex;
}
// Unregisters and destroys a pruner. The slot is kept (set to NULL) and its index
// recycled by the next addPruner() call.
void QuerySystem::removePruner(PxU32 prunerIndex)
{
	PrunerExt* entry = checkPrunerIndex(prunerIndex, mPrunerExt);
	if(!entry)
		return;

	// PT: it is legal to delete a pruner that still contains objects, but we should still properly update the static timestamp.
	if(entry->mNbStatic)
		invalidateStaticTimestamp();

	PX_DELETE(entry);

	mPrunerExt[prunerIndex] = NULL;
	mFreePruners.pushBack(prunerIndex);

	// We don't bother searching mDirtyPruners since it's going to be cleared next frame
}
void QuerySystem::flushMemory()
{
const PxU32 nb = mPrunerExt.size();
for(PxU32 i=0;i<nb;i++)
{
PrunerExt* pe = mPrunerExt[i]; // Can be NULL if the pruner has been removed
if(pe)
pe->flushMemory();
}
}
// Adds one object to a pruner.
// \param payload     user payload stored with the object
// \param prunerIndex target pruner (validated; error is reported on bad index)
// \param dynamic     true for dynamic objects; adding a static object invalidates the static timestamp
// \param transform   object pose
// \param userBounds  optional precomputed bounds; when NULL bounds are computed here with inflation
// \return opaque value encoding (prunerIndex, dynamic, pruner handle) for later calls,
//         or INVALID_ACTOR_SHAPE_DATA when the pruner index is invalid
ActorShapeData QuerySystem::addPrunerShape(const PrunerPayload& payload, PxU32 prunerIndex, bool dynamic, const PxTransform& transform, const PxBounds3* userBounds)
{
	PrunerExt* pe = checkPrunerIndex(prunerIndex, mPrunerExt);
	if(!pe)
		return INVALID_ACTOR_SHAPE_DATA;

	mPrunerNeedsUpdating = true;

	// Track per-pruner object counts; static adds dirty the static timestamp.
	if(dynamic)
	{
		pe->mNbDynamic++;
	}
	else
	{
		pe->mNbStatic++;
		invalidateStaticTimestamp();
	}

	PX_ASSERT(pe->mPruner);

	const PxBounds3* boundsPtr;
	PxBounds3 bounds;
	if(userBounds)
	{
		boundsPtr = userBounds;
	}
	else
	{
		// Bounds are computed from the geometry with a (1 + mInflation) scale.
		computeBounds(bounds, mAdapter.getGeometry(payload), transform, 0.0f, 1.0f + mInflation);
		boundsPtr = &bounds;
	}

	PrunerHandle handle;
	pe->mPruner->addObjects(&handle, boundsPtr, &payload, &transform, 1, false);

	return createActorShapeData(createPrunerInfo(prunerIndex, dynamic), handle);
}
// Removes an object previously added with addPrunerShape().
// \param data            encoded value returned by addPrunerShape()
// \param removalCallback optional callback the pruner invokes for the removed payload
void QuerySystem::removePrunerShape(ActorShapeData data, PrunerPayloadRemovalCallback* removalCallback)
{
	// Decode pruner index / dynamic flag / handle from the packed value.
	const PrunerInfo info = getPrunerInfo(data);
	const PxU32 prunerIndex = getPrunerIndex(info);

	PrunerExt* pe = checkPrunerIndex(prunerIndex, mPrunerExt);
	if(!pe)
		return;

	mPrunerNeedsUpdating = true;

	const PxU32 dynamic = getDynamic(info);
	const PrunerHandle handle = getPrunerHandle(data);

	PX_ASSERT(pe->mPruner);

	// Keep per-pruner counts in sync; removing a static object dirties the static timestamp.
	if(dynamic)
	{
		PX_ASSERT(pe->mNbDynamic);
		pe->mNbDynamic--;
	}
	else
	{
		PX_ASSERT(pe->mNbStatic);
		pe->mNbStatic--;
		invalidateStaticTimestamp();
	}

	//if(mBatchUserUpdates)
		// Cancel any pending delayed update for this handle before it is destroyed.
		pe->removeFromDirtyList(handle);

	pe->mPruner->removeObjects(&handle, 1, removalCallback);
}
// Updates an object's pose (and optionally bounds).
// \param data        encoded value returned by addPrunerShape()
// \param immediately true = write pose/bounds into the pruner now; false = batch the
//                    update for the next processDirtyLists() call, so queries can still
//                    see the old snapshot until then
// \param transform   new pose
// \param userBounds  optional caller-provided bounds; NULL means recompute from geometry
void QuerySystem::updatePrunerShape(ActorShapeData data, bool immediately, const PxTransform& transform, const PxBounds3* userBounds)
{
	const PrunerInfo info = getPrunerInfo(data);
	const PxU32 prunerIndex = getPrunerIndex(info);

	PrunerExt* pe = checkPrunerIndex(prunerIndex, mPrunerExt);
	if(!pe)
		return;

	mPrunerNeedsUpdating = true;

	const PxU32 dynamic = getDynamic(info);
	const PrunerHandle handle = getPrunerHandle(data);

	PX_ASSERT(pe->mPruner);

	Pruner* pruner = pe->mPruner;
	if(immediately)
	{
		if(!dynamic)
			invalidateStaticTimestamp();

		// Write the new pose and bounds straight into the pruner's arrays (no copies).
		PrunerPayloadData payloadData;
		const PrunerPayload& pp = pruner->getPayloadData(handle, &payloadData);

		*payloadData.mTransform = transform;

		if(userBounds)
			*payloadData.mBounds = *userBounds;
		else
			computeBounds(*payloadData.mBounds, mAdapter.getGeometry(pp), transform, 0.0f, 1.0f + mInflation);

		// PT: TODO: would it be better to pass the bounds & transform directly to this function?
		pruner->updateObjects(&handle, 1);
	}
	else
	{
		// PT: we don't update the static timestamp immediately, so that users can query the
		// old state of the structure without invalidating their caches. This will be resolved
		// in processDirtyLists.

		if(gUseOldCode)
			pruner->setTransform(handle, transform);

		// PT: we don't shrink mDirtyList anymore in removePrunerShape so the size of that array can be reused as
		// a flag telling us whether we already encountered this pruner or not. If not, we add its index to mDirtyPruners.
		// Goal is to avoid processing all pruners in processDirtyLists.
		if(!pe->mDirtyList.size())
		{
			PX_ASSERT(!contains(mDirtyPruners, prunerIndex));
			mDirtyPruners.pushBack(prunerIndex);
		}
		else
		{
			PX_ASSERT(contains(mDirtyPruners, prunerIndex));
		}

		pe->addToDirtyList(handle, dynamic, transform, userBounds);
	}
}
// Returns the payload stored for an object, and optionally its pose/bounds pointers.
// \param data encoded value returned by addPrunerShape() (assumed valid — assert only)
// \param ppd  optional out: pointers to the object's transform and bounds inside the pruner
const PrunerPayload& QuerySystem::getPayloadData(ActorShapeData data, PrunerPayloadData* ppd) const
{
	// Decode the pruner index and handle packed inside 'data', then forward the call.
	const PrunerInfo info = getPrunerInfo(data);
	const PxU32 prunerIndex = getPrunerIndex(info);
	PX_ASSERT(checkPrunerIndex(prunerIndex, mPrunerExt));

	const PrunerHandle handle = getPrunerHandle(data);

	Pruner* pruner = mPrunerExt[prunerIndex]->mPruner;
	PX_ASSERT(pruner);
	return pruner->getPayloadData(handle, ppd);
}
// Flushes all delayed user updates (updatePrunerShape with immediately=false).
// Only pruners recorded in mDirtyPruners are visited. Caller must hold the writer lock.
void QuerySystem::processDirtyLists()
{
	PX_PROFILE_ZONE("QuerySystem.processDirtyLists", mContextID);

	const PxU32 nbDirtyPruners = mDirtyPruners.size();
	if(!nbDirtyPruners)
		return;

	// must already have acquired writer lock here

	const float inflation = 1.0f + mInflation;

	// Delayed static-object updates are resolved here, all at once.
	bool mustInvalidateStaticTimestamp = false;
	for(PxU32 ii=0;ii<nbDirtyPruners;ii++)
	{
		const PxU32 i = mDirtyPruners[ii];
		PrunerExt* pe = mPrunerExt[i];	// Can be NULL if the pruner has been removed
		if(pe && pe->processDirtyList(mAdapter, inflation))
			mustInvalidateStaticTimestamp = true;
	}

	if(mustInvalidateStaticTimestamp)
		invalidateStaticTimestamp();

	mDirtyPruners.clear();
}
// Steps and/or commits all pruners.
// \param buildStep when true, dynamic pruners advance their incremental rebuild
// \param commit    when true, all pruners commit pending changes (and the tree of
//                  pruners is rebuilt if enabled)
void QuerySystem::update(bool buildStep, bool commit)
{
	PX_PROFILE_ZONE("QuerySystem::update", mContextID);

	if(!buildStep && !commit)
	{
		//mPrunerNeedsUpdating = true;	// PT: removed, why was it here?
		return;
	}

	// flush user modified objects
//	if(mBatchUserUpdates)
		processDirtyLists();

	const PxU32 nb = mPrunerExt.size();
	for(PxU32 i=0;i<nb;i++)
	{
		PrunerExt* pe = mPrunerExt[i];	// Can be NULL if the pruner has been removed
		if(!pe)
			continue;

		Pruner* pruner = pe->mPruner;
		if(pruner)
		{
			if(buildStep && pruner->isDynamic())
				static_cast<DynamicPruner*>(pruner)->buildStep(true);

			if(commit)
				pruner->commit();
		}
	}

	if(commit)
	{
		if(mUsesTreeOfPruners)
			createTreeOfPruners();
	}

	// When we only build-stepped, a commitUpdates() is still needed before querying.
	mPrunerNeedsUpdating = !commit;
}
// Lazily commits pending changes before a query. Uses a check / lock / re-check pattern
// so callers only pay for the lock while an update is actually pending.
void QuerySystem::commitUpdates()
{
	PX_PROFILE_ZONE("QuerySystem.commitUpdates", mContextID);

	if(mPrunerNeedsUpdating)
	{
		mSQLock.lock();

		// Re-check under the lock: another thread may have finished the commit meanwhile.
		if(mPrunerNeedsUpdating)
		{
			//if(mBatchUserUpdates)
				processDirtyLists();

			const PxU32 nb = mPrunerExt.size();
			for(PxU32 i=0;i<nb;i++)
			{
				PrunerExt* pe = mPrunerExt[i];	// Can be NULL if the pruner has been removed
				if(!pe)
					continue;

				Pruner* pruner = pe->mPruner;
				if(pruner)
					pruner->commit();
			}

			if(mUsesTreeOfPruners)
				createTreeOfPruners();

			// Publish all the work above before clearing the flag that other
			// threads test without holding the lock.
			PxMemoryBarrier();
			mPrunerNeedsUpdating = false;
		}
		mSQLock.unlock();
	}
}
// Begins a user-driven buildstep.
// \return number of pruner slots; the caller schedules one customBuildstep() per slot.
PxU32 QuerySystem::startCustomBuildstep()
{
	PX_PROFILE_ZONE("QuerySystem.startCustomBuildstep", mContextID);

	// Reset the per-buildstep flag; the customBuildstep() calls may set it again.
	mTimestampNeedsUpdating = false;

	return mPrunerExt.size();
}
// Processes one pruner slot of a user-driven buildstep: flushes its dirty list,
// build-steps it if dynamic, and commits it. Safe to run per-slot (e.g. from tasks),
// since each call only touches its own pruner plus the shared mTimestampNeedsUpdating
// flag. NOTE(review): concurrent writes to that bool look benign (always set to true)
// — confirm the intended threading model with the callers.
// \param index pruner slot in [0, startCustomBuildstep())
void QuerySystem::customBuildstep(PxU32 index)
{
	PX_PROFILE_ZONE("QuerySystem.customBuildstep", mContextID);

	PX_ASSERT(index<mPrunerExt.size());

	// PT: TODO: would be better to not schedule the update of removed pruners at all
	PrunerExt* pe = mPrunerExt[index];	// Can be NULL if the pruner has been removed
	if(!pe)
		return;

	Pruner* pruner = pe->mPruner;

	//void QuerySystem::processDirtyLists()
	{
		PX_PROFILE_ZONE("QuerySystem.processDirtyLists", mContextID);
		// must already have acquired writer lock here
		const float inflation = 1.0f + mInflation;
		// PT: note that we don't use the mDirtyPruners array here
		if(pe->processDirtyList(mAdapter, inflation))
			mTimestampNeedsUpdating = true;
	}

	if(pruner)
	{
		if(pruner->isDynamic())
			static_cast<DynamicPruner*>(pruner)->buildStep(true);	// PT: "true" because that parameter was made for PxSceneQuerySystem::sceneQueryBuildStep(), not us

		pruner->commit();
	}
}
// Ends a user-driven buildstep: optionally rebuilds the tree of pruners, clears the
// pending-update flag, and applies the static-timestamp invalidation accumulated by
// the customBuildstep() calls.
void QuerySystem::finishCustomBuildstep()
{
	PX_PROFILE_ZONE("QuerySystem.finishCustomBuildstep", mContextID);

	if(mUsesTreeOfPruners)
		createTreeOfPruners();

	mPrunerNeedsUpdating = false;
	if(mTimestampNeedsUpdating)
		invalidateStaticTimestamp();

	// Dirty-pruner indices were not consumed per-pruner here (see customBuildstep), drop them.
	mDirtyPruners.clear();
}
// Batched external update: pushes new bounds/transforms for 'count' objects of one
// pruner in a single updateObjects() call. No-op for an empty batch or invalid pruner.
void QuerySystem::sync(PxU32 prunerIndex, const PrunerHandle* handles, const PxU32* boundsIndices, const PxBounds3* bounds, const PxTransform32* transforms, PxU32 count)
{
	if(!count)
		return;

	PrunerExt* entry = checkPrunerIndex(prunerIndex, mPrunerExt);
	if(!entry)
		return;

	Pruner* targetPruner = entry->mPruner;
	if(targetPruner)
		targetPruner->updateObjects(handles, count, mInflation, boundsIndices, bounds, transforms);
}
///////////////////////////////////////////////////////////////////////////////
namespace
{
	// Adapter callbacks used when a tree-of-pruners BVH is present: the BVH reports a
	// boundsIndex (treated as a pruner index) and these callbacks forward the query to
	// the matching pruner. Each one aborts the whole traversal (returns false) as soon
	// as the inner pruner query asks to stop.

	struct LocalRaycastCB : PxBVH::RaycastCallback
	{
		LocalRaycastCB(const PxArray<QuerySystem::PrunerExt*>& pruners, const PrunerFilter* prunerFilter, const PxVec3& origin, const PxVec3& unitDir, PrunerRaycastCallback& cb) :
			mPrunerExt(pruners), mPrunerFilter(prunerFilter), mOrigin(origin), mUnitDir(unitDir), mCB(cb)	{}

		virtual bool reportHit(PxU32 boundsIndex, PxReal& distance)
		{
			QuerySystem::PrunerExt* pe = mPrunerExt[boundsIndex];	// Can be NULL if the pruner has been removed
			if(pe && (!mPrunerFilter || mPrunerFilter->processPruner(boundsIndex)))
			{
				Pruner* pruner = pe->mPruner;
				if(!pruner->raycast(mOrigin, mUnitDir, distance, mCB))
					return false;
			}
			return true;
		}

		const PxArray<QuerySystem::PrunerExt*>&	mPrunerExt;
		const PrunerFilter*						mPrunerFilter;
		const PxVec3&							mOrigin;
		const PxVec3&							mUnitDir;
		PrunerRaycastCallback&					mCB;

		PX_NOCOPY(LocalRaycastCB)
	};

	struct LocalOverlapCB : PxBVH::OverlapCallback
	{
		LocalOverlapCB(const PxArray<QuerySystem::PrunerExt*>& pruners, const PrunerFilter* prunerFilter, const ShapeData& queryVolume, PrunerOverlapCallback& cb) :
			mPrunerExt(pruners), mPrunerFilter(prunerFilter), mQueryVolume(queryVolume), mCB(cb)	{}

		virtual bool reportHit(PxU32 boundsIndex)
		{
			QuerySystem::PrunerExt* pe = mPrunerExt[boundsIndex];	// Can be NULL if the pruner has been removed
			if(pe && (!mPrunerFilter || mPrunerFilter->processPruner(boundsIndex)))
			{
				Pruner* pruner = pe->mPruner;
				if(!pruner->overlap(mQueryVolume, mCB))
					return false;
			}
			return true;
		}

		const PxArray<QuerySystem::PrunerExt*>&	mPrunerExt;
		const PrunerFilter*						mPrunerFilter;
		const ShapeData&						mQueryVolume;
		PrunerOverlapCallback&					mCB;

		PX_NOCOPY(LocalOverlapCB)
	};

	struct LocalSweepCB : PxBVH::RaycastCallback
	{
		LocalSweepCB(const PxArray<QuerySystem::PrunerExt*>& pruners, const PrunerFilter* prunerFilter, const ShapeData& queryVolume, const PxVec3& unitDir, PrunerRaycastCallback& cb) :
			mPrunerExt(pruners), mPrunerFilter(prunerFilter), mQueryVolume(queryVolume), mUnitDir(unitDir), mCB(cb)	{}

		virtual bool reportHit(PxU32 boundsIndex, PxReal& distance)
		{
			QuerySystem::PrunerExt* pe = mPrunerExt[boundsIndex];	// Can be NULL if the pruner has been removed
			if(pe && (!mPrunerFilter || mPrunerFilter->processPruner(boundsIndex)))
			{
				Pruner* pruner = pe->mPruner;
				if(!pruner->sweep(mQueryVolume, mUnitDir, distance, mCB))
					return false;
			}
			return true;
		}

		const PxArray<QuerySystem::PrunerExt*>&	mPrunerExt;
		const PrunerFilter*						mPrunerFilter;
		const ShapeData&						mQueryVolume;
		const PxVec3&							mUnitDir;
		PrunerRaycastCallback&					mCB;

		PX_NOCOPY(LocalSweepCB)
	};
}
void QuerySystem::raycast(const PxVec3& origin, const PxVec3& unitDir, float& inOutDistance, PrunerRaycastCallback& cb, const PrunerFilter* prunerFilter) const
{
if(mTreeOfPruners)
{
LocalRaycastCB localCB(mPrunerExt, prunerFilter, origin, unitDir, cb);
mTreeOfPruners->raycast(origin, unitDir, inOutDistance, localCB, PxGeometryQueryFlag::Enum(0));
}
else
{
const PxU32 nb = mPrunerExt.size();
for(PxU32 i=0;i<nb;i++)
{
PrunerExt* pe = mPrunerExt[i]; // Can be NULL if the pruner has been removed
if(!pe)
continue;
if(!prunerFilter || prunerFilter->processPruner(i))
{
Pruner* pruner = pe->mPruner;
if(!pruner->raycast(origin, unitDir, inOutDistance, cb))
return;
}
}
}
}
void QuerySystem::overlap(const ShapeData& queryVolume, PrunerOverlapCallback& cb, const PrunerFilter* prunerFilter) const
{
if(mTreeOfPruners)
{
LocalOverlapCB localCB(mPrunerExt, prunerFilter, queryVolume, cb);
mTreeOfPruners->overlap(queryVolume, localCB, PxGeometryQueryFlag::Enum(0));
}
else
{
const PxU32 nb = mPrunerExt.size();
for(PxU32 i=0;i<nb;i++)
{
PrunerExt* pe = mPrunerExt[i]; // Can be NULL if the pruner has been removed
if(!pe)
continue;
if(!prunerFilter || prunerFilter->processPruner(i))
{
Pruner* pruner = pe->mPruner;
if(!pruner->overlap(queryVolume, cb))
return;
}
}
}
}
void QuerySystem::sweep(const ShapeData& queryVolume, const PxVec3& unitDir, float& inOutDistance, PrunerRaycastCallback& cb, const PrunerFilter* prunerFilter) const
{
if(mTreeOfPruners)
{
LocalSweepCB localCB(mPrunerExt, prunerFilter, queryVolume, unitDir, cb);
mTreeOfPruners->sweep(queryVolume, unitDir, inOutDistance, localCB, PxGeometryQueryFlag::Enum(0));
}
else
{
const PxU32 nb = mPrunerExt.size();
for(PxU32 i=0;i<nb;i++)
{
PrunerExt* pe = mPrunerExt[i]; // Can be NULL if the pruner has been removed
if(!pe)
continue;
if(!prunerFilter || prunerFilter->processPruner(i))
{
Pruner* pruner = pe->mPruner;
if(!pruner->sweep(queryVolume, unitDir, inOutDistance, cb))
return;
}
}
}
}
// (Re)builds the BVH over the pruners' global bounds ("tree of pruners"), used by
// raycast/overlap/sweep to cull entire pruners before traversing them.
void QuerySystem::createTreeOfPruners()
{
	PX_PROFILE_ZONE("QuerySystem.createTreeOfPruners", mContextID);

	PX_DELETE(mTreeOfPruners);

	mTreeOfPruners = PX_NEW(BVH)(NULL);

	const PxU32 nb = mPrunerExt.size();

	// +1 so the alloca is never zero-sized when there are no pruners.
	PxBounds3* prunerBounds = reinterpret_cast<PxBounds3*>(PxAlloca(sizeof(PxBounds3)*(nb+1)));

	PxU32 nbBounds = 0;
	for(PxU32 i=0;i<nb;i++)
	{
		PrunerExt* pe = mPrunerExt[i];	// Can be NULL if the pruner has been removed
		// Fix: guard against removed pruners. Every other loop in this file checks for a
		// NULL slot, but this one dereferenced 'pe' unconditionally and would crash when
		// createTreeOfPruners() ran after removePruner().
		if(!pe)
			continue;
		// NOTE(review): the BVH's boundsIndex is used as a pruner index in the Local*CB
		// callbacks; skipping entries here (as the pre-existing NULL-mPruner skip below
		// already did) shifts that mapping — TODO confirm removal/NULL-pruner slots and
		// mUsesTreeOfPruners cannot coexist.
		Pruner* pruner = pe->mPruner;
		if(pruner)
			pruner->getGlobalBounds(prunerBounds[nbBounds++]);
	}

	mTreeOfPruners->init(nbBounds, NULL, prunerBounds, sizeof(PxBounds3), BVH_SPLATTER_POINTS, 1, 0.01f);
}
| 24,617 | C++ | 28.660241 | 187 | 0.720153 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuInternal.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_INTERNAL_H
#define GU_INTERNAL_H
#include "geometry/PxCapsuleGeometry.h"
#include "geometry/PxBoxGeometry.h"
#include "GuCapsule.h"
#include "foundation/PxTransform.h"
#include "foundation/PxMathUtils.h"
#include "foundation/PxUtilities.h"
#include "foundation/PxMat33.h"
#define GU_EPSILON_SAME_DISTANCE 1e-3f
namespace physx
{
namespace Gu
{
class Box;
// PT: TODO: now that the Gu files are not exposed to users anymore, we should move back capsule-related functions
// to GuCapsule.h, etc
PX_PHYSX_COMMON_API const PxU8* getBoxEdges();
PX_PHYSX_COMMON_API void computeBoxPoints(const PxBounds3& bounds, PxVec3* PX_RESTRICT pts);
void computeBoxAroundCapsule(const Capsule& capsule, Box& box);
PxPlane getPlane(const PxTransform& pose);
	// Vector from the capsule's center to one endpoint of its core segment: the capsule
	// is aligned with the pose's X axis (basis vector 0), scaled by halfHeight.
	PX_FORCE_INLINE PxVec3 getCapsuleHalfHeightVector(const PxTransform& transform, const PxCapsuleGeometry& capsuleGeom)
	{
		return transform.q.getBasisVector0() * capsuleGeom.halfHeight;
	}
PX_FORCE_INLINE void getCapsuleSegment(const PxTransform& transform, const PxCapsuleGeometry& capsuleGeom, Gu::Segment& segment)
{
const PxVec3 tmp = getCapsuleHalfHeightVector(transform, capsuleGeom);
segment.p0 = transform.p + tmp;
segment.p1 = transform.p - tmp;
}
PX_FORCE_INLINE void getCapsule(Gu::Capsule& capsule, const PxCapsuleGeometry& capsuleGeom, const PxTransform& pose)
{
getCapsuleSegment(pose, capsuleGeom, capsule);
capsule.radius = capsuleGeom.radius;
}
void computeSweptBox(Gu::Box& box, const PxVec3& extents, const PxVec3& center, const PxMat33& rot, const PxVec3& unitDir, const PxReal distance);
/**
* PT: computes "alignment value" used to select the "best" triangle in case of identical impact distances (for sweeps).
* This simply computes how much a triangle is aligned with a given sweep direction.
* Captured in a function to make sure it is always computed correctly, i.e. working for double-sided triangles.
*
* \param triNormal [in] triangle's normal
* \param unitDir [in] sweep direction (normalized)
* \return alignment value in [-1.0f, 0.0f]. -1.0f for fully aligned, 0.0f for fully orthogonal.
*/
	PX_FORCE_INLINE PxReal computeAlignmentValue(const PxVec3& triNormal, const PxVec3& unitDir)
	{
		// Both inputs must be unit-length for the dot product to be a pure cosine.
		PX_ASSERT(triNormal.isNormalized());
		// PT: initial dot product gives the angle between the two, with "best" triangles getting a +1 or -1 score
		// depending on their winding. We take the absolute value to ignore the impact of winding. We negate the result
		// to make the function compatible with the initial code, which assumed single-sided triangles and expected -1
		// for best triangles.
		return -PxAbs(triNormal.dot(unitDir));
	}
/**
* PT: sweeps: determines if a newly touched triangle is "better" than best one so far.
* In this context "better" means either clearly smaller impact distance, or a similar impact
* distance but a normal more aligned with the sweep direction.
*
* \param triImpactDistance [in] new triangle's impact distance
* \param triAlignmentValue [in] new triangle's alignment value (as computed by computeAlignmentValue)
* \param bestImpactDistance [in] current best triangle's impact distance
* \param bestAlignmentValue [in] current best triangle's alignment value (as computed by computeAlignmentValue)
* \param maxDistance [in] maximum distance of the query, hit cannot be longer than this maxDistance
* \return true if new triangle is better
*/
	PX_FORCE_INLINE bool keepTriangle(	float triImpactDistance, float triAlignmentValue,
										float bestImpactDistance, float bestAlignmentValue, float maxDistance)
	{
		// Reject triangle if further than the maxDistance
		if(triImpactDistance > maxDistance)
			return false;

		// If initial overlap happens, keep the triangle
		if(triImpactDistance == 0.0f)
			return true;

		// tris have "similar" impact distances if the difference is smaller than 2*distEpsilon
		float distEpsilon = GU_EPSILON_SAME_DISTANCE; // pick a farther hit within distEpsilon that is more opposing than the previous closest hit

		// PT: make it a relative epsilon to make sure it still works with large distances
		distEpsilon *= PxMax(1.0f, PxMax(triImpactDistance, bestImpactDistance));

		// If new distance is more than epsilon closer than old distance
		if(triImpactDistance < bestImpactDistance - distEpsilon)
			return true;

		// If new distance is no more than epsilon farther than oldDistance and "face is more opposing than previous"
		// (alignment values are negative; smaller means more aligned with the sweep direction — see computeAlignmentValue)
		if(triImpactDistance < bestImpactDistance+distEpsilon && triAlignmentValue < bestAlignmentValue)
			return true;

		// If alignment value is the same, but the new triangle is closer than the best distance
		if(triAlignmentValue == bestAlignmentValue && triImpactDistance < bestImpactDistance)
			return true;

		return false;
	}
PX_FORCE_INLINE bool keepTriangleBasic(float triImpactDistance, float bestImpactDistance, float maxDistance)
{
// Reject triangle if further than the maxDistance
if(triImpactDistance > maxDistance)
return false;
// If initial overlap happens, keep the triangle
if(triImpactDistance == 0.0f)
return true;
// If new distance is more than epsilon closer than old distance
if(triImpactDistance < bestImpactDistance)
return true;
return false;
}
	//! Cross product with the unit X axis: (1,0,0) x b
	PX_FORCE_INLINE PxVec3 cross100(const PxVec3& b)
	{
		return PxVec3(0.0f, -b.z, b.y);
	}
	//! Cross product with the unit Y axis: (0,1,0) x b
	PX_FORCE_INLINE PxVec3 cross010(const PxVec3& b)
	{
		return PxVec3(b.z, 0.0f, -b.x);
	}
	//! Cross product with the unit Z axis: (0,0,1) x b
	PX_FORCE_INLINE PxVec3 cross001(const PxVec3& b)
	{
		return PxVec3(-b.y, b.x, 0.0f);
	}
//! Compute point as combination of barycentric coordinates
PX_FORCE_INLINE PxVec3 computeBarycentricPoint(const PxVec3& p0, const PxVec3& p1, const PxVec3& p2, PxReal u, PxReal v)
{
// This seems to confuse the compiler...
// return (1.0f - u - v)*p0 + u*p1 + v*p2;
const PxF32 w = 1.0f - u - v;
return PxVec3(w * p0.x + u * p1.x + v * p2.x, w * p0.y + u * p1.y + v * p2.y, w * p0.z + u * p1.z + v * p2.z);
}
PX_FORCE_INLINE PxReal computeTetrahedronVolume(const PxVec3& x0, const PxVec3& x1, const PxVec3& x2, const PxVec3& x3, PxMat33& edgeMatrix)
{
const PxVec3 u1 = x1 - x0;
const PxVec3 u2 = x2 - x0;
const PxVec3 u3 = x3 - x0;
edgeMatrix = PxMat33(u1, u2, u3);
const PxReal det = edgeMatrix.getDeterminant();
const PxReal volume = det / 6.0f;
return volume;
}
PX_FORCE_INLINE PxReal computeTetrahedronVolume(const PxVec3& x0, const PxVec3& x1, const PxVec3& x2, const PxVec3& x3)
{
PxMat33 edgeMatrix;
return computeTetrahedronVolume(x0, x1, x2, x3, edgeMatrix);
}
// IndexType should be PxU16 or PxU32.
template<typename IndexType>
PX_FORCE_INLINE PxReal computeTriangleMeshVolume(const PxVec3* vertices, const IndexType* indices,
const PxU32 numTriangles)
{
// See https://twitter.com/keenanisalive/status/1437178786286653445?lang=en
float volume = 0.0f;
for(PxU32 i = 0; i < numTriangles; ++i)
{
PxVec3 v0 = vertices[indices[3*i]];
PxVec3 v1 = vertices[indices[3 * i + 1]];
PxVec3 v2 = vertices[indices[3 * i + 2]];
PxVec3 v0v1 = v0.cross(v1);
volume += v0v1.dot(v2);
}
return volume / 6.0f;
}
// IndexType should be PxU16 or PxU32.
// W in PxVec4 of vertices are ignored.
template <typename IndexType>
PX_FORCE_INLINE PxReal computeTriangleMeshVolume(const PxVec4* vertices, const IndexType* indices,
const PxU32 numTriangles)
{
// See https://twitter.com/keenanisalive/status/1437178786286653445?lang=en
float volume = 0.0f;
for(PxU32 i = 0; i < numTriangles; ++i)
{
PxVec3 v0 = vertices[indices[3 * i]].getXYZ();
PxVec3 v1 = vertices[indices[3 * i + 1]].getXYZ();
PxVec3 v2 = vertices[indices[3 * i + 2]].getXYZ();
PxVec3 v0v1 = v0.cross(v1);
volume += v0v1.dot(v2);
}
return volume / 6.0f;
}
/*!
Extend an edge along its length by a factor
*/
PX_FORCE_INLINE void makeFatEdge(PxVec3& p0, PxVec3& p1, PxReal fatCoeff)
{
PxVec3 delta = p1 - p0;
const PxReal m = delta.magnitude();
if (m > 0.0f)
{
delta *= fatCoeff / m;
p0 -= delta;
p1 += delta;
}
}
#if 0
	/*!
	Extend an edge along its length by a factor
	*/
	// NOTE(review): disabled SIMD variant of makeFatEdge, kept for reference.
	// If re-enabled, note that V3ScaleInv divides by the edge length before the
	// zero-length case is selected out by V3Sel - confirm this is acceptable.
	PX_FORCE_INLINE void makeFatEdge(aos::Vec3V& p0, aos::Vec3V& p1, const aos::FloatVArg fatCoeff)
	{
		const aos::Vec3V delta = aos::V3Sub(p1, p0);
		const aos::FloatV m = aos::V3Length(delta);
		const aos::BoolV con = aos::FIsGrtr(m, aos::FZero());
		const aos::Vec3V fatDelta = aos::V3Scale(aos::V3ScaleInv(delta, m), fatCoeff);
		p0 = aos::V3Sel(con, aos::V3Sub(p0, fatDelta), p0);
		p1 = aos::V3Sel(con, aos::V3Add(p1, fatDelta), p1);
	}
#endif
PX_FORCE_INLINE PxU32 closestAxis(const PxVec3& v, PxU32& j, PxU32& k)
{
// find largest 2D plane projection
const PxF32 absPx = PxAbs(v.x);
const PxF32 absNy = PxAbs(v.y);
const PxF32 absNz = PxAbs(v.z);
PxU32 m = 0; // x biggest axis
j = 1;
k = 2;
if (absNy > absPx && absNy > absNz)
{
// y biggest
j = 2;
k = 0;
m = 1;
}
else if (absNz > absPx)
{
// z biggest
j = 0;
k = 1;
m = 2;
}
return m;
}
PX_FORCE_INLINE bool isAlmostZero(const PxVec3& v)
{
if (PxAbs(v.x) > 1e-6f || PxAbs(v.y) > 1e-6f || PxAbs(v.z) > 1e-6f)
return false;
return true;
}
} // namespace Gu
}
#endif
| 11,197 | C | 34.66242 | 153 | 0.700188 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuIncrementalAABBPrunerCore.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "CmVisualization.h"
#include "GuIncrementalAABBPrunerCore.h"
#include "GuSqInternal.h"
#include "GuIncrementalAABBTree.h"
#include "GuCallbackAdapter.h"
#include "GuAABBTree.h"
#include "GuAABBTreeQuery.h"
#include "GuSphere.h"
#include "GuBox.h"
#include "GuCapsule.h"
#include "GuQuery.h"
using namespace physx;
using namespace Gu;
#define PARANOIA_CHECKS 0
// PT: TODO: this is copied from SqBounds.h, should be either moved to Gu and shared or passed as a user parameter
#define SQ_PRUNER_EPSILON 0.005f
#define SQ_PRUNER_INFLATION (1.0f + SQ_PRUNER_EPSILON) // pruner test shape inflation (not narrow phase shape)
// Double-buffered incremental pruner: objects are inserted into the "current"
// tree; the "last" tree holds the previous batch until removeMarkedObjects
// releases it.
IncrementalAABBPrunerCore::IncrementalAABBPrunerCore(const PruningPool* pool) :
	mCurrentTree	(1),
	mLastTree		(0),
	mPool			(pool)
{
	// Preallocate the object->leaf maps and the scratch leaf list to avoid
	// rehashing/reallocation during the first insertions.
	mAABBTree[0].mapping.reserve(256);
	mAABBTree[1].mapping.reserve(256);
	mChangedLeaves.reserve(32);
}
IncrementalAABBPrunerCore::~IncrementalAABBPrunerCore()
{
	// Frees both trees and clears the mappings.
	release();
}
void IncrementalAABBPrunerCore::release() // this can be called from purge()
{
for(PxU32 i = 0; i < NUM_TREES; i++)
{
PX_DELETE(mAABBTree[i].tree);
mAABBTree[i].mapping.clear();
mAABBTree[i].timeStamp = 0;
}
mCurrentTree = 1;
mLastTree = 0;
}
// Inserts a pool object into the current tree and records its leaf in the
// object->leaf mapping. Returns true (insertion cannot fail here).
bool IncrementalAABBPrunerCore::addObject(const PoolIndex poolIndex, PxU32 timeStamp)
{
	CoreTree& tree = mAABBTree[mCurrentTree];
	// Lazily create the current tree on first insertion, and stamp it with the
	// timestamp of the first batch it receives.
	if(!tree.tree || !tree.tree->getNodes())
	{
		if(!tree.tree)
			tree.tree = PX_NEW(IncrementalAABBTree)();
		tree.timeStamp = timeStamp;
	}
	PX_ASSERT(tree.timeStamp == timeStamp);

	// insert can split leaves; mChangedLeaves receives every leaf whose
	// primitive list changed so the mapping can be refreshed below.
	mChangedLeaves.clear();
	IncrementalAABBTreeNode* node = tree.tree->insert(poolIndex, mPool->getCurrentWorldBoxes(), mChangedLeaves);
	updateMapping(tree.mapping, poolIndex, node);

#if PARANOIA_CHECKS
	test();
#endif

	return true;
}
// Refreshes the object->leaf mapping after a tree insert/update. 'node' is the
// leaf returned by the tree operation; mChangedLeaves holds any additional
// leaves whose primitive lists changed (e.g. after a split).
void IncrementalAABBPrunerCore::updateMapping(IncrementalPrunerMap& mapping, const PoolIndex poolIndex, IncrementalAABBTreeNode* node)
{
	// if some node leaves changed, we need to update mapping
	if(!mChangedLeaves.empty())
	{
		// Remap every primitive of the returned leaf (it may now hold
		// primitives that previously lived in another leaf).
		if(node && node->isLeaf())
		{
			for(PxU32 j = 0; j < node->getNbPrimitives(); j++)
			{
				const PoolIndex index = node->getPrimitives(NULL)[j];
				mapping[index] = node;
			}
		}
		// Remap every primitive of each changed leaf.
		for(PxU32 i = 0; i < mChangedLeaves.size(); i++)
		{
			IncrementalAABBTreeNode* changedNode = mChangedLeaves[i];
			PX_ASSERT(changedNode->isLeaf());

			for(PxU32 j = 0; j < changedNode->getNbPrimitives(); j++)
			{
				const PoolIndex index = changedNode->getPrimitives(NULL)[j];
				mapping[index] = changedNode;
			}
		}
	}
	else
	{
		// No structural change: only the new object needs a mapping entry.
		PX_ASSERT(node->isLeaf());
		mapping[poolIndex] = node;
	}
}
// Removes a pool object from whichever tree contains it, and patches up the
// mapping for the pool object that gets swapped into the freed slot
// (poolRelocatedLastIndex). Outputs the timestamp of the tree the object
// belonged to. Returns false if the object was not found in either tree.
bool IncrementalAABBPrunerCore::removeObject(const PoolIndex poolIndex, const PoolIndex poolRelocatedLastIndex, PxU32& timeStamp)
{
	// erase the entry and get the data; try the last tree first
	IncrementalPrunerMap::Entry entry;
	bool foundEntry = true;
	const PxU32 treeIndex = mAABBTree[mLastTree].mapping.erase(poolIndex, entry) ? mLastTree : mCurrentTree;
	// if it was not found in the last tree look at the current tree
	if(treeIndex == mCurrentTree)
		foundEntry = mAABBTree[mCurrentTree].mapping.erase(poolIndex, entry);

	// Entry was found in neither tree - something is wrong, bail out.
	// PT: removed assert to avoid crashing all UTs
	// PX_ASSERT(foundEntry);
	if(!foundEntry)
		return false;

	// tree must exist
	PX_ASSERT(mAABBTree[treeIndex].tree);
	CoreTree& tree = mAABBTree[treeIndex];
	timeStamp = tree.timeStamp;

	// remove the poolIndex from the tree, update the tree bounds immediately
	IncrementalAABBTreeNode* node = tree.tree->remove(entry.second, poolIndex, mPool->getCurrentWorldBoxes());
	// The removal may have merged leaves: remap the surviving leaf's primitives.
	if(node && node->isLeaf())
	{
		for(PxU32 j = 0; j < node->getNbPrimitives(); j++)
		{
			const PoolIndex index = node->getPrimitives(NULL)[j];
			tree.mapping[index] = node;
		}
	}

	// nothing to swap, last object, early exit
	if(poolIndex == poolRelocatedLastIndex)
	{
#if PARANOIA_CHECKS
		test();
#endif
		return true;
	}

	// fix the indices, we need to swap the index with last index
	// erase the relocated index from the tree it is in (current tree first)
	IncrementalPrunerMap::Entry relocatedEntry;
	const PxU32 treeRelocatedIndex = mAABBTree[mCurrentTree].mapping.erase(poolRelocatedLastIndex, relocatedEntry) ? mCurrentTree : mLastTree;
	foundEntry = true;
	if(treeRelocatedIndex == mLastTree)
		foundEntry = mAABBTree[mLastTree].mapping.erase(poolRelocatedLastIndex, relocatedEntry);

	if(foundEntry)
	{
		CoreTree& relocatedTree = mAABBTree[treeRelocatedIndex];

		// set the new mapping
		relocatedTree.mapping[poolIndex] = relocatedEntry.second;
		// update the tree indices - swap
		relocatedTree.tree->fixupTreeIndices(relocatedEntry.second, poolRelocatedLastIndex, poolIndex);
	}

#if PARANOIA_CHECKS
	test();
#endif

	return true;
}
// Patches the mapping and the tree after the pruning pool moved the object at
// poolRelocatedLastIndex into the slot poolIndex (swap-with-last removal done
// by the pool). Mirrors the tail of removeObject().
void IncrementalAABBPrunerCore::swapIndex(const PoolIndex poolIndex, const PoolIndex poolRelocatedLastIndex)
{
	// fix the indices, we need to swap the index with last index
	// erase the relocated index from the tree it is in (current tree first)
	IncrementalPrunerMap::Entry relocatedEntry;
	const PxU32 treeRelocatedIndex = mAABBTree[mCurrentTree].mapping.erase(poolRelocatedLastIndex, relocatedEntry) ? mCurrentTree : mLastTree;
	bool foundEntry = true;
	if(treeRelocatedIndex == mLastTree)
		foundEntry = mAABBTree[mLastTree].mapping.erase(poolRelocatedLastIndex, relocatedEntry);

	// relocated index is not here
	if(!foundEntry)
		return;

	CoreTree& relocatedTree = mAABBTree[treeRelocatedIndex];

	// set the new mapping
	relocatedTree.mapping[poolIndex] = relocatedEntry.second;
	// update the tree indices - swap
	relocatedTree.tree->fixupTreeIndices(relocatedEntry.second, poolRelocatedLastIndex, poolIndex);
}
// Refits the tree leaf containing poolIndex after its bounds changed.
// Returns false if the object is not mapped in either tree.
bool IncrementalAABBPrunerCore::updateObject(const PoolIndex poolIndex)
{
	// Look for the object in the last tree first, then the current tree.
	const IncrementalPrunerMap::Entry* entry = mAABBTree[mLastTree].mapping.find(poolIndex);
	const PxU32 treeIndex = entry ? mLastTree : mCurrentTree;
	if(!entry)
		entry = mAABBTree[mCurrentTree].mapping.find(poolIndex);

	// we have not found it
	PX_ASSERT(entry);
	if(!entry)
		return false;

	CoreTree& tree = mAABBTree[treeIndex];
	// updateFast may move the object to a different leaf and/or split leaves;
	// refresh the mapping when that happens.
	mChangedLeaves.clear();
	IncrementalAABBTreeNode* node = tree.tree->updateFast(entry->second, poolIndex, mPool->getCurrentWorldBoxes(), mChangedLeaves);
	if(!mChangedLeaves.empty() || node != entry->second)
		updateMapping(tree.mapping, poolIndex, node);

#if PARANOIA_CHECKS
	test(false);
#endif

	return true;
}
// Releases the whole last tree (whose objects carry the given timestamp) in
// one go and returns the number of objects that were removed.
PxU32 IncrementalAABBPrunerCore::removeMarkedObjects(PxU32 timeStamp)
{
	// early exit if no last tree exists
	if(!mAABBTree[mLastTree].tree || !mAABBTree[mLastTree].tree->getNodes())
	{
		PX_ASSERT(mAABBTree[mLastTree].mapping.size() == 0);
		PX_ASSERT(!mAABBTree[mCurrentTree].tree || mAABBTree[mCurrentTree].timeStamp != timeStamp);
		return 0;
	}

	PX_UNUSED(timeStamp);
	PX_ASSERT(timeStamp == mAABBTree[mLastTree].timeStamp);

	// release the last tree
	CoreTree& tree = mAABBTree[mLastTree];
	PxU32 nbObjects = tree.mapping.size();
	tree.mapping.clear();
	tree.timeStamp = 0;

	tree.tree->release();

	return nbObjects;
}
// Runs an overlap query against both trees. Returns false as soon as the
// callback asks to stop ('again' becomes false), true otherwise.
bool IncrementalAABBPrunerCore::overlap(const ShapeData& queryVolume, PrunerOverlapCallback& pcbArgName) const
{
	bool again = true;

	OverlapCallbackAdapter pcb(pcbArgName, *mPool);

	// Query each tree in turn; skip empty trees and stop early once the
	// callback aborted the query.
	for(PxU32 i = 0; i < NUM_TREES; i++)
	{
		const CoreTree& tree = mAABBTree[i];
		if(tree.tree && tree.tree->getNodes() && again)
		{
			// Pick the tightest tree-level test for the query volume's type.
			switch(queryVolume.getType())
			{
			case PxGeometryType::eBOX:
				{
					if(queryVolume.isOBB())
					{
						const DefaultOBBAABBTest test(queryVolume);
						again = AABBTreeOverlap<true, OBBAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, OverlapCallbackAdapter>()(mPool->getCurrentAABBTreeBounds(), *tree.tree, test, pcb);
					}
					else
					{
						const DefaultAABBAABBTest test(queryVolume);
						again = AABBTreeOverlap<true, AABBAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, OverlapCallbackAdapter>()(mPool->getCurrentAABBTreeBounds(), *tree.tree, test, pcb);
					}
				}
				break;
			case PxGeometryType::eCAPSULE:
				{
					const DefaultCapsuleAABBTest test(queryVolume, SQ_PRUNER_INFLATION);
					again = AABBTreeOverlap<true, CapsuleAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, OverlapCallbackAdapter>()(mPool->getCurrentAABBTreeBounds(), *tree.tree, test, pcb);
				}
				break;
			case PxGeometryType::eSPHERE:
				{
					const DefaultSphereAABBTest test(queryVolume);
					again = AABBTreeOverlap<true, SphereAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, OverlapCallbackAdapter>()(mPool->getCurrentAABBTreeBounds(), *tree.tree, test, pcb);
				}
				break;
			case PxGeometryType::eCONVEXMESH:
				{
					// Convex meshes are tested through their OBB.
					const DefaultOBBAABBTest test(queryVolume);
					again = AABBTreeOverlap<true, OBBAABBTest, IncrementalAABBTree, IncrementalAABBTreeNode, OverlapCallbackAdapter>()(mPool->getCurrentAABBTreeBounds(), *tree.tree, test, pcb);
				}
				break;
			default:
				PX_ALWAYS_ASSERT_MESSAGE("unsupported overlap query volume geometry type");
			}
		}
	}

	return again;
}
// Sweeps the query volume's inflated AABB through both trees.
// inOutDistance is shrunk as closer hits are reported; returns false as soon
// as the callback asks to stop.
bool IncrementalAABBPrunerCore::sweep(const ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& pcbArgName) const
{
	bool again = true;

	RaycastCallbackAdapter pcb(pcbArgName, *mPool);

	for(PxU32 i = 0; i < NUM_TREES; i++)
	{
		const CoreTree& tree = mAABBTree[i];
		if(tree.tree && tree.tree->getNodes() && again)
		{
			// Swept-AABB query: center + extents of the pruner-inflated box.
			const PxBounds3& aabb = queryVolume.getPrunerInflatedWorldAABB();
			again = AABBTreeRaycast<true, true, IncrementalAABBTree, IncrementalAABBTreeNode, RaycastCallbackAdapter>()(mPool->getCurrentAABBTreeBounds(), *tree.tree, aabb.getCenter(), unitDir, inOutDistance, aabb.getExtents(), pcb);
		}
	}

	return again;
}
// Raycasts against both trees. inOutDistance is shrunk as closer hits are
// reported; returns false as soon as the callback asks to stop.
bool IncrementalAABBPrunerCore::raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& pcbArgName) const
{
	bool again = true;

	RaycastCallbackAdapter pcb(pcbArgName, *mPool);

	for(PxU32 i = 0; i < NUM_TREES; i++)
	{
		const CoreTree& tree = mAABBTree[i];
		if(tree.tree && tree.tree->getNodes() && again)
		{
			// Zero extents = pure ray (as opposed to the swept-box in sweep()).
			again = AABBTreeRaycast<false, true, IncrementalAABBTree, IncrementalAABBTreeNode, RaycastCallbackAdapter>()(mPool->getCurrentAABBTreeBounds(), *tree.tree, origin, unitDir, inOutDistance, PxVec3(0.0f), pcb);
		}
	}

	return again;
}
// Returns the union of both trees' root bounds (empty if both trees are empty).
void IncrementalAABBPrunerCore::getGlobalBounds(PxBounds3& bounds) const
{
	bounds.setEmpty();

	// PT: TODO: optimize this
	for(PxU32 i=0; i<NUM_TREES; i++)
	{
		const CoreTree& tree = mAABBTree[i];
		if(tree.tree && tree.tree->getNodes())
		{
			// The root node's min/max are the whole tree's bounds.
			PxBounds3 tmp;
			StoreBounds(tmp, tree.tree->getNodes()->mBVMin, tree.tree->getNodes()->mBVMax);
			bounds.include(tmp);
		}
	}
}
void IncrementalAABBPrunerCore::shiftOrigin(const PxVec3& shift)
{
for(PxU32 i = 0; i < NUM_TREES; i++)
{
if(mAABBTree[i].tree)
{
mAABBTree[i].tree->shiftOrigin(shift);
}
}
}
// Debug-renders both trees with the given color.
void IncrementalAABBPrunerCore::visualize(PxRenderOutput& out, PxU32 color) const
{
	for(PxU32 i = 0; i < NUM_TREES; i++)
	{
		visualizeTree(out, color, mAABBTree[i].tree);

		// Render added objects not yet in the tree
		//out << PxTransform(PxIdentity);
		//out << PxU32(PxDebugColor::eARGB_WHITE);
	}
}
// Debug-only consistency check (enabled through PARANOIA_CHECKS): validates
// the tree hierarchy and verifies that every mapping entry points at the leaf
// that actually contains the object.
void IncrementalAABBPrunerCore::test(bool hierarchyCheck)
{
	// Max leaf depth per tree - currently only gathered (e.g. to inspect in a
	// debugger), not reported anywhere.
	PxU32 maxDepth[NUM_TREES] = { 0, 0 };
	for(PxU32 i=0; i<NUM_TREES; i++)
	{
		if(mAABBTree[i].tree)
		{
			if(hierarchyCheck)
				mAABBTree[i].tree->hierarchyCheck(mPool->getCurrentWorldBoxes());
			for(IncrementalPrunerMap::Iterator iter = mAABBTree[i].mapping.getIterator(); !iter.done(); ++iter)
			{
				mAABBTree[i].tree->checkTreeLeaf(iter->second, iter->first);
				const PxU32 depth = mAABBTree[i].tree->getTreeLeafDepth(iter->second);
				if(depth > maxDepth[i])
					maxDepth[i] = depth;
			}
		}
	}
}
| 13,317 | C++ | 31.169082 | 224 | 0.735601 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuSweepSharedTests.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "geometry/PxConvexMeshGeometry.h"
#include "geometry/PxSphereGeometry.h"
#include "GuSweepTests.h"
#include "GuHeightFieldUtil.h"
#include "CmScaling.h"
#include "GuConvexMesh.h"
#include "GuIntersectionRayPlane.h"
#include "GuVecBox.h"
#include "GuVecCapsule.h"
#include "GuVecConvexHull.h"
#include "GuSweepMTD.h"
#include "GuSweepSphereCapsule.h"
#include "GuSweepCapsuleCapsule.h"
#include "GuSweepTriangleUtils.h"
#include "GuSweepCapsuleTriangle.h"
#include "GuInternal.h"
#include "GuGJKRaycast.h"
using namespace physx;
using namespace Gu;
using namespace Cm;
using namespace physx::aos;
static const PxReal gEpsilon = .01f;
//#define USE_VIRTUAL_GJK
#ifdef USE_VIRTUAL_GJK
// Non-inlined wrapper around the templated GJK raycast, used (when
// USE_VIRTUAL_GJK is defined) to funnel all shape pairs through the generic
// GjkConvex interface instead of instantiating the template per pair.
static bool virtualGjkRaycastPenetration(const GjkConvex& a, const GjkConvex& b, const aos::Vec3VArg initialDir, const aos::FloatVArg initialLambda, const aos::Vec3VArg s, const aos::Vec3VArg r, aos::FloatV& lambda,
	aos::Vec3V& normal, aos::Vec3V& closestA, const PxReal _inflation, const bool initialOverlap)
{
	return gjkRaycastPenetration<GjkConvex, GjkConvex >(a, b, initialDir, initialLambda, s, r, lambda, normal, closestA, _inflation, initialOverlap);
}
#endif
// Finds the polygon of the convex hull whose plane best matches the sweep
// impact point, i.e. the face index to report for the hit. The impact point is
// pulled back slightly along the sweep direction (gEpsilon) to avoid picking a
// back face, and each plane distance is tweaked by its alignment with the
// sweep direction to break ties towards opposing faces.
static PxU32 computeSweepConvexPlane(
	const PxConvexMeshGeometry& convexGeom, ConvexHullData* hullData, const PxU32& nbPolys, const PxTransform& pose,
	const PxVec3& impact_, const PxVec3& unitDir)
{
	PX_ASSERT(nbPolys);

	const PxVec3 impact = impact_ - unitDir * gEpsilon;

	// Work in the convex's local (shape) space.
	const PxVec3 localPoint = pose.transformInv(impact);
	const PxVec3 localDir = pose.rotateInv(unitDir);

	const FastVertex2ShapeScaling scaling(convexGeom.scale);

	PxU32 minIndex = 0;
	PxReal minD = PX_MAX_REAL;
	for(PxU32 j=0; j<nbPolys; j++)
	{
		const PxPlane& pl = hullData->mPolygons[j].mPlane;
		PxPlane plane;
		scaling.transformPlaneToShapeSpace(pl.n, pl.d, plane.n, plane.d);

		// Only consider planes the impact point lies in front of.
		PxReal d = plane.distance(localPoint);
		if(d<0.0f)
			continue;

		// Favor planes facing against the sweep direction.
		const PxReal tweak = plane.n.dot(localDir) * gEpsilon;
		d += tweak;

		if(d<minD)
		{
			minIndex = j;
			minD = d;
		}
	}
	return minIndex;
}
// Fills in sweepHit.faceIndex (and the eFACE_INDEX flag) when the caller
// requested it; otherwise leaves the hit untouched. Always returns true so it
// can be used as the tail expression of the sweep functions.
static PX_FORCE_INLINE bool computeFaceIndex(PxGeomSweepHit& sweepHit, const PxHitFlags hitFlags, const PxConvexMeshGeometry& convexGeom, ConvexHullData* hullData, const PxTransform& pose, const PxVec3& unitDir)
{
	if(hitFlags & PxHitFlag::eFACE_INDEX)
	{
		// PT: compute closest polygon using the same tweak as in swept-capsule-vs-mesh
		sweepHit.faceIndex = computeSweepConvexPlane(convexGeom, hullData, hullData->mNbPolygons, pose, sweepHit.position, unitDir);
		sweepHit.flags |= PxHitFlag::eFACE_INDEX;
	}
	return true;
}
// Handles the "shapes already overlap at t=0" case of a GJK sweep
// (toi <= 0). Returns true when the sweep result has been fully written:
// either an MTD result (depenetration normal/position/distance, with toi
// holding the negative penetration depth) or a plain touching hit
// (distance 0, normal = -unitDir). Returns false when the sweep should
// proceed to report a regular hit. closestA/normal are in the local space of
// the convex pose passed in.
static PX_FORCE_INLINE bool hasInitialOverlap(PxGeomSweepHit& sweepHit, const PxVec3& unitDir,
											  const FloatVArg toi,
											  const Vec3VArg normal, const Vec3VArg closestA,
											  const PxTransformV& convexPose,
											  const bool isMtd, const bool impactPointOnTheOtherShape)
{
	sweepHit.flags = PxHitFlag::eNORMAL;

	const FloatV zero = FZero();
	if(FAllGrtrOrEq(zero, toi))
	{
		//ML: initial overlap
		if(isMtd)
		{
			sweepHit.flags |= PxHitFlag::ePOSITION;
			const FloatV length = toi;
			const Vec3V worldPointA = convexPose.transform(closestA);
			const Vec3V worldNormal = V3Normalize(convexPose.rotate(normal));
			if(impactPointOnTheOtherShape)
			{
				// Report the contact on the swept-against shape: offset the
				// witness point by the (negative) penetration along the normal.
				const Vec3V destWorldPointA = V3NegScaleSub(worldNormal, length, worldPointA);
				V3StoreU(worldNormal, sweepHit.normal);
				V3StoreU(destWorldPointA, sweepHit.position);
			}
			else
			{
				// Report the contact on the swept shape itself; flip the normal
				// so it points from the other shape towards it.
				const Vec3V destNormal = V3Neg(worldNormal);
				V3StoreU(destNormal, sweepHit.normal);
				V3StoreU(worldPointA, sweepHit.position);
			}
			FStore(length, &sweepHit.distance);
		}
		else
		{
			// No MTD requested: report a zero-distance touching hit.
			sweepHit.distance = 0.0f;
			sweepHit.normal = -unitDir;
		}
		sweepHit.faceIndex = 0xffffffff;
		return true;
	}
	return false;
}
///////////////////////////////////////////////// sweepCapsule/Sphere //////////////////////////////////////////////////////
// Sweeps a capsule against a sphere. The sweep is performed as the sphere
// moving in -unitDir against the static capsule (equivalent result). Handles
// the MTD path for an initial overlap, degenerating to sphere-sphere when the
// capsule's segment is a point.
bool sweepCapsule_SphereGeom(GU_CAPSULE_SWEEP_FUNC_PARAMS)
{
	PX_UNUSED(threadContext);
	PX_UNUSED(capsuleGeom_);
	PX_UNUSED(capsulePose_);

	PX_ASSERT(geom.getType() == PxGeometryType::eSPHERE);
	const PxSphereGeometry& sphereGeom = static_cast<const PxSphereGeometry&>(geom);

	// Inflation is folded into the sphere radius.
	const Sphere sphere(pose.p, sphereGeom.radius+inflation);

	if(!sweepSphereCapsule(sphere, lss, -unitDir, distance, sweepHit.distance, sweepHit.position, sweepHit.normal, hitFlags))
		return false;

	const bool isMtd = hitFlags & PxHitFlag::eMTD;

	if(isMtd)
	{
		sweepHit.flags = PxHitFlag::ePOSITION | PxHitFlag::eNORMAL;
		if(sweepHit.distance == 0.f)
		{
			//intialOverlap
			if(lss.p0 == lss.p1)
			{
				//sphere
				return computeSphere_SphereMTD(sphere, Sphere(lss.p0, lss.radius), sweepHit);
			}
			else
			{
				//capsule
				return computeSphere_CapsuleMTD(sphere, lss, sweepHit);
			}
		}
	}
	else
	{
		// No position is available for a zero-distance (touching) hit.
		if(sweepHit.distance!=0.0f)
			sweepHit.flags = PxHitFlag::ePOSITION | PxHitFlag::eNORMAL;
		else
			sweepHit.flags = PxHitFlag::eNORMAL;
	}
	return true;
}
// Sweeps a capsule against a plane: finds the capsule end point deepest along
// the plane normal, handles initial overlap (optionally via MTD), then
// raycasts that extreme point against the inflated plane.
bool sweepCapsule_PlaneGeom(GU_CAPSULE_SWEEP_FUNC_PARAMS)
{
	PX_UNUSED(threadContext);
	PX_UNUSED(capsuleGeom_);
	PX_UNUSED(capsulePose_);

	PX_ASSERT(geom.getType() == PxGeometryType::ePLANE);
	PX_UNUSED(geom);
//	const PxPlaneGeometry& planeGeom = static_cast<const PxPlaneGeometry&>(geom);

	const PxPlane& worldPlane = getPlane(pose);
	const PxF32 capsuleRadius = lss.radius + inflation;
	PxU32 index = 0;
	PxVec3 pts[2];
	PxReal minDp = PX_MAX_REAL;

	sweepHit.faceIndex = 0xFFFFffff; // spec says face index is undefined for planes

	// Find extreme point on the capsule (the segment end with the smallest
	// projection onto the plane normal).
	// AP: removed if (lss.p0 == lss.p1 clause because it wasn't properly computing minDp)
	pts[0] = lss.p0;
	pts[1] = lss.p1;
	for(PxU32 i=0; i<2; i++)
	{
		const PxReal dp = pts[i].dot(worldPlane.n);
		if(dp<minDp)
		{
			minDp = dp;
			index = i;
		}
	}

	const bool isMtd = hitFlags & PxHitFlag::eMTD;

	if(isMtd)
	{
		//initial overlap with the plane
		if(minDp <= capsuleRadius - worldPlane.d)
		{
			sweepHit.flags = PxHitFlag::eNORMAL| PxHitFlag::ePOSITION;
			return computePlane_CapsuleMTD(worldPlane, lss, sweepHit);
		}
	}
	else
	{
		if(!(hitFlags & PxHitFlag::eASSUME_NO_INITIAL_OVERLAP))
		{
			// test if the capsule initially overlaps with plane
			if(minDp <= capsuleRadius - worldPlane.d)
			{
				// Touching hit: no position available.
				sweepHit.flags = PxHitFlag::eNORMAL;
				sweepHit.distance = 0.0f;
				sweepHit.normal = -unitDir;
				return true;
			}
		}
	}

	// Surface point of the capsule closest to the plane.
	const PxVec3 ptOnCapsule = pts[index] - worldPlane.n*capsuleRadius;

	// Raycast extreme vertex against plane
	bool hitPlane = intersectRayPlane(ptOnCapsule, unitDir, worldPlane, sweepHit.distance, &sweepHit.position);
	if(hitPlane && sweepHit.distance > 0 && sweepHit.distance <= distance)
	{
		sweepHit.normal = worldPlane.n;
		sweepHit.flags = PxHitFlag::ePOSITION | PxHitFlag::eNORMAL;
		return true;
	}
	return false;
}
// Sweeps a capsule against another capsule by sweeping the static capsule in
// -unitDir against the moving one. Handles the MTD path on initial overlap.
bool sweepCapsule_CapsuleGeom(GU_CAPSULE_SWEEP_FUNC_PARAMS)
{
	PX_UNUSED(threadContext);
	PX_UNUSED(capsuleGeom_);
	PX_UNUSED(capsulePose_);

	PX_ASSERT(geom.getType() == PxGeometryType::eCAPSULE);
	const PxCapsuleGeometry& capsuleGeom = static_cast<const PxCapsuleGeometry&>(geom);

	Capsule staticCapsule;
	getCapsule(staticCapsule, capsuleGeom, pose);
	// Inflation is folded into the static capsule's radius.
	staticCapsule.radius +=inflation;

	const bool isMtd = hitFlags & PxHitFlag::eMTD;

	PxU16 outFlags;
	if(!sweepCapsuleCapsule(lss, staticCapsule, -unitDir, distance, sweepHit.distance, sweepHit.position, sweepHit.normal, hitFlags, outFlags))
		return false;

	sweepHit.flags = PxHitFlags(outFlags);
	if(sweepHit.distance == 0.0f)
	{
		//initial overlap
		if(isMtd)
		{
			sweepHit.flags |= PxHitFlag::ePOSITION;
			return computeCapsule_CapsuleMTD(lss, staticCapsule, sweepHit);
		}
	}
	return true;
}
// Sweeps a capsule against a convex mesh using a GJK raycast performed in the
// convex's local space (the convex is swept in -unitDir against the static
// capsule). Handles initial overlap via hasInitialOverlap() and fills in the
// face index on request.
bool sweepCapsule_ConvexGeom(GU_CAPSULE_SWEEP_FUNC_PARAMS)
{
	PX_UNUSED(threadContext);

	using namespace aos;

	// PT: the same assertion appeared twice in the original code; once is enough.
	PX_ASSERT(geom.getType() == PxGeometryType::eCONVEXMESH);
	const PxConvexMeshGeometry& convexGeom = static_cast<const PxConvexMeshGeometry&>(geom);

	ConvexMesh* convexMesh = static_cast<ConvexMesh*>(convexGeom.convexMesh);
	ConvexHullData* hullData = &convexMesh->getHull();

	const Vec3V zeroV = V3Zero();
	const FloatV zero = FZero();
	const FloatV dist = FLoad(distance);
	const Vec3V worldDir = V3LoadU(unitDir);

	const PxTransformV capPose = loadTransformU(capsulePose_);
	const PxTransformV convexPose = loadTransformU(pose);

	// Capsule expressed in the convex's local space.
	const PxMatTransformV aToB(convexPose.transformInv(capPose));

	const FloatV capsuleHalfHeight = FLoad(capsuleGeom_.halfHeight);
	const FloatV capsuleRadius = FLoad(lss.radius);

	const Vec3V vScale = V3LoadU_SafeReadW(convexGeom.scale.scale);	// PT: safe because 'rotation' follows 'scale' in PxMeshScale
	const QuatV vQuat = QuatVLoadU(&convexGeom.scale.rotation.x);

	const CapsuleV capsule(aToB.p, aToB.rotate( V3Scale(V3UnitX(), capsuleHalfHeight)), capsuleRadius);
	const ConvexHullV convexHull(hullData, zeroV, vScale, vQuat, convexGeom.scale.isIdentity());

	// Relative motion, scaled by the sweep distance and negated (convex moves).
	const Vec3V dir = convexPose.rotateInv(V3Neg(V3Scale(worldDir, dist)));

	const bool isMtd = hitFlags & PxHitFlag::eMTD;

	FloatV toi;
	Vec3V closestA, normal;	// closestA and normal are in the local space of the convex hull

	const LocalConvex<CapsuleV> convexA(capsule);
	const LocalConvex<ConvexHullV> convexB(convexHull);
	const Vec3V initialSearchDir = V3Sub(capsule.getCenter(), convexHull.getCenter());

#ifdef USE_VIRTUAL_GJK
	if(!virtualGjkRaycastPenetration(convexA, convexB, initialSearchDir, zero, zeroV, dir, toi, normal, closestA, lss.radius + inflation, isMtd))
		return false;
#else
	if(!gjkRaycastPenetration<LocalConvex<CapsuleV>, LocalConvex<ConvexHullV> >(convexA, convexB, initialSearchDir, zero, zeroV, dir, toi, normal, closestA, lss.radius + inflation, isMtd))
		return false;
#endif

	// Initial overlap: the hit has already been written.
	if(hasInitialOverlap(sweepHit, unitDir, toi, normal, closestA, convexPose, isMtd, true))
		return true;

	sweepHit.flags |= PxHitFlag::ePOSITION;
	const Vec3V worldPointA = convexPose.transform(closestA);
	const FloatV length = FMul(dist, toi);
	const Vec3V destNormal = V3Normalize(convexPose.rotate(normal));
	// Move the witness point forward along the sweep to the impact position.
	const Vec3V destWorldPointA = V3ScaleAdd(worldDir, length, worldPointA);
	V3StoreU(destNormal, sweepHit.normal);
	V3StoreU(destWorldPointA, sweepHit.position);
	FStore(length, &sweepHit.distance);

	return computeFaceIndex(sweepHit, hitFlags, convexGeom, hullData, pose, unitDir);
}
///////////////////////////////////////////////// sweepBox //////////////////////////////////////////////////////
// Sweeps a box against a plane: finds the box corner deepest along the plane
// normal, handles initial overlap (optionally via MTD), then raycasts that
// corner against the plane (inflation is folded into the plane distance).
bool sweepBox_PlaneGeom(GU_BOX_SWEEP_FUNC_PARAMS)
{
	PX_ASSERT(geom.getType() == PxGeometryType::ePLANE);
	PX_UNUSED(threadContext);
	PX_UNUSED(geom);
	PX_UNUSED(boxPose_);
	PX_UNUSED(boxGeom_);

//	const PxPlaneGeometry& planeGeom = static_cast<const PxPlaneGeometry&>(geom);

	sweepHit.faceIndex = 0xFFFFffff; // spec says face index is undefined for planes

	PxPlane worldPlane = getPlane(pose);
	worldPlane.d -=inflation;	// inflating the box == pushing the plane out

	// Find extreme point on the box (corner with the smallest projection onto
	// the plane normal).
	PxVec3 boxPts[8];
	box.computeBoxPoints(boxPts);
	PxU32 index = 0;
	PxReal minDp = PX_MAX_REAL;
	for(PxU32 i=0;i<8;i++)
	{
		const PxReal dp = boxPts[i].dot(worldPlane.n);
		if(dp<minDp)
		{
			minDp = dp;
			index = i;
		}
	}

	bool isMtd = hitFlags & PxHitFlag::eMTD;

	if(isMtd)
	{
		// test if box initially overlap with plane
		if(minDp <= -worldPlane.d)
		{
			sweepHit.flags = PxHitFlag::ePOSITION | PxHitFlag::eNORMAL;
			//compute Mtd;
			return computePlane_BoxMTD(worldPlane, box, sweepHit);
		}
	}
	else
	{
		if(!(hitFlags & PxHitFlag::eASSUME_NO_INITIAL_OVERLAP))
		{
			// test if box initially overlap with plane
			if(minDp <= -worldPlane.d)
			{
				// Touching hit: no position available.
				sweepHit.flags = PxHitFlag::eNORMAL;
				sweepHit.distance = 0.0f;
				sweepHit.normal = -unitDir;
				return true;
			}
		}
	}

	// Raycast extreme vertex against plane
	bool hitPlane = intersectRayPlane(boxPts[index], unitDir, worldPlane, sweepHit.distance, &sweepHit.position);
	if(hitPlane && sweepHit.distance > 0 && sweepHit.distance <= distance)
	{
		sweepHit.normal = worldPlane.n;
		sweepHit.flags = PxHitFlag::ePOSITION | PxHitFlag::eNORMAL;
		return true;
	}
	return false;
}
// Sweeps a box against a convex mesh using a GJK raycast performed in the
// convex's local space (the convex is swept in -unitDir against the static
// box). Handles initial overlap via hasInitialOverlap() and fills in the face
// index on request.
bool sweepBox_ConvexGeom(GU_BOX_SWEEP_FUNC_PARAMS)
{
	PX_UNUSED(threadContext);
	PX_UNUSED(boxGeom_);

	using namespace aos;

	PX_ASSERT(geom.getType() == PxGeometryType::eCONVEXMESH);
	const PxConvexMeshGeometry& convexGeom = static_cast<const PxConvexMeshGeometry&>(geom);

	ConvexMesh* convexMesh = static_cast<ConvexMesh*>(convexGeom.convexMesh);
	ConvexHullData* hullData = &convexMesh->getHull();

	const Vec3V zeroV = V3Zero();
	const FloatV zero = FZero();

	const PxTransformV boxPose = loadTransformU(boxPose_);
	const PxTransformV convexPose = loadTransformU(pose);

	// Box expressed in the convex's local space.
	const PxMatTransformV aToB(convexPose.transformInv(boxPose));

	const Vec3V boxExtents = V3LoadU(box.extents);
	const Vec3V vScale = V3LoadU_SafeReadW(convexGeom.scale.scale);	// PT: safe because 'rotation' follows 'scale' in PxMeshScale
	const QuatV vQuat = QuatVLoadU(&convexGeom.scale.rotation.x);

	const BoxV boxV(zeroV, boxExtents);
	const ConvexHullV convexHull(hullData, zeroV, vScale, vQuat, convexGeom.scale.isIdentity());

	const Vec3V worldDir = V3LoadU(unitDir);
	const FloatV dist = FLoad(distance);
	// Relative motion, scaled by the sweep distance and negated (convex moves).
	const Vec3V dir = convexPose.rotateInv(V3Neg(V3Scale(worldDir, dist)));

	bool isMtd = hitFlags & PxHitFlag::eMTD;

	FloatV toi;
	Vec3V closestA, normal;

	const RelativeConvex<BoxV> convexA(boxV, aToB);
	const LocalConvex<ConvexHullV> convexB(convexHull);

#ifdef USE_VIRTUAL_GJK
	if(!virtualGjkRaycastPenetration(convexA, convexB, aToB.p, zero, zeroV, dir, toi, normal, closestA, inflation, isMtd))
		return false;
#else
	if(!gjkRaycastPenetration<RelativeConvex<BoxV>,LocalConvex<ConvexHullV> >(convexA, convexB, aToB.p, zero, zeroV, dir, toi, normal, closestA, inflation, isMtd))
		return false;
#endif

	// Initial overlap: the hit has already been written.
	if(hasInitialOverlap(sweepHit, unitDir, toi, normal, closestA, convexPose, isMtd, true))
		return true;

	sweepHit.flags |= PxHitFlag::ePOSITION;
	const Vec3V destNormal = V3Normalize(convexPose.rotate(normal));
	const FloatV length = FMul(dist, toi);
	const Vec3V worldPointA = convexPose.transform(closestA);
	// Move the witness point forward along the sweep to the impact position.
	const Vec3V destWorldPointA = V3ScaleAdd(worldDir, length, worldPointA);
	V3StoreU(destNormal, sweepHit.normal);
	V3StoreU(destWorldPointA, sweepHit.position);
	FStore(length, &sweepHit.distance);

	return computeFaceIndex(sweepHit, hitFlags, convexGeom, hullData, pose, unitDir);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Sweeps an (optionally inflated) capsule against a set of triangles.
// Builds a padded box around the swept capsule volume, used for triangle culling
// by the precise sweep routine.
bool Gu::sweepCapsuleTriangles(GU_SWEEP_TRIANGLES_FUNC_PARAMS(PxCapsuleGeometry))
{
Capsule capsule;
getCapsule(capsule, geom, pose);
// Inflation is folded directly into the capsule radius.
capsule.radius +=inflation;
// Compute swept box
Box capsuleBox;
computeBoxAroundCapsule(capsule, capsuleBox);
BoxPadded sweptBounds;
computeSweptBox(sweptBounds, capsuleBox.extents, capsuleBox.center, capsuleBox.rot, unitDir, distance);
PxVec3 triNormal;	// receives the normal of the hit triangle (unused by this caller)
return sweepCapsuleTriangles_Precise(nbTris, triangles, capsule, unitDir, distance, cachedIndex, hit, triNormal, hitFlags, doubleSided, &sweptBounds);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Sweeps a convex mesh against a sphere. Internally the sphere is modeled as a
// zero-length capsule and swept (via GJK raycast) in the convex's local frame;
// the resulting normal is negated so it points back towards the convex's motion.
bool sweepConvex_SphereGeom(GU_CONVEX_SWEEP_FUNC_PARAMS)
{
PX_UNUSED(threadContext);
PX_ASSERT(geom.getType() == PxGeometryType::eSPHERE);
const PxSphereGeometry& sphereGeom = static_cast<const PxSphereGeometry&>(geom);
ConvexMesh* convexMesh = static_cast<ConvexMesh*>(convexGeom.convexMesh);
ConvexHullData* hullData = &convexMesh->getHull();
const Vec3V zeroV = V3Zero();
const FloatV zero= FZero();
const Vec3V vScale = V3LoadU_SafeReadW(convexGeom.scale.scale); // PT: safe because 'rotation' follows 'scale' in PxMeshScale
const QuatV vQuat = QuatVLoadU(&convexGeom.scale.rotation.x);
const FloatV sphereRadius = FLoad(sphereGeom.radius);
const PxTransformV sphereTransf = loadTransformU(pose);
const PxTransformV convexTransf = loadTransformU(convexPose);
// aToB: sphere pose expressed in the convex's local space.
const PxMatTransformV aToB(convexTransf.transformInv(sphereTransf));
const Vec3V worldDir = V3LoadU(unitDir);
const FloatV dist = FLoad(distance);
// Full sweep motion in the convex's local frame (not negated here: the convex is the moving shape).
const Vec3V dir = convexTransf.rotateInv(V3Scale(worldDir, dist));
const ConvexHullV convexHull(hullData, zeroV, vScale, vQuat, convexGeom.scale.isIdentity());
//CapsuleV capsule(zeroV, sphereRadius);
// A sphere is a capsule whose segment is a single point.
const CapsuleV capsule(aToB.p, sphereRadius);
const bool isMtd = hitFlags & PxHitFlag::eMTD;
FloatV toi;	// time of impact as a fraction of 'dist'
Vec3V closestA, normal;
const LocalConvex<CapsuleV> convexA(capsule);
const LocalConvex<ConvexHullV> convexB(convexHull);
const Vec3V initialSearchDir = V3Sub(capsule.getCenter(), convexHull.getCenter());
#ifdef USE_VIRTUAL_GJK
if(!virtualGjkRaycastPenetration(convexA, convexB, initialSearchDir, zero, zeroV, dir, toi, normal, closestA, sphereGeom.radius+inflation, isMtd))
return false;
#else
if(!gjkRaycastPenetration<LocalConvex<CapsuleV>, LocalConvex<ConvexHullV> >(convexA, convexB, initialSearchDir, zero, zeroV, dir, toi, normal, closestA, sphereGeom.radius+inflation, isMtd))
return false;
#endif
// Handles the toi<=0 (initially overlapping) case, including the MTD codepath.
if(hasInitialOverlap(sweepHit, unitDir, toi, normal, closestA, convexPose, isMtd, false))
return true;
sweepHit.flags |= PxHitFlag::ePOSITION;
// Flip the normal: GJK reports it relative to the capsule, callers expect it against the sweep.
const Vec3V destNormal = V3Neg(V3Normalize(convexTransf.rotate(normal)));
const FloatV length = FMul(dist, toi);
const Vec3V destWorldPointA = convexTransf.transform(closestA);
V3StoreU(destNormal, sweepHit.normal);
V3StoreU(destWorldPointA, sweepHit.position);
FStore(length, &sweepHit.distance);
sweepHit.faceIndex = 0xffffffff;	// no meaningful face index for a sphere
return true;
}
// Sweeps a convex mesh against a plane by ray-casting every (scaled, world-space)
// hull vertex along the sweep direction and keeping the earliest plane hit.
// An initially-touching vertex short-circuits into the overlap/MTD codepath.
bool sweepConvex_PlaneGeom(GU_CONVEX_SWEEP_FUNC_PARAMS)
{
	PX_ASSERT(geom.getType() == PxGeometryType::ePLANE);
	PX_UNUSED(hitFlags);
	PX_UNUSED(geom);
	PX_UNUSED(threadContext);

	ConvexMesh* convexMesh = static_cast<ConvexMesh*>(convexGeom.convexMesh);
	ConvexHullData* hullData = &convexMesh->getHull();

	sweepHit.faceIndex = 0xffffffff;	// spec says face index is undefined for planes

	const PxVec3* PX_RESTRICT verts = hullData->getHullVertices();
	const PxU32 nbVerts = hullData->mNbHullVertices;

	const bool wantsMTD = hitFlags & PxHitFlag::eMTD;

	const FastVertex2ShapeScaling scaling(convexGeom.scale);

	// Inflation is folded into the plane equation by pushing the plane out.
	PxPlane testPlane = getPlane(pose);
	testPlane.d -=inflation;

	sweepHit.distance = distance;	// running minimum over all vertex hits

	bool hasHit = false;
	bool touching = false;
	for(PxU32 i=0; i<nbVerts; i++)
	{
		const PxVec3 worldPt = convexPose.transform(scaling * verts[i]);
		float t;
		PxVec3 pointOnPlane;
		if(!intersectRayPlane(worldPt, unitDir, testPlane, t, &pointOnPlane))
			continue;

		if(testPlane.distance(worldPt) <= 0.0f)
		{
			// Vertex already on or behind the plane => initial overlap.
			touching = true;
			break;
		}
		if(t > 0.0f && t <= sweepHit.distance)
		{
			// Earliest hit so far: record it.
			sweepHit.distance	= t;
			sweepHit.flags		= PxHitFlag::ePOSITION | PxHitFlag::eNORMAL;
			sweepHit.position	= pointOnPlane;
			sweepHit.normal		= testPlane.n;
			hasHit = true;
		}
	}

	if(touching)
	{
		if(wantsMTD)
		{
			sweepHit.flags = PxHitFlag::ePOSITION | PxHitFlag::eNORMAL;
			return computePlane_ConvexMTD(testPlane, convexGeom, convexPose, sweepHit);
		}
		// No MTD requested: report a zero-distance touch with an anti-sweep normal.
		sweepHit.distance	= 0.0f;
		sweepHit.flags		= PxHitFlag::eNORMAL;
		sweepHit.normal		= -unitDir;
		return true;
	}
	return hasHit;
}
// Sweeps a convex mesh against a capsule by running the reverse sweep
// (capsule vs convex, opposite direction) and mirroring the result.
bool sweepConvex_CapsuleGeom(GU_CONVEX_SWEEP_FUNC_PARAMS)
{
	PX_ASSERT(geom.getType() == PxGeometryType::eCAPSULE);
	const PxCapsuleGeometry& capsuleGeom = static_cast<const PxCapsuleGeometry&>(geom);

	Capsule capsule;
	getCapsule(capsule, capsuleGeom, pose);

	// The face index is not needed for the internal reversed sweep, strip the flag.
	const PxHitFlags flagsNoFaceIndex = hitFlags & ~PxHitFlag::eFACE_INDEX;

	if(!sweepCapsule_ConvexGeom(convexGeom, convexPose, capsuleGeom, pose, capsule, -unitDir, distance, sweepHit, flagsNoFaceIndex, inflation, threadContext))
		return false;

	// Mirror the reversed-sweep result back into the caller's frame of reference:
	// move the impact point along the original direction and flip the normal.
	if(sweepHit.flags & PxHitFlag::ePOSITION)
		sweepHit.position += unitDir * sweepHit.distance;
	sweepHit.normal = -sweepHit.normal;
	sweepHit.faceIndex = 0xffffffff;	// undefined for this query
	return true;
}
// Sweeps a convex mesh against a box by running the reverse sweep
// (box vs convex, opposite direction) and mirroring the result.
bool sweepConvex_BoxGeom(GU_CONVEX_SWEEP_FUNC_PARAMS)
{
	PX_ASSERT(geom.getType() == PxGeometryType::eBOX);
	const PxBoxGeometry& boxGeom = static_cast<const PxBoxGeometry&>(geom);

	Box box;
	buildFrom(box, pose.p, boxGeom.halfExtents, pose.q);

	// The face index is not needed for the internal reversed sweep, strip the flag.
	const PxHitFlags flagsNoFaceIndex = hitFlags & ~PxHitFlag::eFACE_INDEX;

	if(!sweepBox_ConvexGeom(convexGeom, convexPose, boxGeom, pose, box, -unitDir, distance, sweepHit, flagsNoFaceIndex, inflation, threadContext))
		return false;

	// Mirror the reversed-sweep result back into the caller's frame of reference:
	// move the impact point along the original direction and flip the normal.
	if(sweepHit.flags & PxHitFlag::ePOSITION)
		sweepHit.position += unitDir * sweepHit.distance;
	sweepHit.normal = -sweepHit.normal;
	sweepHit.faceIndex = 0xffffffff;	// undefined for this query
	return true;
}
// Sweeps a convex mesh against another convex mesh using a GJK-based raycast.
// The swept convex ('convexGeom'/'convexPose') is the local reference frame;
// the static one ('geom'/'pose') is expressed relative to it via 'aToB'.
bool sweepConvex_ConvexGeom(GU_CONVEX_SWEEP_FUNC_PARAMS)
{
PX_UNUSED(threadContext);
using namespace aos;
PX_ASSERT(geom.getType() == PxGeometryType::eCONVEXMESH);
const PxConvexMeshGeometry& otherConvexGeom = static_cast<const PxConvexMeshGeometry&>(geom);
ConvexMesh& otherConvexMesh = *static_cast<ConvexMesh*>(otherConvexGeom.convexMesh);
ConvexMesh* convexMesh = static_cast<ConvexMesh*>(convexGeom.convexMesh);
ConvexHullData* hullData = &convexMesh->getHull();
ConvexHullData* otherHullData = &otherConvexMesh.getHull();
const Vec3V zeroV = V3Zero();
const FloatV zero = FZero();
const Vec3V otherVScale = V3LoadU_SafeReadW(otherConvexGeom.scale.scale); // PT: safe because 'rotation' follows 'scale' in PxMeshScale
const QuatV otherVQuat = QuatVLoadU(&otherConvexGeom.scale.rotation.x);
const Vec3V vScale = V3LoadU_SafeReadW(convexGeom.scale.scale); // PT: safe because 'rotation' follows 'scale' in PxMeshScale
const QuatV vQuat = QuatVLoadU(&convexGeom.scale.rotation.x);
const PxTransformV otherTransf = loadTransformU(pose);
const PxTransformV convexTransf = loadTransformU(convexPose);
const Vec3V worldDir = V3LoadU(unitDir);
const FloatV dist = FLoad(distance);
// Full sweep motion expressed in the swept convex's local frame.
const Vec3V dir = convexTransf.rotateInv(V3Scale(worldDir, dist));
// aToB: other convex's pose in the swept convex's local space.
const PxMatTransformV aToB(convexTransf.transformInv(otherTransf));
const ConvexHullV otherConvexHull(otherHullData, zeroV, otherVScale, otherVQuat, otherConvexGeom.scale.isIdentity());
const ConvexHullV convexHull(hullData, zeroV, vScale, vQuat, convexGeom.scale.isIdentity());
const bool isMtd = hitFlags & PxHitFlag::eMTD;
FloatV toi;	// time of impact as a fraction of 'dist'
Vec3V closestA, normal;
const RelativeConvex<ConvexHullV> convexA(otherConvexHull, aToB);
const LocalConvex<ConvexHullV> convexB(convexHull);
#ifdef USE_VIRTUAL_GJK
if(!virtualGjkRaycastPenetration(convexA, convexB, aToB.p, zero, zeroV, dir, toi, normal, closestA, inflation, isMtd))
return false;
#else
if(!gjkRaycastPenetration<RelativeConvex<ConvexHullV>, LocalConvex<ConvexHullV> >(convexA, convexB, aToB.p, zero, zeroV, dir, toi, normal, closestA, inflation, isMtd))
return false;
#endif
// Handles the toi<=0 (initially overlapping) case, including the MTD codepath.
if(hasInitialOverlap(sweepHit, unitDir, toi, normal, closestA, convexPose, isMtd, false))
return true;
sweepHit.flags |= PxHitFlag::ePOSITION;
// Convert local-space GJK results back to world space; the normal is flipped
// so it opposes the sweep direction.
const Vec3V worldPointA = convexTransf.transform(closestA);
const Vec3V destNormal = V3Neg(V3Normalize(convexTransf.rotate(normal)));
const FloatV length = FMul(dist, toi);
V3StoreU(destNormal, sweepHit.normal);
V3StoreU(worldPointA, sweepHit.position);
FStore(length, &sweepHit.distance);
// Fills sweepHit.faceIndex (when requested) against the *other* convex's hull.
return computeFaceIndex(sweepHit, hitFlags, otherConvexGeom, otherHullData, pose, unitDir);
}
| 25,221 | C++ | 32.230566 | 216 | 0.736014 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuIncrementalAABBTree.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_INCREMENTAL_AABB_TREE_H
#define GU_INCREMENTAL_AABB_TREE_H
#include "foundation/PxBounds3.h"
#include "foundation/PxUserAllocated.h"
#include "foundation/PxHashMap.h"
#include "foundation/PxVecMath.h"
#include "foundation/PxPool.h"
#include "common/PxPhysXCommonConfig.h"
#include "GuAABBTree.h"
#include "GuPrunerTypedef.h"
namespace physx
{
using namespace aos;
namespace Gu
{
struct BVHNode;
class BVH;
#define INCR_NB_OBJECTS_PER_NODE 4
// Per-leaf primitive index storage. The index count can change at runtime as
// objects are added to / removed from a leaf (up to INCR_NB_OBJECTS_PER_NODE).
struct AABBTreeIndices
{
// Creates a leaf payload holding a single primitive; the unused slots are zeroed.
PX_FORCE_INLINE AABBTreeIndices(PoolIndex index) : nbIndices(1)
{
indices[0] = index;
for(PxU32 i=1; i<INCR_NB_OBJECTS_PER_NODE; i++)
indices[i] = 0;
}
PxU32 nbIndices;	// number of valid entries in 'indices'
PoolIndex indices[INCR_NB_OBJECTS_PER_NODE];
};
// Tree node with parent information. Internal nodes store two child pointers;
// leaves reuse the same storage (union) to hold a pointer to their index list,
// with mChilds[1]==NULL acting as the leaf marker.
class IncrementalAABBTreeNode : public PxUserAllocated
{
public:
// Internal-node constructor: both children start empty.
PX_FORCE_INLINE IncrementalAABBTreeNode() : mParent(NULL)
{
mChilds[0] = NULL;
mChilds[1] = NULL;
}
// Leaf constructor: mIndices aliases mChilds[0]; mChilds[1] is nulled to mark the leaf.
PX_FORCE_INLINE IncrementalAABBTreeNode(AABBTreeIndices* indices) : mParent(NULL)
{
mIndices = indices;
mChilds[1] = NULL;
}
PX_FORCE_INLINE ~IncrementalAABBTreeNode() {}
// A node is a leaf iff its second child pointer is NULL (see leaf constructor).
PX_FORCE_INLINE PxU32 isLeaf() const { return PxU32(mChilds[1]==0); }
PX_FORCE_INLINE const PxU32* getPrimitives(const PxU32*) const { return &mIndices->indices[0]; }
PX_FORCE_INLINE PxU32* getPrimitives(PxU32*) { return &mIndices->indices[0]; }
PX_FORCE_INLINE PxU32 getNbPrimitives() const { return mIndices->nbIndices; }
// Always invalid: primitives live in the mIndices list, not in a single slot.
PX_FORCE_INLINE PxU32 getPrimitiveIndex() const { return PX_INVALID_U32; }
PX_FORCE_INLINE const IncrementalAABBTreeNode* getPos(const IncrementalAABBTreeNode*) const { return mChilds[0]; }
PX_FORCE_INLINE const IncrementalAABBTreeNode* getNeg(const IncrementalAABBTreeNode*) const { return mChilds[1]; }
PX_FORCE_INLINE IncrementalAABBTreeNode* getPos(IncrementalAABBTreeNode*) { return mChilds[0]; }
PX_FORCE_INLINE IncrementalAABBTreeNode* getNeg(IncrementalAABBTreeNode*) { return mChilds[1]; }
// PT: TODO: these functions are duplicates from the regular AABB tree node
// Returns center = (max+min)*0.5 and extents = (max-min)*0.5.
PX_FORCE_INLINE void getAABBCenterExtentsV(physx::aos::Vec3V* center, physx::aos::Vec3V* extents) const
{
const float half = 0.5f;
const FloatV halfV = FLoad(half);
*extents = Vec3V_From_Vec4V((V4Scale(V4Sub(mBVMax, mBVMin), halfV)));
*center = Vec3V_From_Vec4V((V4Scale(V4Add(mBVMax, mBVMin), halfV)));
}
// Same as above but without the 0.5 scale: returns (max-min) and (max+min),
// i.e. twice the extents/center. Callers must account for the factor of 2.
PX_FORCE_INLINE void getAABBCenterExtentsV2(physx::aos::Vec3V* center, physx::aos::Vec3V* extents) const
{
*extents = Vec3V_From_Vec4V((V4Sub(mBVMax, mBVMin)));
*center = Vec3V_From_Vec4V((V4Add(mBVMax, mBVMin)));
}
Vec4V mBVMin; // Global bounding-volume min enclosing all the node-related primitives
Vec4V mBVMax; // Global bounding-volume max enclosing all the node-related primitives
IncrementalAABBTreeNode* mParent; // node parent
union
{
IncrementalAABBTreeNode* mChilds[2]; // childs of node if not a leaf
AABBTreeIndices* mIndices; // if leaf, indices information
};
};
// Two nodes packed together; used as the pool allocation unit so that sibling
// nodes are created/freed as a pair (see IncrementalAABBTree::mNodesPool).
struct IncrementalAABBTreeNodePair
{
IncrementalAABBTreeNode mNode0;
IncrementalAABBTreeNode mNode1;
};
typedef PxArray<IncrementalAABBTreeNode*> NodeList;
// Incremental AABB tree: all changes (insert/update/remove) are immediately
// reflected in the tree, rather than being deferred to a rebuild.
class IncrementalAABBTree : public PxUserAllocated
{
public:
PX_PHYSX_COMMON_API IncrementalAABBTree();
PX_PHYSX_COMMON_API ~IncrementalAABBTree();
// Build the tree for the first time
PX_PHYSX_COMMON_API bool build(const AABBTreeBuildParams& params, PxArray<IncrementalAABBTreeNode*>& mapping);
// insert a new index into the tree
PX_PHYSX_COMMON_API IncrementalAABBTreeNode* insert(const PoolIndex index, const PxBounds3* bounds, NodeList& changedLeaf);
// update the object in the tree - full update insert/remove
PX_PHYSX_COMMON_API IncrementalAABBTreeNode* update(IncrementalAABBTreeNode* node, const PoolIndex index, const PxBounds3* bounds, NodeList& changedLeaf);
// update the object in the tree, faster method, that may unbalance the tree
PX_PHYSX_COMMON_API IncrementalAABBTreeNode* updateFast(IncrementalAABBTreeNode* node, const PoolIndex index, const PxBounds3* bounds, NodeList& changedLeaf);
// remove object from the tree
PX_PHYSX_COMMON_API IncrementalAABBTreeNode* remove(IncrementalAABBTreeNode* node, const PoolIndex index, const PxBounds3* bounds);
// fixup the tree indices, if we swapped the objects in the pruning pool
PX_PHYSX_COMMON_API void fixupTreeIndices(IncrementalAABBTreeNode* node, const PoolIndex index, const PoolIndex newIndex);
// origin shift
PX_PHYSX_COMMON_API void shiftOrigin(const PxVec3& shift);
// get the tree root node
PX_FORCE_INLINE const IncrementalAABBTreeNode* getNodes() const { return mRoot; }
// define this function so we can share the scene query code with regular AABBTree
PX_FORCE_INLINE const PxU32* getIndices() const { return NULL; }
// paranoia checks
PX_PHYSX_COMMON_API void hierarchyCheck(PoolIndex maxIndex, const PxBounds3* bounds);
PX_PHYSX_COMMON_API void hierarchyCheck(const PxBounds3* bounds);
PX_PHYSX_COMMON_API void checkTreeLeaf(IncrementalAABBTreeNode* leaf, PoolIndex h);
PX_PHYSX_COMMON_API PxU32 getTreeLeafDepth(IncrementalAABBTreeNode* leaf);
PX_PHYSX_COMMON_API void release();
// clone from a pre-built generic BVH (see copyNode/clone below)
PX_PHYSX_COMMON_API void copy(const BVH& bvh, PxArray<IncrementalAABBTreeNode*>& mapping);
private:
// clone the tree from the generic AABB tree that was built
void clone(PxArray<IncrementalAABBTreeNode*>& mapping, const PxU32* indices, IncrementalAABBTreeNode** treeNodes);
void copyNode(IncrementalAABBTreeNode& destNode, const BVHNode& sourceNode, const BVHNode* nodeBase,
IncrementalAABBTreeNode* parent, const PxU32* primitivesBase, PxArray<IncrementalAABBTreeNode*>& mapping);
// split leaf node, the newly added object does not fit in
IncrementalAABBTreeNode* splitLeafNode(IncrementalAABBTreeNode* node, const PoolIndex index, const Vec4V& minV, const Vec4V& maxV, const PxBounds3* bounds);
void rotateTree(IncrementalAABBTreeNode* node, NodeList& changedLeaf, PxU32 largesRotateNode, const PxBounds3* bounds, bool rotateAgain);
void releaseNode(IncrementalAABBTreeNode* node);
PxPool<AABBTreeIndices> mIndicesPool;	// pool for leaf index lists
PxPool<IncrementalAABBTreeNodePair> mNodesPool;	// pool for sibling node pairs
IncrementalAABBTreeNode* mRoot;
NodeAllocator mNodeAllocator;
};
}
}
#endif
| 8,796 | C | 43.882653 | 166 | 0.701342 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuSecondaryPruner.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuSecondaryPruner.h"
#include "GuBucketPruner.h"
#include "GuIncrementalAABBPrunerCore.h"
//#define USE_DEBUG_PRINTF
#ifdef USE_DEBUG_PRINTF
#include <stdio.h>
#endif
using namespace physx;
using namespace Gu;
// Companion pruner backed by a BucketPrunerCore. The bucket pruner identifies
// objects by payload, so the pruner handles and pool indices passed in by the
// CompanionPruner interface are ignored.
class CompanionPrunerBucket : public CompanionPruner
{
public:
CompanionPrunerBucket() : mPrunerCore(false) {}
virtual ~CompanionPrunerBucket() {}
virtual bool addObject(const PrunerPayload& object, PrunerHandle handle, const PxBounds3& worldAABB, const PxTransform& transform, PxU32 timeStamp, PoolIndex poolIndex)
{
PX_UNUSED(poolIndex);
PX_UNUSED(handle);
return mPrunerCore.addObject(object, worldAABB, transform, timeStamp);
}
virtual bool updateObject(const PrunerPayload& object, PrunerHandle handle, const PxBounds3& worldAABB, const PxTransform& transform, PoolIndex poolIndex)
{
PX_UNUSED(poolIndex);
PX_UNUSED(handle);
return mPrunerCore.updateObject(worldAABB, object, transform);
}
virtual bool removeObject(const PrunerPayload& object, PrunerHandle handle, PxU32 objectIndex, PxU32 swapObjectIndex)
{
PX_UNUSED(objectIndex);
PX_UNUSED(swapObjectIndex);
PX_UNUSED(handle);
PxU32 timeStamp;	// output timestamp, unused by this wrapper
return mPrunerCore.removeObject(object, timeStamp);
}
// No-op: the bucket pruner does not track pool indices.
virtual void swapIndex(PxU32 objectIndex, PxU32 swapObjectIndex)
{
PX_UNUSED(objectIndex);
PX_UNUSED(swapObjectIndex);
}
virtual PxU32 removeMarkedObjects(PxU32 timeStamp) { return mPrunerCore.removeMarkedObjects(timeStamp); }
virtual void shiftOrigin(const PxVec3& shift) { mPrunerCore.shiftOrigin(shift); }
virtual void timeStampChange() { }
virtual void build() { mPrunerCore.build(); }
virtual PxU32 getNbObjects() const { return mPrunerCore.getNbObjects(); }
virtual void release() { mPrunerCore.release(); }
virtual void visualize(PxRenderOutput& out, PxU32 color) const { mPrunerCore.visualize(out, color); }
// Queries return true ("continue traversal") when the pruner is empty.
virtual bool raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& prunerCallback) const
{
if(mPrunerCore.getNbObjects())
return mPrunerCore.raycast(origin, unitDir, inOutDistance, prunerCallback);
return true;
}
virtual bool overlap(const ShapeData& queryVolume, PrunerOverlapCallback& prunerCallback) const
{
if(mPrunerCore.getNbObjects())
return mPrunerCore.overlap(queryVolume, prunerCallback);
return true;
}
virtual bool sweep(const ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& prunerCallback) const
{
if(mPrunerCore.getNbObjects())
return mPrunerCore.sweep(queryVolume, unitDir, inOutDistance, prunerCallback);
return true;
}
virtual void getGlobalBounds(PxBounds3& bounds) const
{
mPrunerCore.getGlobalBounds(bounds);
}
BucketPrunerCore mPrunerCore;
};
// Companion pruner backed by an IncrementalAABBPrunerCore. The incremental
// pruner references the master PruningPool and identifies objects by pool
// index, so payloads/handles/bounds passed in are largely ignored here.
class CompanionPrunerIncremental : public CompanionPruner
{
public:
CompanionPrunerIncremental(const PruningPool* pool) : mPrunerCore(pool) {}
virtual ~CompanionPrunerIncremental() {}
virtual bool addObject(const PrunerPayload& object, PrunerHandle handle, const PxBounds3& worldAABB, const PxTransform& transform, PxU32 timeStamp, PoolIndex poolIndex)
{
PX_UNUSED(worldAABB);
PX_UNUSED(transform);
PX_UNUSED(object);
PX_UNUSED(handle);
return mPrunerCore.addObject(poolIndex, timeStamp);
}
virtual bool updateObject(const PrunerPayload& object, PrunerHandle handle, const PxBounds3& worldAABB, const PxTransform& transform, PoolIndex poolIndex)
{
PX_UNUSED(worldAABB);
PX_UNUSED(transform);
PX_UNUSED(object);
PX_UNUSED(handle);
return mPrunerCore.updateObject(poolIndex);
}
virtual bool removeObject(const PrunerPayload& object, PrunerHandle handle, PxU32 objectIndex, PxU32 swapObjectIndex)
{
PX_UNUSED(object);
PX_UNUSED(handle);
PxU32 timeStamp;	// output timestamp, unused by this wrapper
return mPrunerCore.removeObject(objectIndex, swapObjectIndex, timeStamp);
}
// Keeps the core's index mapping in sync with swaps in the master pruning pool.
virtual void swapIndex(PxU32 objectIndex, PxU32 swapObjectIndex)
{
mPrunerCore.swapIndex(objectIndex, swapObjectIndex);
}
virtual PxU32 removeMarkedObjects(PxU32 timeStamp) { return mPrunerCore.removeMarkedObjects(timeStamp); }
virtual void shiftOrigin(const PxVec3& shift) { mPrunerCore.shiftOrigin(shift); }
virtual void timeStampChange() { mPrunerCore.timeStampChange(); }
virtual void build() { mPrunerCore.build(); }
virtual PxU32 getNbObjects() const { return mPrunerCore.getNbObjects(); }
virtual void release() { mPrunerCore.release(); }
virtual void visualize(PxRenderOutput& out, PxU32 color) const { mPrunerCore.visualize(out, color); }
// Queries return true ("continue traversal") when the pruner is empty.
virtual bool raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& prunerCallback) const
{
if(mPrunerCore.getNbObjects())
return mPrunerCore.raycast(origin, unitDir, inOutDistance, prunerCallback);
return true;
}
virtual bool overlap(const ShapeData& queryVolume, PrunerOverlapCallback& prunerCallback) const
{
if(mPrunerCore.getNbObjects())
return mPrunerCore.overlap(queryVolume, prunerCallback);
return true;
}
virtual bool sweep(const ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& prunerCallback) const
{
if(mPrunerCore.getNbObjects())
return mPrunerCore.sweep(queryVolume, unitDir, inOutDistance, prunerCallback);
return true;
}
virtual void getGlobalBounds(PxBounds3& bounds) const
{
mPrunerCore.getGlobalBounds(bounds);
}
IncrementalAABBPrunerCore mPrunerCore;
};
#define USE_MAVERICK_NODE
#include "GuActorShapeMap.h"
#include "GuBVH.h"
#include "GuAABBTreeNode.h"
#include "GuAABBTreeBuildStats.h"
#include "GuAABBTreeQuery.h"
#include "GuQuery.h"
#ifdef USE_MAVERICK_NODE
#include "GuMaverickNode.h"
#endif
// Compile-time switches selecting how much cleanup CompanionPrunerAABBTree does
// on removal. gUpdateObjectBoundsWhenRemovingObject invalidates the removed
// object's bounds in the BVH (see removeObject); gUpdateTreeWhenRemovingObject
// presumably gates the heavier tree-update path further below — confirm at its use site.
static const bool gUpdateTreeWhenRemovingObject = false;
static const bool gUpdateObjectBoundsWhenRemovingObject = true;
// Companion pruner backed by a Gu::BVH, referencing the master PruningPool and
// addressed by PrunerHandle (see the design-options comment below). Optionally
// front-ended by a MaverickNode holding a few objects outside the tree.
class CompanionPrunerAABBTree : public CompanionPruner
{
// Deferred-work flags accumulated in mDirtyFlags and consumed by build().
enum DirtyFlags
{
NEEDS_REBUILD = (1<<0),
NEEDS_REFIT = (1<<1)
};
public:
CompanionPrunerAABBTree(PxU64 contextID, const PruningPool* pool);
virtual ~CompanionPrunerAABBTree();
virtual bool addObject(const PrunerPayload& object, PrunerHandle handle, const PxBounds3& worldAABB, const PxTransform& transform, PxU32 timeStamp, PoolIndex poolIndex);
virtual bool updateObject(const PrunerPayload& object, PrunerHandle handle, const PxBounds3& worldAABB, const PxTransform& transform, PoolIndex poolIndex);
virtual bool removeObject(const PrunerPayload& object, PrunerHandle handle, PxU32 objectIndex, PxU32 swapObjectIndex);
virtual void swapIndex(PxU32 objectIndex, PxU32 swapObjectIndex);
virtual PxU32 removeMarkedObjects(PxU32 timeStamp);
virtual void shiftOrigin(const PxVec3& shift);
virtual void timeStampChange();
virtual void build();
virtual PxU32 getNbObjects() const;
virtual void release();
virtual void visualize(PxRenderOutput& out, PxU32 color) const;
virtual bool raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& prunerCallback) const;
virtual bool overlap(const ShapeData& queryVolume, PrunerOverlapCallback& prunerCallback) const;
virtual bool sweep(const ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& prunerCallback) const;
virtual void getGlobalBounds(PxBounds3& bounds) const;
// PT: we have multiple options here, not sure which one is best:
// - use a Gu:BVH
// - use a Gu:AABBTree
// - use a full blown Pruner
// - use/reference the master PruningPool or not
// - use a hashmap
// - use PoolIndex
// - use PrunerHandle
// - somehow return our own local index to caller and use that
//
// The current implementation uses a PxBVH, a reference to the master PruningPool, and PrunerHandles.
#ifdef USE_MAVERICK_NODE
MaverickNode mMaverick;
#endif
const PruningPool* mPool;
// Per-object bookkeeping, indexed by local index (position in mLocalData).
struct LocalData
{
PX_FORCE_INLINE LocalData(PxU32 timestamp, PrunerHandle handle) : mTimestamp(timestamp), mHandle(handle) {}
PxU32 mTimestamp;
PrunerHandle mHandle;
// Marks the slot as removed (handle 0xffffffff is the removed sentinel).
PX_FORCE_INLINE void setRemoved()
{
mTimestamp = 0xffffffff;
mHandle = 0xffffffff;
}
// A slot is valid if it was not removed and its timestamp is still current.
PX_FORCE_INLINE bool isValid(PxU32 lastValidTimestamp) const
{
return mHandle != 0xffffffff && mTimestamp>=lastValidTimestamp;
}
};
PxArray<LocalData> mLocalData;
BVH* mBVH;
PxU32* mRemap; // Pruner handle to local index
PxU32 mMapSize;	// capacity of mRemap
PxU32 mDirtyFlags;	// DirtyFlags pending for build()
PxU32 mLastValidTimestamp;
PX_FORCE_INLINE PxU32 getNbObjectsFast() const { return mLocalData.size(); }
bool addObjectInternal(PrunerHandle handle, PxU32 timeStamp);
void releaseInternal();
void resizeMap(PxU32 index);
};
// Stores the master pool reference; the BVH and remap table are built lazily.
CompanionPrunerAABBTree::CompanionPrunerAABBTree(PxU64 /*contextID*/, const PruningPool* pool) : mPool(pool),
mBVH (NULL),
mRemap (NULL),
mMapSize (0),
mDirtyFlags (0),
mLastValidTimestamp (0)
{
}
// Frees the BVH and the handle-to-local-index remap table.
CompanionPrunerAABBTree::~CompanionPrunerAABBTree()
{
releaseInternal();
}
// Releases the BVH and remap table and resets all bookkeeping state.
// Note: mLocalData is left to its own destructor / callers.
void CompanionPrunerAABBTree::releaseInternal()
{
PX_DELETE(mBVH);
PX_FREE(mRemap);
mMapSize = 0;
mDirtyFlags = 0;
mLastValidTimestamp = 0;
}
// Grows the handle->local-index remap table so that 'index' fits.
// Growth is geometric (x2, min 64); new slots are filled with 0xffffffff (invalid).
void CompanionPrunerAABBTree::resizeMap(PxU32 index)
{
	PxU32 newSize = mMapSize ? mMapSize*2 : 64;
	if(index+1 > newSize)
		newSize = (index+1)*2;

	PxU32* newRemap = PX_ALLOCATE(PxU32, newSize, "Map");
	if(mRemap)
		PxMemCopy(newRemap, mRemap, mMapSize*sizeof(PxU32));
	// Mark the freshly added slots as invalid.
	PxMemSet(newRemap+mMapSize, 0xff, (newSize-mMapSize)*sizeof(PxU32));
	PX_FREE(mRemap);
	mRemap = newRemap;
	mMapSize = newSize;
}
// Appends a (handle, timestamp) entry to mLocalData, records the handle->local
// mapping, and invalidates the BVH (a full rebuild is scheduled).
bool CompanionPrunerAABBTree::addObjectInternal(PrunerHandle handle, PxU32 timeStamp)
{
const PxU32 localIndex = getNbObjectsFast();
#ifdef USE_DEBUG_PRINTF
printf("add %d %d to local %d\n", handle, timeStamp, localIndex);
#endif
PX_ASSERT(handle!=0xffffffff);
if(handle>=mMapSize)
resizeMap(handle);
// The slot must be free, or hold a stale (removed/outdated) entry.
PX_ASSERT(mRemap[handle]==0xffffffff || !mLocalData[mRemap[handle]].isValid(mLastValidTimestamp));
mRemap[handle] = localIndex;
mLocalData.pushBack(LocalData(timeStamp, handle));
// Any structural change invalidates the whole tree.
PX_DELETE(mBVH);
mDirtyFlags = NEEDS_REBUILD;
// PT: TODO: why didn't we return a secondary pruner handle from here? Could have been stored in the padding bytes of the pruning pool's transform array for example
return true;
}
// Adds an object. With USE_MAVERICK_NODE the object first goes to the small
// maverick buffer; when that buffer is full, its evicted entries are flushed
// into the tree before the new object is added.
bool CompanionPrunerAABBTree::addObject(const PrunerPayload& object, PrunerHandle handle, const PxBounds3& worldAABB, const PxTransform& transform, PxU32 timeStamp, PoolIndex poolIndex)
{
PX_UNUSED(object);
PX_UNUSED(worldAABB);
PX_UNUSED(transform);
PX_UNUSED(timeStamp);
PX_UNUSED(poolIndex);
#ifdef USE_MAVERICK_NODE
if(mMaverick.addObject(object, handle, worldAABB, transform, timeStamp))
return true;
// Maverick buffer is full: move its freed entries into the tree.
PxU32 nbToAdd = mMaverick.mNbFree;
for(PxU32 i=0;i<nbToAdd;i++)
addObjectInternal(mMaverick.mFreeHandles[i], mMaverick.mFreeStamps[i]);
mMaverick.mNbFree = 0;
#endif
return addObjectInternal(handle, timeStamp);
}
// Updates an object's bounds. Tries the maverick buffer first; otherwise
// writes the new bounds into the BVH and schedules a refit.
bool CompanionPrunerAABBTree::updateObject(const PrunerPayload& object, PrunerHandle handle, const PxBounds3& worldAABB, const PxTransform& transform, PoolIndex poolIndex)
{
PX_UNUSED(object);
PX_UNUSED(worldAABB);
PX_UNUSED(transform);
PX_UNUSED(poolIndex);
PX_UNUSED(handle);
#ifdef USE_MAVERICK_NODE
if(mMaverick.updateObject(handle, worldAABB, transform))
return true;
#endif
// PT: the bounds & transform have already been updated in the source pruning pool.
// We just need to mark the corresponding node for refit.
PX_ASSERT(handle<mMapSize);
const PxU32 localIndex = mRemap[handle];
PX_ASSERT(localIndex<getNbObjectsFast());
PX_ASSERT(localIndex!=0xffffffff);
PX_ASSERT(mLocalData[localIndex].mHandle==handle);
if(mBVH && mBVH->updateBoundsInternal(localIndex, worldAABB))
mDirtyFlags |= NEEDS_REFIT;
return true;
}
// Removes the object identified by 'handle' from the companion pruner.
// The pool indices (objectIndex/swapObjectIndex) are unused here: this pruner keeps
// its own handle -> local-index mapping (mRemap) and marks the local entry as removed.
// Always returns true (the object is expected to be found either in the maverick
// node or in the local data array).
bool CompanionPrunerAABBTree::removeObject(const PrunerPayload& object, PrunerHandle handle, PxU32 objectIndex, PxU32 swapObjectIndex)
{
	PX_UNUSED(object);
	PX_UNUSED(objectIndex);
	PX_UNUSED(swapObjectIndex);
	PX_UNUSED(handle);

#ifdef USE_MAVERICK_NODE
	// Recently-added objects may still live in the maverick node; try there first.
	PxU32 unused;
	if(mMaverick.removeObject(handle, unused))
		return true;
#endif

	PX_ASSERT(handle<mMapSize);
	const PxU32 localIndex = mRemap[handle];
	PX_ASSERT(localIndex<getNbObjectsFast());
	PX_ASSERT(localIndex!=0xffffffff);
	PX_ASSERT(mLocalData[localIndex].mHandle==handle);

	// PT: technically this is all we need to mark the object as removed. We can then test the handle against 0xffffffff during
	// queries and skip the object. This is optimal in terms of remove performance, but not optimal in terms of query performance.
	// There are a number of extra steps we could do here:
	//
	// - invalidate the *object* bounds. This means the tree structure itself doesn't change, it's only the object bounds used in
	//   leaf nodes that do. Empty bounds for removed objects mean we discard the object before reaching the previously mentioned
	//   handle test. This does not need an "update map".
	//
	// - update the number of primitives in the node. In this case we update the contents of a leaf node, which means decreasing
	//   the number of primitives there and reordering them so that there is no hole in the list. This requires an update map so
	//   it uses more memory and more CPU time during the remove call. It also has a really, really, really nasty side-effect of
	//   invalidating the optimization that skips the object bounds test in the traversal code when the number of primitives is 1. (*)
	//
	// - the next step would be to recompute the *node* bounds, to take into account the fact that one of the bounds involved in
	//   its computation is now empty. This would avoid visiting the node at all in some queries, so it is probably worth doing
	//   if we already do the previous step. (It also requires an update map and pretty much visiting the same memory).
	//
	// - finally the last step would be to then refit the branch involving that node. This is more complicated because it needs
	//   support for partial refit in the tree, i.e. links to parent nodes etc. If we do that though, the previous step can be
	//   skipped since the node bounds recomputation will happen automatically as part of the refit procedure. The previous step
	//   is only useful as a limited refit (limited to a single node) when parent pointers are not available. The previous step
	//   can also be used as an optimization if the recomputed bounds is the same as the old one, then we can skip the more
	//   costly refit procedure. In fact this could probably be used as an optimization for the refit loop: if the box is the
	//   same as before we could break out of the loop. Note that it is possible to skip this last step here because the new
	//   bounds are guaranteed to be smaller than or equal to the previous bounds. We couldn't skip this part in the "update
	//   object" codepath for example.
	//
	// (*) the optimization relies on the fact that the narrow-phase test is roughly as expensive as the AABB test within the
	// tree, so it skips it if there is only one primitive in the node. (With multiple primitives it's worth doing the test
	// anyway since one AABB test can skip N narrow-phase tests). The nasty bit is that removing an object can suddenly mean
	// the AABB test isn't done anymore, and while it isn't a big deal in practice it's enough to break unit tests that don't
	// expect that.

#ifdef USE_DEBUG_PRINTF
	printf("remove %d %d from %d\n", handle, mLocalData[localIndex].mTimestamp, localIndex);
#endif

	// Minimal removal: unlink the handle and flag the local entry as removed.
	mRemap[handle] = 0xffffffff;
	mLocalData[localIndex].setRemoved();

	if(mBVH)
	{
		BVHData& data = const_cast<BVHData&>(mBVH->getData());
		const PxU32 nbNodes = data.mNbNodes;
		PX_UNUSED(nbNodes);
		BVHNode* nodes = data.mNodes;
		PxU32* indices = data.mIndices;
		PxBounds3* bounds = data.mBounds.getBounds();

		if(gUpdateObjectBoundsWhenRemovingObject)
		{
			// Invalidates the object bounds, not always needed
			// The node bounds would need recomputing, and the branch refit
			bounds[localIndex].minimum = PxVec3(GU_EMPTY_BOUNDS_EXTENTS);
			bounds[localIndex].maximum = PxVec3(-GU_EMPTY_BOUNDS_EXTENTS);
		}

		PxU32* mMapping = data.getUpdateMap();
		if(gUpdateTreeWhenRemovingObject && mMapping)
		{
			// PT: note: the following codepath has only one part (as opposed to the equivalent code in AABBTreeUpdateMap)
			// because it operates on our local indices, not on (pruning) pool indices. The difference is that our local
			// array can have holes in it for removed objects, while the AABBTree's update code works with the PruningPool
			// (no holes).
			const PxU32 treeNodeIndex = mMapping[localIndex];
			if(treeNodeIndex!=0xffffffff)
			{
				PX_ASSERT(treeNodeIndex < nbNodes);
				PX_ASSERT(nodes[treeNodeIndex].isLeaf());

				BVHNode* node = nodes + treeNodeIndex;
				const PxU32 nbPrims = node->getNbRuntimePrimitives();
				PX_ASSERT(nbPrims < 16);

				// retrieve the primitives pointer
				PxU32* primitives = node->getPrimitives(indices);
				PX_ASSERT(primitives);

				// PT: look for desired local index in the leaf
				bool foundIt = false;
				for(PxU32 i=0;i<nbPrims;i++)
				{
					PX_ASSERT(mMapping[primitives[i]] == treeNodeIndex); // PT: all primitives should point to the same leaf node
					if(localIndex == primitives[i])
					{
						foundIt = true;
						const PxU32 last = nbPrims-1;
						node->setNbRunTimePrimitives(last);

						primitives[i] = 0xffffffff;			// Mark primitive index as invalid in the node
						mMapping[localIndex] = 0xffffffff;	// invalidate the node index for pool 0

						// PT: swap within the leaf node. No need to update the mapping since they should all point
						// to the same tree node anyway.
						if(last!=i)
							PxSwap(primitives[i], primitives[last]);

						// PT: breaking here means we couldn't reuse that loop to update the node bounds
						break;
					}
				}
				PX_ASSERT(foundIt);
				PX_UNUSED(foundIt);
			}
		}
	}
	return true;
}
// No-op: presumably invoked when the pruning pool moves an object (remove-and-swap).
// This pruner tracks objects by handle through mRemap rather than by pool index
// (pool indices are resolved on the fly via mPool->getIndex()), so there is no
// index bookkeeping to update here.
void CompanionPrunerAABBTree::swapIndex(PxU32 objectIndex, PxU32 swapObjectIndex)
{
	PX_UNUSED(objectIndex);
	PX_UNUSED(swapObjectIndex);
}
// Invalidates all objects carrying the given timestamp. The removal is lazy:
// bumping mLastValidTimestamp makes LocalData::isValid() fail for stale entries,
// and queries filter them out (see RaycastAdapter/OverlapAdapter). The actual
// compaction of mLocalData happens later, during build().
// Returns 0: the number of objects removed this way is not tracked by this pruner.
PxU32 CompanionPrunerAABBTree::removeMarkedObjects(PxU32 timeStamp)
{
#ifdef USE_DEBUG_PRINTF
	printf("removeMarkedObjects %d\n", timeStamp);
#endif
	PX_UNUSED(timeStamp);
	//printf("removeMarkedObjects %d\n", timeStamp);

	// Any object whose timestamp is older than this is now considered removed.
	mLastValidTimestamp = timeStamp+1;

	// PT: TODO: consider updating our local data as well here but is it worth it?
	if(0)	// Disabled: eager per-object invalidation, kept for reference.
	{
		const PxU32 nbObjects = getNbObjectsFast();
		for(PxU32 i=0;i<nbObjects;i++)
		{
			LocalData& localData = mLocalData[i];
			if(localData.mTimestamp==timeStamp)
			{
				localData.setRemoved();
			}
		}
	}
#ifdef USE_MAVERICK_NODE
	mMaverick.removeMarkedObjects(timeStamp);
#endif
	return 0;
}
void CompanionPrunerAABBTree::shiftOrigin(const PxVec3& shift)
{
if(mBVH)
{
BVHData& data = const_cast<BVHData&>(mBVH->getData());
PxU32 nbNodes = data.mNbNodes;
BVHNode* nodes = data.mNodes;
while(nbNodes--)
{
nodes->mBV.minimum -= shift;
nodes->mBV.maximum -= shift;
nodes++;
}
PxU32 nbObjects = getNbObjectsFast();
PxBounds3* bounds = data.mBounds.getBounds();
while(nbObjects--)
{
if(!bounds->isEmpty())
{
bounds->minimum -= shift;
bounds->maximum -= shift;
}
bounds++;
}
}
#ifdef USE_MAVERICK_NODE
mMaverick.shiftOrigin(shift);
#endif
}
// Intentionally empty: timestamp bookkeeping for this pruner is handled in
// removeMarkedObjects() (via mLastValidTimestamp), so nothing is needed here.
void CompanionPrunerAABBTree::timeStampChange()
{
}
// Applies pending modifications to the companion tree: either a full rebuild of
// the BVH (NEEDS_REBUILD) or a refit of the nodes marked dirty (NEEDS_REFIT).
// A rebuild first compacts mLocalData in place - plugging the holes left by
// removed objects with the last valid entries - and rebuilds mRemap from scratch,
// then constructs a new BVH over the surviving objects' current world bounds.
void CompanionPrunerAABBTree::build()
{
	if(!mDirtyFlags)	// PT: necessary, extended bucket pruner calls this without checking first
		return;

	const PxU32 needsRebuild = mDirtyFlags & NEEDS_REBUILD;
	const PxU32 needsRefit = mDirtyFlags & NEEDS_REFIT;

	mDirtyFlags = 0;

	// PT: we want fast build for this one
	const PxU32 numPrimsPerLeaf = 15;

	if(needsRebuild)
	{
		PX_DELETE(mBVH);

		PxU32 nbObjects = getNbObjectsFast();
		if(!nbObjects)
			return;

		if(1)
		{
			// PT: you know what forget it just rebuild the whole map
			PX_FREE(mRemap);
			PxU32* newRemap = PX_ALLOCATE(PxU32, mMapSize, "Map");
			PxMemSet(newRemap, 0xff, mMapSize*sizeof(PxU32));
			mRemap = newRemap;

			// Compaction loop: 'offset' is the write cursor. When an invalid
			// (removed) entry is found, the last entry of the array is moved
			// into the hole and nbObjects shrinks; the same slot is re-examined
			// on the next iteration since 'offset' is not advanced.
			PxU32 offset = 0;
			PxU32 nb = nbObjects;
			while(nb--)
			{
				if(!mLocalData[offset].isValid(mLastValidTimestamp))
				{
					if(0 && mLocalData[offset].mHandle!=0xffffffff)
					{
						//PX_ASSERT(mRemap[mLocalData[offset].mHandle]==offset);
						mRemap[mLocalData[offset].mHandle] = 0xffffffff;
					}

					// This object has been removed, plug the hole
					const LocalData& movedData = mLocalData[--nbObjects];
					if(movedData.isValid(mLastValidTimestamp))
					{
#ifdef USE_DEBUG_PRINTF
						printf("move %d %d from %d to %d\n", movedData.mHandle, movedData.mTimestamp, nbObjects, offset);
						if(movedData.mHandle==22)
						{
							int stop = 1;
							(void)stop;
						}
#endif
						//PX_ASSERT(mRemap[movedData.mHandle]==nbObjects);
						//mRemap[movedData.mHandle] = offset;
						mRemap[movedData.mHandle] = offset;
					}
#ifdef USE_DEBUG_PRINTF
					else
						printf("skip remap %d %d from %d to %d\n", movedData.mHandle, movedData.mTimestamp, nbObjects, offset);
#endif

					mLocalData[offset] = movedData;
				}
				else
				{
					// Valid entry: record its handle->local-index mapping and advance.
					mRemap[mLocalData[offset].mHandle] = offset;
					offset++;
				}
			}
			nbObjects = offset;
			mLocalData.forceSize_Unsafe(offset);
			if(!nbObjects)
				return;
		}

		if(1)
		{
			// Gather the current world bounds of the surviving objects...
			AABBTreeBounds bounds;
			bounds.init(nbObjects);
			// PT: TODO: inflation?
			const PxBounds3* currentBounds = mPool->getCurrentWorldBoxes();
			PxBounds3* dst = bounds.getBounds();
			for(PxU32 i=0; i<nbObjects; i++)
			{
				const LocalData& localData = mLocalData[i];
				const PoolIndex poolIndex = mPool->getIndex(localData.mHandle);
				dst[i] = currentBounds[poolIndex];
			}

			// ...and build a new BVH over them.
			mBVH = PX_NEW(BVH)(NULL);
			bool status = mBVH->init(nbObjects, &bounds, NULL, 0, BVH_SPLATTER_POINTS, numPrimsPerLeaf, 0.0);
			PX_ASSERT(status);
			PX_UNUSED(status);
		}

		{
			// The update map lets removeObject() locate the leaf node owning a
			// given local index.
			BVHData& data = const_cast<BVHData&>(mBVH->getData());
			data.createUpdateMap(getNbObjectsFast());
		}
		return;
	}

	if(needsRefit && mBVH)
	{
		BVHData& data = const_cast<BVHData&>(mBVH->getData());
		data.refitMarkedNodes(data.mBounds.getBounds());
	}
}
// Total number of objects managed by this pruner: the local data array, plus
// anything still parked in the maverick node when that optimization is enabled.
PxU32 CompanionPrunerAABBTree::getNbObjects() const
{
	PxU32 total = getNbObjectsFast();
#ifdef USE_MAVERICK_NODE
	total += mMaverick.getNbPrimitives();
#endif
	return total;
}
// Releases all resources owned by the pruner; forwards to releaseInternal().
void CompanionPrunerAABBTree::release()
{
	releaseInternal();
}
// Debug visualization: renders the companion BVH with the given color.
void CompanionPrunerAABBTree::visualize(PxRenderOutput& out, PxU32 color) const
{
	visualizeTree(out, color, mBVH);
}
namespace
{
	// Lightweight view adapting BVHData to the node/index accessor interface
	// expected by the AABBTreeRaycast/AABBTreeOverlap traversal templates.
	struct BVHTree
	{
		PX_FORCE_INLINE	BVHTree(const BVHData& data) : mRootNode(data.mNodes), mIndices(data.mIndices)	{}

		const BVHNode*	getNodes()		const	{ return mRootNode;	}
		const PxU32*	getIndices()	const	{ return mIndices;	}

		const BVHNode*	mRootNode;
		const PxU32*	mIndices;
	};

	// Leaf callback for raycast/sweep traversals over the companion tree.
	// Skips lazily-removed objects (stale timestamp), translates the local index
	// into a pool index, then forwards to the user callback. Returning false
	// aborts the traversal; mAbort latches the abort so subsequent invocations
	// also return false.
	struct RaycastAdapter
	{
		RaycastAdapter(const CompanionPrunerAABBTree& owner, PrunerRaycastCallback& cb, PxU32 lastValidTimestamp) : mOwner(owner), mCallback(cb), mLastValidTimestamp(lastValidTimestamp), mAbort(false)	{}

		PX_FORCE_INLINE bool invoke(PxReal& distance, PxU32 index)
		{
			if(!mOwner.mLocalData[index].isValid(mLastValidTimestamp))
				return true; // PT: object has been removed, tree data hasn't been updated accordingly

			const PxU32 handle = mOwner.mLocalData[index].mHandle;
//			if(gUpdateTreeWhenRemovingObject)
			{
				PX_ASSERT(handle!=0xffffffff);
			}
/*			else
			{
				if(handle==0xffffffff)
				{
					// PT: object has been removed, tree data hasn't been updated accordingly
					return true;
				}
			}*/

			const PoolIndex poolIndex = mOwner.mPool->getIndex(handle);
			const PxTransform* currentTransforms = mOwner.mPool->getTransforms();
			const PrunerPayload* currentPayloads = mOwner.mPool->getObjects();

			if(mAbort || !mCallback.invoke(distance, poolIndex, currentPayloads, currentTransforms))
			{
				mAbort = true;
				return false;
			}
			return true;
		}

		const CompanionPrunerAABBTree&	mOwner;
		PrunerRaycastCallback&			mCallback;
		const PxU32						mLastValidTimestamp;
		bool							mAbort;

		PX_NOCOPY(RaycastAdapter)
	};

	// Overlap counterpart of RaycastAdapter: same removed-object filtering and
	// local-index -> pool-index translation, without a distance parameter.
	struct OverlapAdapter
	{
		OverlapAdapter(const CompanionPrunerAABBTree& owner, PrunerOverlapCallback& cb, PxU32 lastValidTimestamp) : mOwner(owner), mCallback(cb), mLastValidTimestamp(lastValidTimestamp), mAbort(false)	{}

		PX_FORCE_INLINE bool invoke(PxU32 index)
		{
			if(!mOwner.mLocalData[index].isValid(mLastValidTimestamp))
				return true; // PT: object has been removed, tree data hasn't been updated accordingly

			const PxU32 handle = mOwner.mLocalData[index].mHandle;
			PX_ASSERT(handle!=0xffffffff);

			const PoolIndex poolIndex = mOwner.mPool->getIndex(handle);
			const PxTransform* currentTransforms = mOwner.mPool->getTransforms();
			const PrunerPayload* currentPayloads = mOwner.mPool->getObjects();

			if(mAbort || !mCallback.invoke(poolIndex, currentPayloads, currentTransforms))
			{
				mAbort = true;
				return false;
			}
			return true;
		}

		const CompanionPrunerAABBTree&	mOwner;
		PrunerOverlapCallback&			mCallback;
		const PxU32						mLastValidTimestamp;
		bool							mAbort;

		PX_NOCOPY(OverlapAdapter)
	};

#ifdef USE_MAVERICK_NODE
	// Raycast adapter for the maverick node: indices address the maverick's own
	// payload/transform arrays directly, so no translation or filtering is needed.
	struct MaverickRaycastAdapter
	{
		MaverickRaycastAdapter(const MaverickNode& owner, PrunerRaycastCallback& cb) : mOwner(owner), mCallback(cb), mAbort(false)	{}

		PX_FORCE_INLINE bool invoke(PxReal& distance, PxU32 index)
		{
			if(mAbort || !mCallback.invoke(distance, index, mOwner.mFreeObjects, mOwner.mFreeTransforms))
			{
				mAbort = true;
				return false;
			}
			return true;
		}

		const MaverickNode&		mOwner;
		PrunerRaycastCallback&	mCallback;
		bool					mAbort;

		PX_NOCOPY(MaverickRaycastAdapter)
	};

	// Overlap counterpart of MaverickRaycastAdapter.
	struct MaverickOverlapAdapter
	{
		MaverickOverlapAdapter(const MaverickNode& owner, PrunerOverlapCallback& cb) : mOwner(owner), mCallback(cb), mAbort(false)	{}

		PX_FORCE_INLINE bool invoke(PxU32 index)
		{
			if(mAbort || !mCallback.invoke(index, mOwner.mFreeObjects, mOwner.mFreeTransforms))
			{
				mAbort = true;
				return false;
			}
			return true;
		}

		const MaverickNode&		mOwner;
		PrunerOverlapCallback&	mCallback;
		bool					mAbort;

		PX_NOCOPY(MaverickOverlapAdapter)
	};
#endif
}
// Raycast against the companion pruner's objects: first the maverick node (when
// compiled in), then the BVH. Returns false if the callback aborted the query,
// true otherwise. Queries require all pending changes to have been committed
// (mDirtyFlags must be 0).
bool CompanionPrunerAABBTree::raycast(const PxVec3& origin, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& prunerCallback) const
{
	PX_UNUSED(origin);
	PX_UNUSED(unitDir);
	PX_UNUSED(inOutDistance);
	PX_UNUSED(prunerCallback);

	PX_ASSERT(!mDirtyFlags);
//	if(mDirtyFlags)
//		const_cast<CompanionPrunerAABBTree*>(this)->build();

#ifdef USE_MAVERICK_NODE
	{
		MaverickRaycastAdapter ra(mMaverick, prunerCallback);
		// NOTE(review): origin and direction are doubled here - this looks like the
		// (center*2, extents*2) form expected by RayAABBTest; confirm against its ctor.
		Gu::RayAABBTest test(origin*2.0f, unitDir*2.0f, inOutDistance, PxVec3(0.0f));
		if(!doLeafTest<false, true, MaverickNode, MaverickRaycastAdapter>(&mMaverick, test, mMaverick.mFreeBounds, NULL, inOutDistance, ra))
			return false;
	}
#endif
	if(mBVH)
	{
		RaycastAdapter ra(*this, prunerCallback, mLastValidTimestamp);
		return AABBTreeRaycast<false, true, BVHTree, BVHNode, RaycastAdapter>()(mBVH->getData().mBounds, BVHTree(mBVH->getData()), origin, unitDir, inOutDistance, PxVec3(0.0f), ra);
	}
	return true;
}
// PT: TODO: this is copied from SqBounds.h, should be either moved to Gu and shared or passed as a user parameter
#define SQ_PRUNER_EPSILON 0.005f
#define SQ_PRUNER_INFLATION (1.0f + SQ_PRUNER_EPSILON) // pruner test shape inflation (not narrow phase shape)
// Overlap query against the companion pruner's objects: first the maverick node
// (when compiled in), then the BVH. The query shape type selects the appropriate
// shape-vs-AABB test; convex meshes are tested through their OBB. Returns false
// if the callback aborted the query. Requires committed state (mDirtyFlags==0).
bool CompanionPrunerAABBTree::overlap(const ShapeData& queryVolume, PrunerOverlapCallback& prunerCallback) const
{
	PX_UNUSED(queryVolume);
	PX_UNUSED(prunerCallback);

	PX_ASSERT(!mDirtyFlags);
//	if(mDirtyFlags)
//		const_cast<CompanionPrunerAABBTree*>(this)->build();

#ifdef USE_MAVERICK_NODE
	{
		// Test the objects parked in the maverick node.
		MaverickOverlapAdapter ra(mMaverick, prunerCallback);

		switch(queryVolume.getType())
		{
			case PxGeometryType::eBOX:
			{
				if(queryVolume.isOBB())
				{
					const DefaultOBBAABBTest test(queryVolume);
					if(!doOverlapLeafTest<true, OBBAABBTest, MaverickNode, MaverickOverlapAdapter>(test, &mMaverick, mMaverick.mFreeBounds, NULL, ra))
						return false;
				}
				else
				{
					const DefaultAABBAABBTest test(queryVolume);
					if(!doOverlapLeafTest<true, AABBAABBTest, MaverickNode, MaverickOverlapAdapter>(test, &mMaverick, mMaverick.mFreeBounds, NULL, ra))
						return false;
				}
			}
			break;

			case PxGeometryType::eCAPSULE:
			{
				const DefaultCapsuleAABBTest test(queryVolume, SQ_PRUNER_INFLATION);
				if(!doOverlapLeafTest<true, CapsuleAABBTest, MaverickNode, MaverickOverlapAdapter>(test, &mMaverick, mMaverick.mFreeBounds, NULL, ra))
					return false;
			}
			break;

			case PxGeometryType::eSPHERE:
			{
				const DefaultSphereAABBTest test(queryVolume);
				if(!doOverlapLeafTest<true, SphereAABBTest, MaverickNode, MaverickOverlapAdapter>(test, &mMaverick, mMaverick.mFreeBounds, NULL, ra))
					return false;
			}
			break;

			case PxGeometryType::eCONVEXMESH:
			{
				// Convex meshes use their OBB for the pruning test.
				const DefaultOBBAABBTest test(queryVolume);
				if(!doOverlapLeafTest<true, OBBAABBTest, MaverickNode, MaverickOverlapAdapter>(test, &mMaverick, mMaverick.mFreeBounds, NULL, ra))
					return false;
			}
			break;
			default:
				PX_ALWAYS_ASSERT_MESSAGE("unsupported overlap query volume geometry type");
		}
	}
#endif
	if(mBVH)
	{
		// Same dispatch against the BVH; each branch returns the traversal result directly.
		OverlapAdapter ra(*this, prunerCallback, mLastValidTimestamp);

		switch(queryVolume.getType())
		{
			case PxGeometryType::eBOX:
			{
				if(queryVolume.isOBB())
				{
					const DefaultOBBAABBTest test(queryVolume);
					return AABBTreeOverlap<true, OBBAABBTest, BVHTree, BVHNode, OverlapAdapter>()(mBVH->getData().mBounds, BVHTree(mBVH->getData()), test, ra);
				}
				else
				{
					const DefaultAABBAABBTest test(queryVolume);
					return AABBTreeOverlap<true, AABBAABBTest, BVHTree, BVHNode, OverlapAdapter>()(mBVH->getData().mBounds, BVHTree(mBVH->getData()), test, ra);
				}
			}

			case PxGeometryType::eCAPSULE:
			{
				const DefaultCapsuleAABBTest test(queryVolume, SQ_PRUNER_INFLATION);
				//const DefaultCapsuleAABBTest test(queryVolume, 1.0f);
				return AABBTreeOverlap<true, CapsuleAABBTest, BVHTree, BVHNode, OverlapAdapter>()(mBVH->getData().mBounds, BVHTree(mBVH->getData()), test, ra);
			}

			case PxGeometryType::eSPHERE:
			{
				const DefaultSphereAABBTest test(queryVolume);
				return AABBTreeOverlap<true, SphereAABBTest, BVHTree, BVHNode, OverlapAdapter>()(mBVH->getData().mBounds, BVHTree(mBVH->getData()), test, ra);
			}

			case PxGeometryType::eCONVEXMESH:
			{
				const DefaultOBBAABBTest test(queryVolume);
				return AABBTreeOverlap<true, OBBAABBTest, BVHTree, BVHNode, OverlapAdapter>()(mBVH->getData().mBounds, BVHTree(mBVH->getData()), test, ra);
			}
			default:
				PX_ALWAYS_ASSERT_MESSAGE("unsupported overlap query volume geometry type");
		}
	}
	return true;
}
// Sweep query: implemented as an inflated raycast using the query volume's
// pruner-inflated world AABB (center as ray origin, extents as inflation).
// Tests the maverick node first (when compiled in), then the BVH. Returns false
// if the callback aborted the query. Requires committed state (mDirtyFlags==0).
bool CompanionPrunerAABBTree::sweep(const ShapeData& queryVolume, const PxVec3& unitDir, PxReal& inOutDistance, PrunerRaycastCallback& prunerCallback) const
{
	PX_UNUSED(queryVolume);
	PX_UNUSED(unitDir);
	PX_UNUSED(inOutDistance);
	PX_UNUSED(prunerCallback);

	PX_ASSERT(!mDirtyFlags);
//	if(mDirtyFlags)
//		const_cast<CompanionPrunerAABBTree*>(this)->build();

#ifdef USE_MAVERICK_NODE
	{
		MaverickRaycastAdapter ra(mMaverick, prunerCallback);
		const PxBounds3& aabb = queryVolume.getPrunerInflatedWorldAABB();
		// NOTE(review): center is doubled here, matching the raycast() codepath -
		// presumably the (center*2, dir*2) form expected by RayAABBTest; confirm.
		Gu::RayAABBTest test(aabb.getCenter()*2.0f, unitDir*2.0f, inOutDistance, aabb.getExtents());
		if(!doLeafTest<true, true, MaverickNode, MaverickRaycastAdapter>(&mMaverick, test, mMaverick.mFreeBounds, NULL, inOutDistance, ra))
			return false;
	}
#endif
	if(mBVH)
	{
		RaycastAdapter ra(*this, prunerCallback, mLastValidTimestamp);
		const PxBounds3& aabb = queryVolume.getPrunerInflatedWorldAABB();
		return AABBTreeRaycast<true, true, BVHTree, BVHNode, RaycastAdapter>()(mBVH->getData().mBounds, BVHTree(mBVH->getData()), aabb.getCenter(), unitDir, inOutDistance, aabb.getExtents(), ra);
	}
	return true;
}
// PxBounds3 with 4 bytes of tail padding so that getGlobalBounds() can safely use
// V4LoadU(&maximum.x): that unaligned 16-byte load reads 4 bytes past the end of a
// plain PxBounds3 (6 floats = 24 bytes), which the padding member absorbs.
class PxBounds3Padded : public PxBounds3
{
	public:
	PX_FORCE_INLINE PxBounds3Padded()	{}
	PX_FORCE_INLINE ~PxBounds3Padded()	{}
	PxU32	padding;	// never read; exists only to make the over-read legal
};
// Computes the union of the BVH root bounds and (when compiled in) the maverick
// node's free-object bounds, using unaligned SIMD loads. PxBounds3Padded provides
// the tail padding required by V4LoadU(&maximum.x) on the local copy.
void CompanionPrunerAABBTree::getGlobalBounds(PxBounds3& bounds) const
{
	PxBounds3Padded tmp;
	if(mBVH)
	{
		// The root node's bounding volume covers the entire tree.
		tmp.minimum = mBVH->getNodes()->mBV.minimum;
		tmp.maximum = mBVH->getNodes()->mBV.maximum;
	}
	else
		tmp.setEmpty();

	Vec4V minV = V4LoadU(&tmp.minimum.x);
	Vec4V maxV = V4LoadU(&tmp.maximum.x);

#ifdef USE_MAVERICK_NODE
	{
		PxU32 nbFree = mMaverick.mNbFree;
		if(nbFree)
		{
			// NOTE(review): V4LoadU(&maximum.x) reads 4 bytes past each PxBounds3;
			// this assumes mFreeBounds has trailing padding - confirm in MaverickNode.
			const PxBounds3* freeBounds = mMaverick.mFreeBounds;
			while(nbFree--)
			{
				minV = V4Min(minV, V4LoadU(&freeBounds->minimum.x));
				maxV = V4Max(maxV, V4LoadU(&freeBounds->maximum.x));
				freeBounds++;
			}
		}
	}
#endif
	StoreBounds(bounds, minV, maxV);
}
// Factory for companion pruners, selecting the implementation for the requested type.
// - contextID is forwarded to the AABB-tree implementation.
// - pool is the pruning pool shared with the owning pruner.
// Returns NULL for COMPANION_PRUNER_NONE or an unrecognized type.
//
// Note: removed the leftover 'if(0) return PX_NEW(CompanionPrunerAABBTree)...' debug
// scaffolding and the spurious PX_UNUSED(contextID) (contextID is used below).
CompanionPruner* physx::Gu::createCompanionPruner(PxU64 contextID, CompanionPrunerType type, const PruningPool* pool)
{
	switch(type)
	{
		case COMPANION_PRUNER_NONE:			return NULL;
		case COMPANION_PRUNER_BUCKET:		return PX_NEW(CompanionPrunerBucket);
		case COMPANION_PRUNER_INCREMENTAL:	return PX_NEW(CompanionPrunerIncremental)(pool);
		case COMPANION_PRUNER_AABB_TREE:	return PX_NEW(CompanionPrunerAABBTree)(contextID, pool);
	}
	return NULL;
}
| 35,587 | C++ | 32.197761 | 197 | 0.723242 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuSweepMTD.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_SWEEP_MTD_H
#define GU_SWEEP_MTD_H

namespace physx
{
	class PxConvexMeshGeometry;
	class PxTriangleMeshGeometry;
	class PxGeometry;
	class PxHeightFieldGeometry;

namespace Gu
{
	class Sphere;
	class Capsule;

	// MTD (minimum translational distance) computations, used by sweep queries
	// that start in an initially-overlapping configuration. Each function writes
	// its result into 'hit'; the bool return value presumably reports whether any
	// contact was found (see setupSweepHitForMTD below) - confirm against the
	// .cpp implementations.

	// Capsule vs. triangle mesh / heightfield
	bool computeCapsule_TriangleMeshMTD(const PxTriangleMeshGeometry& triMeshGeom, const PxTransform& pose, Gu::CapsuleV& capsuleV, PxReal inflatedRadius, bool isDoubleSided, PxGeomSweepHit& hit);
	bool computeCapsule_HeightFieldMTD(const PxHeightFieldGeometry& heightFieldGeom, const PxTransform& pose, Gu::CapsuleV& capsuleV, PxReal inflatedRadius, bool isDoubleSided, PxGeomSweepHit& hit);

	// Box vs. triangle mesh / heightfield
	bool computeBox_TriangleMeshMTD(const PxTriangleMeshGeometry& triMeshGeom, const PxTransform& pose, const Gu::Box& box, const PxTransform& boxTransform, PxReal inflation,
									bool isDoubleSided, PxGeomSweepHit& hit);
	bool computeBox_HeightFieldMTD(	const PxHeightFieldGeometry& heightFieldGeom, const PxTransform& pose, const Gu::Box& box, const PxTransform& boxTransform, PxReal inflation, bool isDoubleSided, PxGeomSweepHit& hit);

	// Convex mesh vs. triangle mesh / heightfield
	bool computeConvex_TriangleMeshMTD(	const PxTriangleMeshGeometry& triMeshGeom, const PxTransform& pose, const PxConvexMeshGeometry& convexGeom, const PxTransform& convexTransform, PxReal inflation,
										bool isDoubleSided, PxGeomSweepHit& hit);
	bool computeConvex_HeightFieldMTD(	const PxHeightFieldGeometry& heightFieldGeom, const PxTransform& pose, const PxConvexMeshGeometry& convexGeom, const PxTransform& convexTransform, PxReal inflation, bool isDoubleSided, PxGeomSweepHit& hit);

	// Primitive vs. primitive
	bool computeSphere_SphereMTD(const Sphere& sphere0, const Sphere& sphere1, PxGeomSweepHit& hit);
	bool computeSphere_CapsuleMTD(const Sphere& sphere, const Capsule& capsule, PxGeomSweepHit& hit);
	bool computeCapsule_CapsuleMTD(const Capsule& capsule0, const Capsule& capsule1, PxGeomSweepHit& hit);

	// Plane vs. primitive / convex
	bool computePlane_CapsuleMTD(const PxPlane& plane, const Capsule& capsule, PxGeomSweepHit& hit);
	bool computePlane_BoxMTD(const PxPlane& plane, const Box& box, PxGeomSweepHit& hit);
	bool computePlane_ConvexMTD(const PxPlane& plane, const PxConvexMeshGeometry& convexGeom, const PxTransform& convexPose, PxGeomSweepHit& hit);
// PT: wrapper just to avoid duplicating these lines.
PX_FORCE_INLINE void setupSweepHitForMTD(PxGeomSweepHit& sweepHit, bool hasContacts, const PxVec3& unitDir)
{
sweepHit.flags = PxHitFlag::eNORMAL | PxHitFlag::eFACE_INDEX;
if(!hasContacts)
{
sweepHit.distance = 0.0f;
sweepHit.normal = -unitDir;
}
else
{
//ML: touching contact. We need to overwrite the normal to the negative of sweep direction
if(sweepHit.distance == 0.0f && sweepHit.normal.isZero())
sweepHit.normal = -unitDir;
sweepHit.flags |= PxHitFlag::ePOSITION;
}
}
}
}
#endif
| 4,469 | C | 48.120879 | 242 | 0.780712 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuAABBPruner.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_AABB_PRUNER_H
#define GU_AABB_PRUNER_H
#include "common/PxPhysXCommonConfig.h"
#include "GuExtendedBucketPruner.h"
#include "GuSqInternal.h"
#include "GuPruningPool.h"
#include "GuAABBTree.h"
#include "GuAABBTreeUpdateMap.h"
#include "GuAABBTreeBuildStats.h"
namespace physx
{
namespace Gu
{
// PT: we build the new tree over a number of frames/states, in order to limit perf spikes in 'updatePruningTrees'.
// The states are as follows:
//
// BUILD_NOT_STARTED (1 frame, AABBPruner):
//
// This is the initial state, before the new (AABBTree) build even starts. In this frame/state, we perform the AABBPruner-related
// memory allocations:
// - the new AABB tree is allocated
// - the array of cached bounding boxes is allocated and filled
//
// BUILD_INIT (1 frame, AABBTree):
//
// This is the first frame in which the new tree gets built. It deserves its own special state since various things happen in the
// first frame, that do no happen in subsequent frames. Basically most initial AABBTree-related allocations happen here (but no
// build step per se).
//
// BUILD_IN_PROGRESS (N frames, AABBTree):
//
// This is the core build function, actually building the tree. This should be mostly allocation-free, except here and there when
// building non-complete trees, and during the last call when the tree is finally built.
//
// BUILD_NEW_MAPPING (1 frame, AABBPruner):
//
// After the new AABBTree is built, we recreate an AABBTreeUpdateMap for the new tree, and use it to invalidate nodes whose objects
// have been removed during the build.
//
// We need to do that before doing a full refit in the next stage/frame. If we don't do that, the refit code will fetch a wrong box,
// that may very well belong to an entirely new object.
//
// Note that this mapping/update map (mNewTreeMap) is temporary, and only needed for the next stage.
//
// BUILD_FULL_REFIT (1 frame, AABBPruner):
//
// Once the new update map is available, we fully refit the new tree. AABBs of moved objects get updated. AABBs of removed objects
// become empty.
//
// BUILD_LAST_FRAME (1 frame, AABBPruner):
//
// This is an artificial frame used to delay the tree switching code. The switch happens as soon as we reach the BUILD_FINISHED
// state, but we don't want to execute BUILD_FULL_REFIT and the switch in the same frame. This extra BUILD_LAST_FRAME stage buys
// us one frame, i.e. we have one frame in which we do BUILD_FULL_REFIT, and in the next frame we'll do both BUILD_LAST_FRAME /
// BUILD_FINISHED / the switch.
//
// BUILD_FINISHED (1 frame, AABBPruner):
//
// Several things happen in this 'finalization' frame/stage:
// - We switch the trees (old one is deleted, cached boxes are deleted, new tree pointer is setup)
// - A new (final) update map is created (mTreeMap). The map is used to invalidate objects that may have been removed during
// the BUILD_NEW_MAPPING and BUILD_FULL_REFIT frames. The nodes containing these removed objects are marked for refit.
// - Nodes containing objects that have moved during the BUILD_NEW_MAPPING and BUILD_FULL_REFIT frames are marked for refit.
// - We do a partial refit on the new tree, to take these final changes into account. This small partial refit is usually much
// cheaper than the full refit we previously performed here.
// - We remove old objects from the bucket pruner
//
enum BuildStatus
{
BUILD_NOT_STARTED,
BUILD_INIT,
BUILD_IN_PROGRESS,
BUILD_NEW_MAPPING,
BUILD_FULL_REFIT,
BUILD_LAST_FRAME,
BUILD_FINISHED,
BUILD_FORCE_DWORD = 0xffffffff
};
// This class implements the Pruner interface for internal SQ use with some additional specialized functions
// The underlying data structure is a binary AABB tree
// AABBPruner supports insertions, removals and updates for dynamic objects
// The tree is either entirely rebuilt in a single frame (static pruner) or progressively rebuilt over multiple frames (dynamic pruner)
// The rebuild happens on a copy of the tree
// the copy is then swapped with current tree at the time commit() is called (only if mBuildState is BUILD_FINISHED),
// otherwise commit() will perform a refit operation applying any pending changes to the current tree
// While the tree is being rebuilt a temporary data structure (BucketPruner) is also kept in sync and used to speed up
// queries on updated objects that are not yet in either old or new tree.
// The requirements on the order of calls:
// commit() is required to be called before any queries to apply modifications
// queries can be issued on multiple threads after commit is called
// commit, buildStep, add/remove/update have to be called from the same thread or otherwise strictly serialized by external code
// and cannot be issued while a query is running
class AABBPruner : public DynamicPruner
{
PX_NOCOPY(AABBPruner)
public:
PX_PHYSX_COMMON_API AABBPruner(bool incrementalRebuild, PxU64 contextID, CompanionPrunerType cpType, BVHBuildStrategy buildStrategy=BVH_SPLATTER_POINTS, PxU32 nbObjectsPerNode=4); // true is equivalent to former dynamic pruner
virtual ~AABBPruner();
// BasePruner
DECLARE_BASE_PRUNER_API
//~BasePruner
// Pruner
DECLARE_PRUNER_API_COMMON
virtual bool isDynamic() const { return mIncrementalRebuild; }
//~Pruner
// DynamicPruner
virtual void setRebuildRateHint(PxU32 nbStepsForRebuild); // Besides the actual rebuild steps, 3 additional steps are needed.
virtual bool buildStep(bool synchronousCall = true); // returns true if finished
virtual bool prepareBuild(); // returns true if new tree is needed
//~DynamicPruner
// direct access for test code
PX_FORCE_INLINE PxU32 getNbAddedObjects() const { return mBucketPruner.getNbObjects(); }
PX_FORCE_INLINE const AABBTree* getAABBTree() const { PX_ASSERT(!mUncommittedChanges); return mAABBTree; }
PX_FORCE_INLINE AABBTree* getAABBTree() { PX_ASSERT(!mUncommittedChanges); return mAABBTree; }
PX_FORCE_INLINE void setAABBTree(AABBTree* tree) { mAABBTree = tree; }
PX_FORCE_INLINE const AABBTree* hasAABBTree() const { return mAABBTree; }
PX_FORCE_INLINE BuildStatus getBuildStatus() const { return mProgress; }
// local functions
// private:
NodeAllocator mNodeAllocator;
AABBTree* mAABBTree; // current active tree
AABBTreeBuildParams mBuilder; // this class deals with the details of the actual tree building
BuildStats mBuildStats;
// tree with build in progress, assigned to mAABBTree in commit, when mProgress is BUILD_FINISHED
// created in buildStep(), BUILD_NOT_STARTED
// This is non-null when there is a tree rebuild going on in progress
// and thus also indicates that we have to start saving the fixups
AABBTree* mNewTree;
// during rebuild the pool might change so we need a copy of boxes for the tree build
AABBTreeBounds mCachedBoxes;
PxU32 mNbCachedBoxes;
// incremented in commit(), serves as a progress counter for rebuild
PxU32 mNbCalls;
// PT: incremented each time we start building a new tree (i.e. effectively identifies a given tree)
// Timestamp is passed to bucket pruner to mark objects added there, linking them to a specific tree.
// When switching to the new tree, timestamp is used to remove old objects (now in the new tree) from
// the bucket pruner.
PxU32 mTimeStamp;
// this pruner is used for queries on objects that are not in the current tree yet
// includes both the objects in the tree being rebuilt and all the objects added later
ExtendedBucketPruner mBucketPruner;
BuildStatus mProgress; // current state of second tree build progress
// Fraction (as in 1/Nth) of the total number of primitives
// that should be processed per step by the AABB builder
// so if this value is 1, all primitives will be rebuilt, 2 => 1/2 of primitives per step etc.
// see also mNbCalls, mNbCalls varies from 0 to mRebuildRateHint-1
PxU32 mRebuildRateHint;
// Estimate for how much work has to be done to rebuild the tree.
PxU32 mTotalWorkUnits;
// Term to correct the work unit estimate if the rebuild rate is not matched
PxI32 mAdaptiveRebuildTerm;
const PxU32 mNbObjectsPerNode;
const BVHBuildStrategy mBuildStrategy;
PruningPool mPool; // Pool of AABBs
// maps pruning pool indices to aabb tree indices
// maps to INVALID_NODE_ID if the pool entry was removed or "pool index is outside input domain"
// The map is the inverse of the tree mapping: (node[map[poolID]].primitive == poolID)
// So:
// treeNodeIndex = mTreeMap.operator[](poolIndex)
// aabbTree->treeNodes[treeNodeIndex].primitives[0] == poolIndex
AABBTreeUpdateMap mTreeMap;
// Temporary update map, see BuildStatus notes above for details
AABBTreeUpdateMap mNewTreeMap;
// This is only set once in the constructor and is equivalent to isDynamicTree
// if it set to false then a 1-shot rebuild is performed in commit()
// bucket pruner is only used with incremental rebuild
const bool mIncrementalRebuild;
// A rebuild can be triggered even when the Pruner is not dirty
// mUncommittedChanges is set to true in add, remove, update and buildStep
// mUncommittedChanges is set to false in commit
// mUncommittedChanges has to be false (commit() has to be called) in order to run a query as defined by the
// mUncommittedChanges is not set to true in add, when pruning structure is provided. Scene query shapes
// are merged to current AABB tree directly
// Pruner higher level API
bool mUncommittedChanges;
// A new AABB tree is built if an object was added, removed or updated
// Changing objects during a build will trigger another rebuild right afterwards
// this is set to true if a new tree has to be created again after the current rebuild is done
bool mNeedsNewTree;
// This struct is used to record modifications made to the pruner state
// while a tree is building in the background
// this is so we can apply the modifications to the tree at the time of completion
// the recorded fixup information is: removedIndex (in ::remove()) and
// lastIndexMoved which is the last index in the pruner array
// (since the way we remove from PruningPool is by swapping last into removed slot,
// we need to apply a fixup so that it syncs up that operation in the new tree)
struct NewTreeFixup
{
	PX_FORCE_INLINE NewTreeFixup(PxU32 removedIndex_, PxU32 relocatedLastIndex_)
		: removedIndex(removedIndex_), relocatedLastIndex(relocatedLastIndex_) {}
	PxU32 removedIndex;			// pool slot freed by remove()
	PxU32 relocatedLastIndex;	// former index of the last object, swap-moved into the freed slot
};
PxArray<NewTreeFixup> mNewTreeFixups;
PxArray<PoolIndex> mToRefit;
// Internal methods
bool fullRebuildAABBTree(); // full rebuild function, used with static pruner mode
void release();
void refitUpdatedAndRemoved();
void updateBucketPruner();
};
}
}
#endif
| 12,838 | C | 47.449056 | 233 | 0.73337 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuBVH.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxFoundation.h"
#include "foundation/PxFPU.h"
#include "foundation/PxPlane.h"
#include "geometry/PxGeometryInternal.h"
#include "GuBVH.h"
#include "GuAABBTreeQuery.h"
#include "GuAABBTreeNode.h"
#include "GuAABBTreeBuildStats.h"
#include "GuMeshFactory.h"
#include "GuQuery.h"
#include "CmSerialize.h"
using namespace physx;
using namespace Gu;
using namespace Cm;
///////////////////////////////////////////////////////////////////////////////
// PT: these two functions moved from cooking
// Builds the BVH from a strided array of user bounds.
// Each input box is inflated by 'enlargement'; the tree is then built over the
// inflated boxes and flattened into the BVHNode array stored in this object.
// Returns false on invalid input or if the tree build fails.
bool BVHData::build(PxU32 nbBounds, const void* boundsData, PxU32 boundsStride, float enlargement, PxU32 nbPrimsPerLeaf, BVHBuildStrategy bs)
{
	// Validate input: we need at least one box, a readable source array,
	// a stride large enough to hold a PxBounds3, a non-negative inflation
	// value and a leaf size small enough for the node encoding (<16).
	if(!nbBounds || !boundsData || boundsStride<sizeof(PxBounds3) || enlargement<0.0f || nbPrimsPerLeaf>=16)
		return false;

	// Copy & inflate the source bounds. The last entry uses the <false>
	// variant of inflateBounds, matching the convention used elsewhere.
	// (nbBounds>0 is guaranteed by the validation above.)
	mBounds.init(nbBounds);
	{
		PxBounds3* dst = mBounds.getBounds();
		const PxU8* src = reinterpret_cast<const PxU8*>(boundsData);
		const PxU32 last = nbBounds - 1;
		for(PxU32 i=0; i<last; i++, src += boundsStride)
			inflateBounds<true>(dst[i], *reinterpret_cast<const PxBounds3*>(src), enlargement);
		inflateBounds<false>(dst[last], *reinterpret_cast<const PxBounds3*>(src), enlargement);
	}

	mNbIndices = nbBounds;

	// Build the node hierarchy over the inflated bounds.
	BuildStats stats;
	NodeAllocator nodeAllocator;
	mIndices = buildAABBTree(AABBTreeBuildParams(nbPrimsPerLeaf, nbBounds, &mBounds, bs), nodeAllocator, stats);
	if(!mIndices)
		return false;

	// Flatten the build-time node structure into the runtime BVHNode array.
	mNbNodes = stats.getCount();
	mNodes = PX_ALLOCATE(BVHNode, mNbNodes, "AABB tree nodes");
	PX_ASSERT(mNbNodes==nodeAllocator.mTotalNbNodes);

	if(nbPrimsPerLeaf==1)
	{
		// With one primitive per leaf the remap table is redundant: the
		// primitive index is stored directly in each leaf node instead.
		flattenTree(nodeAllocator, mNodes, mIndices);
		PX_FREE(mIndices);
	}
	else
		flattenTree(nodeAllocator, mNodes);

	return true;
}
// A.B. move to load code
#define PX_BVH_STRUCTURE_VERSION 1
// Serializes the BVH to a stream: header, counts, remap indices, per-object
// bounds (min then max), then the flattened nodes. 'endian' requests
// byte-swapped output. Returns false only if the header cannot be written.
bool BVHData::save(PxOutputStream& stream, bool endian) const
{
	// header first
	if(!writeHeader('B', 'V', 'H', 'S', PX_BVH_STRUCTURE_VERSION, endian, stream))
		return false;

	const PxU32 nbIndices = mNbIndices;
	const PxU32 nbNodes = mNbNodes;

	// counts
	writeDword(nbIndices, endian, stream);
	writeDword(nbNodes, endian, stream);

	// leaf remap table
	for(PxU32 i=0; i<nbIndices; i++)
		writeDword(mIndices[i], endian, stream);

	// per-object bounds, 6 floats each (minimum then maximum)
	const PxBounds3* bounds = mBounds.getBounds();
	for(PxU32 i=0; i<nbIndices; i++)
	{
		writeFloatBuffer(&bounds[i].minimum.x, 3, endian, stream);
		writeFloatBuffer(&bounds[i].maximum.x, 3, endian, stream);
	}

	// flattened nodes: packed data word followed by the node's AABB
	for(PxU32 i=0; i<nbNodes; i++)
	{
		const BVHNode& node = mNodes[i];
		writeDword(node.mData, endian, stream);
		writeFloatBuffer(&node.mBV.minimum.x, 3, endian, stream);
		writeFloatBuffer(&node.mBV.maximum.x, 3, endian, stream);
	}
	return true;
}
///////////////////////////////////////////////////////////////////////////////
// PT: temporary for Kit
// Wraps externally-provided BVH buffers (nodes, indices, bounds) without a
// mesh factory. PxBaseFlags(0): this instance does not own its memory and is
// not releasable through the usual factory path.
BVH::BVH(const PxBVHInternalData& data) :
	PxBVH (PxType(PxConcreteType::eBVH), PxBaseFlags(0)),
	mMeshFactory (NULL)
{
	mData.mNbIndices = data.mNbIndices;
	mData.mNbNodes = data.mNbNodes;
	mData.mIndices = data.mIndices;
	mData.mNodes = reinterpret_cast<BVHNode*>(data.mNodes);
	mData.mBounds.setBounds(reinterpret_cast<PxBounds3*>(data.mBounds));
}
// Exposes the raw internal buffers to the caller (Kit interop path).
// When takeOwnership is true, the bounds array's ownership is transferred via
// AABBTreeBounds::takeOwnership() — presumably so this BVH no longer frees it;
// confirm against AABBTreeBounds. Always returns true.
bool BVH::getInternalData(PxBVHInternalData& data, bool takeOwnership) const
{
	data.mNbIndices = mData.mNbIndices;
	data.mNbNodes = mData.mNbNodes;
	data.mNodeSize = sizeof(BVHNode);
	data.mNodes = mData.mNodes;
	data.mIndices = mData.mIndices;
	data.mBounds = const_cast<PxBounds3*>(mData.mBounds.getBounds());
	// const_cast is needed because this accessor is const but ownership
	// transfer mutates the bounds holder.
	if(takeOwnership)
		const_cast<BVH*>(this)->mData.mBounds.takeOwnership();
	return true;
}
// Public entry point: forwards to BVH::getInternalData on the concrete type.
bool physx::PxGetBVHInternalData(PxBVHInternalData& data, const PxBVH& bvh, bool takeOwnership)
{
	return static_cast<const BVH&>(bvh).getInternalData(data, takeOwnership);
}
//~ PT: temporary for Kit
// Standard factory-owned constructor: the BVH owns its memory and is
// releasable; data is filled in later (e.g. via init() or load()).
BVH::BVH(MeshFactory* factory) :
	PxBVH (PxType(PxConcreteType::eBVH), PxBaseFlag::eOWNS_MEMORY | PxBaseFlag::eIS_RELEASABLE),
	mMeshFactory (factory)
{
}
// Constructs from pre-cooked data. mData(bvhData) invokes BVHData's copy —
// presumably transferring buffer ownership from the cooking-side object;
// confirm against BVHData's copy semantics.
BVH::BVH(MeshFactory* factory, BVHData& bvhData) :
	PxBVH (PxType(PxConcreteType::eBVH), PxBaseFlag::eOWNS_MEMORY | PxBaseFlag::eIS_RELEASABLE),
	mMeshFactory (factory),
	mData (bvhData)
{
}
// Buffer cleanup is handled by mData's own destructor (RAII); nothing to do here.
BVH::~BVH()
{
}
// Builds this BVH over nbPrims objects.
// Either 'bounds' (pre-built AABBTreeBounds, moved in) or 'boundsData'
// (strided raw PxBounds3 array, copied and inflated) provides the boxes.
// Returns false if nbPrims is zero or the tree build fails.
bool BVH::init(PxU32 nbPrims, AABBTreeBounds* bounds, const void* boundsData, PxU32 stride, BVHBuildStrategy bs, PxU32 nbPrimsPerLeaf, float enlargement)
{
	if(!nbPrims)
		return false;

	if(bounds)
	{
		// take over pre-built bounds, no copy
		mData.mBounds.moveFrom(*bounds);
	}
	else
	{
		mData.mBounds.init(nbPrims);
		PxBounds3* dst = mData.mBounds.getBounds();
		if(stride==sizeof(PxBounds3))
		{
			// NOTE(review): this fast path copies the boxes verbatim and does
			// NOT apply 'enlargement', unlike the strided path below and
			// BVHData::build — confirm this asymmetry is intentional.
			PxMemCopy(dst, boundsData, sizeof(PxBounds3)*nbPrims);
		}
		else
		{
			if(nbPrims)
			{
				// copy + inflate each box; last entry uses the <false> variant
				const PxU8* sB = reinterpret_cast<const PxU8*>(boundsData);
				for(PxU32 i=0; i<nbPrims-1; i++)
				{
					inflateBounds<true>(mData.mBounds.getBounds()[i], *reinterpret_cast<const PxBounds3*>(sB), enlargement);
					sB += stride;
				}
				inflateBounds<false>(mData.mBounds.getBounds()[nbPrims-1], *reinterpret_cast<const PxBounds3*>(sB), enlargement);
			}
		}
	}

	mData.mNbIndices = nbPrims;

	// build the BVH
	BuildStats stats;
	NodeAllocator nodeAllocator;
	mData.mIndices = buildAABBTree(AABBTreeBuildParams(nbPrimsPerLeaf, nbPrims, &mData.mBounds, bs), nodeAllocator, stats);
	if(!mData.mIndices)
		return false;

	// store the computed hierarchy
	mData.mNbNodes = stats.getCount();
	mData.mNodes = PX_ALLOCATE(BVHNode, mData.mNbNodes, "AABB tree nodes");
	PX_ASSERT(mData.mNbNodes==nodeAllocator.mTotalNbNodes);

	// store the results into BVHNode list
	if(nbPrimsPerLeaf==1)
	{
		// PT: with 1 prim/leaf we don't need the remap table anymore, we can just store the prim index in each tree node directly.
		flattenTree(nodeAllocator, mData.mNodes, mData.mIndices);
		PX_FREE(mData.mIndices);
	}
	else
		flattenTree(nodeAllocator, mData.mNodes);

	return true;
}
// Deserializes a BVH previously written by BVHData::save().
// Reads the header, counts, remap indices, per-object bounds and nodes.
// 'mismatch' (set by readHeader) drives byte-swapping inside the read helpers.
// Returns false on a bad header or an unsupported format version.
bool BVH::load(PxInputStream& stream)
{
	// Import header
	PxU32 version;
	bool mismatch;
	if(!readHeader('B', 'V', 'H', 'S', version, mismatch, stream))
		return false;

	// Fix: 'version' was read but never validated, so a stream written by a
	// newer (incompatible) format would be silently misinterpreted. Reject
	// anything that is not the version this reader understands.
	if(version!=PX_BVH_STRUCTURE_VERSION)
		return false;

	// read numVolumes, numNodes together
	//ReadDwordBuffer(&mData.mNbIndices, 2, mismatch, stream);
	mData.mNbIndices = readDword(mismatch, stream);
	mData.mNbNodes = readDword(mismatch, stream);

	// read indices
	mData.mIndices = PX_ALLOCATE(PxU32, mData.mNbIndices, "BVH indices");
	ReadDwordBuffer(mData.mIndices, mData.mNbIndices, mismatch, stream);

	// read bounds (6 floats per entry: minimum then maximum, contiguous)
	mData.mBounds.init(mData.mNbIndices);
	readFloatBuffer(&mData.mBounds.getBounds()->minimum.x, mData.mNbIndices*(3 + 3), mismatch, stream);

	// read nodes: packed data word followed by the node AABB
	mData.mNodes = PX_ALLOCATE(BVHNode, mData.mNbNodes, "BVH nodes");
	for(PxU32 i = 0; i < mData.mNbNodes; i++)
	{
		ReadDwordBuffer(&mData.mNodes[i].mData, 1, mismatch, stream);
		readFloatBuffer(&mData.mNodes[i].mBV.minimum.x, 3 + 3, mismatch, stream);
	}
	return true;
}
// Public release: decrements the ref count; actual destruction happens in
// onRefCountZero() once the last reference is gone.
void BVH::release()
{
	decRefCount();
}
// Called when the last reference is released; delegates deletion (and
// double-delete detection) to the shared helper, unregistering from the factory.
void BVH::onRefCountZero()
{
	::onRefCountZero(this, mMeshFactory, false, "PxBVH::release: double deletion detected!");
}
namespace
{
	// Thin adapter exposing BVHData through the getNodes()/getIndices()
	// interface expected by the AABBTreeRaycast/AABBTreeOverlap templates.
	struct BVHTree
	{
		PX_FORCE_INLINE BVHTree(const BVHData& data) : mRootNode(data.mNodes), mIndices(data.mIndices) {}

		const BVHNode* getNodes() const { return mRootNode; }
		const PxU32* getIndices() const { return mIndices; }

		const BVHNode* mRootNode;	// flattened node array (root at index 0)
		const PxU32* mIndices;		// leaf remap table, NULL for 1-prim-per-leaf trees
	};
}
namespace
{
	// Bridges the traversal templates' invoke() callback to the public
	// PxBVH::RaycastCallback, latching the first refusal so that traversal
	// stops and stays stopped.
	struct RaycastAdapter
	{
		RaycastAdapter(PxBVH::RaycastCallback& cb) : mCallback(cb), mAbort(false) {}

		// 'distance' is in/out: the callback may shrink it to clip the query.
		PX_FORCE_INLINE bool invoke(PxReal& distance, PxU32 index)
		{
			// once aborted, keep returning false so the template unwinds
			if(mAbort || !mCallback.reportHit(index, distance))
			{
				mAbort = true;
				return false;
			}
			return true;
		}
		PxBVH::RaycastCallback& mCallback;
		bool mAbort;
		PX_NOCOPY(RaycastAdapter)
	};
}
// Raycast against the BVH. The second template bool selects whether leaves use
// the remap table (mData.mIndices!=NULL) or store primitive indices directly;
// the first (false) means "no inflation/sweep extents" (pure ray).
bool BVH::raycast(const PxVec3& origin, const PxVec3& unitDir, float distance, RaycastCallback& cb, PxGeometryQueryFlags flags) const
{
	PX_SIMD_GUARD_CNDT(flags & PxGeometryQueryFlag::eSIMD_GUARD)

	RaycastAdapter ra(cb);
	if(mData.mIndices)
		return AABBTreeRaycast<false, true, BVHTree, BVHNode, RaycastAdapter>()(mData.mBounds, BVHTree(mData), origin, unitDir, distance, PxVec3(0.0f), ra);
	else
		return AABBTreeRaycast<false, false, BVHTree, BVHNode, RaycastAdapter>()(mData.mBounds, BVHTree(mData), origin, unitDir, distance, PxVec3(0.0f), ra);
}
namespace
{
	// Same bridging pattern as RaycastAdapter, for overlap queries: forwards
	// each hit to PxBVH::OverlapCallback and latches the first refusal.
	struct OverlapAdapter
	{
		OverlapAdapter(PxBVH::OverlapCallback& cb) : mCallback(cb), mAbort(false) {}

		PX_FORCE_INLINE bool invoke(PxU32 index)
		{
			if(mAbort || !mCallback.reportHit(index))
			{
				mAbort = true;
				return false;
			}
			return true;
		}
		PxBVH::OverlapCallback& mCallback;
		bool mAbort;
		PX_NOCOPY(OverlapAdapter)
	};
}
// Overlap query: dispatches on the query volume's geometry type to the
// matching volume-vs-AABB test, then runs the shared tree traversal.
// The first template bool again selects remapped vs direct leaf indices.
bool BVH::overlap(const ShapeData& queryVolume, OverlapCallback& cb, PxGeometryQueryFlags flags) const
{
	PX_SIMD_GUARD_CNDT(flags & PxGeometryQueryFlag::eSIMD_GUARD)

	OverlapAdapter oa(cb);

	switch(queryVolume.getType())
	{
		case PxGeometryType::eBOX:
		{
			if(queryVolume.isOBB())
			{
				const DefaultOBBAABBTest test(queryVolume);
				if(mData.mIndices)
					return AABBTreeOverlap<true, OBBAABBTest, BVHTree, BVHNode, OverlapAdapter>()(mData.mBounds, BVHTree(mData), test, oa);
				else
					return AABBTreeOverlap<false, OBBAABBTest, BVHTree, BVHNode, OverlapAdapter>()(mData.mBounds, BVHTree(mData), test, oa);
			}
			else
			{
				// axis-aligned box: cheaper AABB-vs-AABB test
				const DefaultAABBAABBTest test(queryVolume);
				if(mData.mIndices)
					return AABBTreeOverlap<true, AABBAABBTest, BVHTree, BVHNode, OverlapAdapter>()(mData.mBounds, BVHTree(mData), test, oa);
				else
					return AABBTreeOverlap<false, AABBAABBTest, BVHTree, BVHNode, OverlapAdapter>()(mData.mBounds, BVHTree(mData), test, oa);
			}
		}
		case PxGeometryType::eCAPSULE:
		{
			const DefaultCapsuleAABBTest test(queryVolume, 1.0f);
			if(mData.mIndices)
				return AABBTreeOverlap<true, CapsuleAABBTest, BVHTree, BVHNode, OverlapAdapter>()(mData.mBounds, BVHTree(mData), test, oa);
			else
				return AABBTreeOverlap<false, CapsuleAABBTest, BVHTree, BVHNode, OverlapAdapter>()(mData.mBounds, BVHTree(mData), test, oa);
		}
		case PxGeometryType::eSPHERE:
		{
			const DefaultSphereAABBTest test(queryVolume);
			if(mData.mIndices)
				return AABBTreeOverlap<true, SphereAABBTest, BVHTree, BVHNode, OverlapAdapter>()(mData.mBounds, BVHTree(mData), test, oa);
			else
				return AABBTreeOverlap<false, SphereAABBTest, BVHTree, BVHNode, OverlapAdapter>()(mData.mBounds, BVHTree(mData), test, oa);
		}
		case PxGeometryType::eCONVEXMESH:
		{
			// convex meshes are tested via their OBB
			const DefaultOBBAABBTest test(queryVolume);
			if(mData.mIndices)
				return AABBTreeOverlap<true, OBBAABBTest, BVHTree, BVHNode, OverlapAdapter>()(mData.mBounds, BVHTree(mData), test, oa);
			else
				return AABBTreeOverlap<false, OBBAABBTest, BVHTree, BVHNode, OverlapAdapter>()(mData.mBounds, BVHTree(mData), test, oa);
		}
		default:
			PX_ALWAYS_ASSERT_MESSAGE("unsupported overlap query volume geometry type");
	}
	return false;
}
// Convenience overload: wraps the geometry+pose into a ShapeData (no inflation)
// and forwards to the ShapeData-based overlap.
bool BVH::overlap(const PxGeometry& geom, const PxTransform& pose, OverlapCallback& cb, PxGeometryQueryFlags flags) const
{
	const ShapeData queryVolume(geom, pose, 0.0f);
	return overlap(queryVolume, cb, flags);
}
// Sweep query implemented as an inflated raycast: the swept volume's inflated
// world AABB provides the ray origin (center) and per-axis extents.
// First template bool = true enables the extents-inflated traversal.
bool BVH::sweep(const ShapeData& queryVolume, const PxVec3& unitDir, float distance, RaycastCallback& cb, PxGeometryQueryFlags flags) const
{
	PX_SIMD_GUARD_CNDT(flags & PxGeometryQueryFlag::eSIMD_GUARD)

	const PxBounds3& aabb = queryVolume.getPrunerInflatedWorldAABB();

	RaycastAdapter ra(cb);
	if(mData.mIndices)
		return AABBTreeRaycast<true, true, BVHTree, BVHNode, RaycastAdapter>()(mData.mBounds, BVHTree(mData), aabb.getCenter(), unitDir, distance, aabb.getExtents(), ra);
	else
		return AABBTreeRaycast<true, false, BVHTree, BVHNode, RaycastAdapter>()(mData.mBounds, BVHTree(mData), aabb.getCenter(), unitDir, distance, aabb.getExtents(), ra);
}
// Convenience overload: wraps geometry+pose into a ShapeData and forwards.
bool BVH::sweep(const PxGeometry& geom, const PxTransform& pose, const PxVec3& unitDir, float distance, RaycastCallback& cb, PxGeometryQueryFlags flags) const
{
	const ShapeData queryVolume(geom, pose, 0.0f);
	return sweep(queryVolume, unitDir, distance, cb, flags);
}
namespace
{
// Tests an AABB (center m, extents d) against the planes whose bits are set
// in inClipMask. Returns false if the box is fully outside any plane.
// Otherwise returns true and writes into outClipMask the subset of planes the
// box straddles (planes the box is fully inside are cleared — they need no
// further testing for children of this node).
PX_FORCE_INLINE bool planesAABBOverlap(const PxVec3& m, const PxVec3& d, const PxPlane* p, PxU32& outClipMask, PxU32 inClipMask)
{
	PxU32 straddleMask = 0;
	// walk one plane per mask bit; p advances even over cleared bits so the
	// bit position always matches the plane index
	for(PxU32 bit = 1; bit <= inClipMask; bit += bit, p++)
	{
		if(!(inClipMask & bit))
			continue;

		const float projRadius = d.x*fabsf(p->n.x) + d.y*fabsf(p->n.y) + d.z*fabsf(p->n.z);
		const float signedDist = m.x*p->n.x + m.y*p->n.y + m.z*p->n.z + p->d;

		if(projRadius < signedDist)
			return false;				// completely outside this plane
		if((-projRadius) < signedDist)
			straddleMask |= bit;		// intersects this plane
	}

	outClipMask = straddleMask;
	return true;
}
// Volume test functor for frustum culling: a set of planes tested against
// tree-node AABBs. mOutClipMask is mutable because the traversal templates
// take the test by const reference; it carries the straddle mask of the most
// recent call, which cull() reads to detect fully-inside nodes.
struct FrustumTest
{
	FrustumTest(PxU32 nbPlanes, const PxPlane* planes) : mPlanes(planes), mMask((1<<nbPlanes)-1), mNbPlanes(nbPlanes), mOutClipMask(0)
	{
	}

	PX_FORCE_INLINE PxIntBool operator()(const Vec3V boxCenter, const Vec3V boxExtents) const
	{
		// PT: TODO: rewrite all this in SIMD
		PxVec3 center, extents;
		V3StoreU(boxCenter, center);
		V3StoreU(boxExtents, extents);

		if(!planesAABBOverlap(center, extents, mPlanes, mOutClipMask, mMask))
			return PxIntFalse;

		// PT: unfortunately the AABBTreeOverlap template doesn't support this case where we know we can
		// immediately dump the rest of the tree (i.e. the old "containment tests" in Opcode). We might
		// want to revisit this at some point.
		//
		// In fact it's worse than this: we lost the necessary data to make this quick, in "flattenTree"
		// when going from AABBTreeBuildNodes to BVHNodes. The BVHNodes lost the primitive-related info
		// for internal (non-leaf) nodes so we cannot just dump a list of primitives when an internal
		// node is fully visible (like we did in Opcode 1.x). Best we can do is keep traversing the tree
		// and skip VFC tests.
		//if(!outClipMask)

		return PxIntTrue;
	}

	const PxPlane* mPlanes;			// plane array, one per bit of mMask
	const PxU32 mMask;				// bitmask with one bit set per plane
	const PxU32 mNbPlanes;
	mutable PxU32 mOutClipMask;		// straddle mask from the last operator() call
	PX_NOCOPY(FrustumTest)
};
}
// Reports every primitive in the subtree rooted at node0 to the adapter,
// without any further culling tests (used when a node is known fully visible).
// 'indices' is the leaf remap table, or NULL when leaves store primitive
// indices directly. Returns false as soon as the callback aborts.
static bool dumpNode(OverlapAdapter& oa, const BVHNode* const nodeBase, const BVHNode* node0, const PxU32* indices)
{
	// explicit stack, grown on demand; forceSize_Unsafe exposes the inline
	// capacity without initializing the entries
	PxInlineArray<const BVHNode*, RAW_TRAVERSAL_STACK_SIZE> stack;
	stack.forceSize_Unsafe(RAW_TRAVERSAL_STACK_SIZE);
	stack[0] = node0;
	PxU32 stackIndex = 1;

	while(stackIndex > 0)
	{
		const BVHNode* node = stack[--stackIndex];
		while(1)
		{
			if(node->isLeaf())
			{
				PxU32 nbPrims = node->getNbPrimitives();
				const PxU32* prims = indices ? node->getPrimitives(indices) : NULL;
				while(nbPrims--)
				{
					const PxU32 primIndex = indices ? *prims++ : node->getPrimitiveIndex();
					if(!oa.invoke(primIndex))
						return false;
				}
				break;
			}
			else
			{
				// descend into the first child, push the second
				const BVHNode* children = node->getPos(nodeBase);

				node = children;
				stack[stackIndex++] = children + 1;
				if(stackIndex == stack.capacity())
					stack.resizeUninitialized(stack.capacity() * 2);
			}
		}
	}
	return true;
}
// Frustum culling: reports every primitive whose bounds pass the plane set.
// Uses a hand-rolled traversal (instead of the generic AABBTreeOverlap) so it
// can exploit FrustumTest::mOutClipMask: when a node is fully inside all
// planes (mask==0) its whole subtree is dumped without further plane tests.
bool BVH::cull(PxU32 nbPlanes, const PxPlane* planes, OverlapCallback& cb, PxGeometryQueryFlags flags) const
{
	PX_SIMD_GUARD_CNDT(flags & PxGeometryQueryFlag::eSIMD_GUARD)

	OverlapAdapter oa(cb);

	const FrustumTest test(nbPlanes, planes);

	if(0)
	{
		// PT: this vanilla codepath is slower
		if(mData.mIndices)
			return AABBTreeOverlap<true, FrustumTest, BVHTree, BVHNode, OverlapAdapter>()(mData.mBounds, BVHTree(mData), test, oa);
		else
			return AABBTreeOverlap<false, FrustumTest, BVHTree, BVHNode, OverlapAdapter>()(mData.mBounds, BVHTree(mData), test, oa);
	}
	else
	{
		const PxBounds3* bounds = mData.mBounds.getBounds();
		const bool hasIndices = mData.mIndices!=NULL;

		PxInlineArray<const BVHNode*, RAW_TRAVERSAL_STACK_SIZE> stack;
		stack.forceSize_Unsafe(RAW_TRAVERSAL_STACK_SIZE);
		const BVHNode* const nodeBase = mData.mNodes;
		stack[0] = nodeBase;
		PxU32 stackIndex = 1;

		while(stackIndex > 0)
		{
			const BVHNode* node = stack[--stackIndex];
			Vec3V center, extents;
			node->getAABBCenterExtentsV(&center, &extents);
			while(test(center, extents))
			{
				if(!test.mOutClipMask)
				{
					// node fully inside the frustum: report the whole subtree
					if(!dumpNode(oa, nodeBase, node, mData.mIndices))
						return false;
					break;
				}
				else
				{
					if(node->isLeaf())
					{
						PxU32 nbPrims = node->getNbPrimitives();
						// single-prim leaf: the node's AABB equals the prim's
						// AABB, already tested, so skip the per-prim test
						const bool doBoxTest = nbPrims > 1;
						const PxU32* prims = hasIndices ? node->getPrimitives(mData.mIndices) : NULL;
						while(nbPrims--)
						{
							const PxU32 primIndex = hasIndices ? *prims++ : node->getPrimitiveIndex();

							if(doBoxTest)
							{
								// getBoundsTimesTwo returns doubled values; halve them
								Vec4V center2, extents2;
								getBoundsTimesTwo(center2, extents2, bounds, primIndex);

								const float half = 0.5f;
								const FloatV halfV = FLoad(half);

								const Vec4V extents_ = V4Scale(extents2, halfV);
								const Vec4V center_ = V4Scale(center2, halfV);

								if(!test(Vec3V_From_Vec4V(center_), Vec3V_From_Vec4V(extents_)))
									continue;
							}

							if(!oa.invoke(primIndex))
								return false;
						}
						break;
					}

					const BVHNode* children = node->getPos(nodeBase);

					node = children;
					stack[stackIndex++] = children + 1;
					if(stackIndex == stack.capacity())
						stack.resizeUninitialized(stack.capacity() * 2);
					node->getAABBCenterExtentsV(&center, &extents);
				}
			}
		}
		return true;
	}
}
// Recomputes every node's AABB bottom-up from the current object bounds.
void BVH::refit()
{
	mData.fullRefit(mData.mBounds.getBounds());
}
// Writes new bounds for one object and marks the containing tree node for a
// later partialRefit(). Returns false for an out-of-range index or when the
// node lookup fails — note the bounds array has already been updated in the
// latter case; only the refit marking is skipped.
bool BVH::updateBoundsInternal(PxU32 localIndex, const PxBounds3& newBounds)
{
	if(localIndex>=mData.mNbIndices)
		return false;

	PxBounds3* bounds = mData.mBounds.getBounds();
	bounds[localIndex] = newBounds;

	// Lazy-create update map (object index -> tree node index)
	if(!mData.getUpdateMap())
		mData.createUpdateMap(mData.mNbIndices);

	PxU32* mMapping = mData.getUpdateMap();
	if(mMapping)
	{
		const PxU32 treeNodeIndex = mMapping[localIndex];
		if(treeNodeIndex!=0xffffffff)	// 0xffffffff = no node mapped for this object
		{
			mData.markNodeForRefit(treeNodeIndex);
			return true;
		}
	}
	return false;
}
// Public wrapper around updateBoundsInternal().
bool BVH::updateBounds(PxU32 boundsIndex, const PxBounds3& newBounds)
{
	return updateBoundsInternal(boundsIndex, newBounds);
}
// Refits only the nodes flagged by previous updateBounds() calls.
void BVH::partialRefit()
{
	mData.refitMarkedNodes(mData.mBounds.getBounds());
}
// Generic user-driven traversal: visitNode() decides whether to descend into
// a node, reportLeaf() receives each visited leaf's primitive indices.
// Returns false as soon as reportLeaf() aborts.
bool BVH::traverse(TraversalCallback& cb) const
{
	// PT: copy-pasted from AABBTreeOverlap and modified
	PxInlineArray<const BVHNode*, RAW_TRAVERSAL_STACK_SIZE> stack;
	stack.forceSize_Unsafe(RAW_TRAVERSAL_STACK_SIZE);
	const BVHNode* const nodeBase = mData.getNodes();
	stack[0] = nodeBase;
	PxU32 stackIndex = 1;

	while(stackIndex > 0)
	{
		const BVHNode* node = stack[--stackIndex];

		while(cb.visitNode(node->mBV))
		{
			if(node->isLeaf())
			{
				if(mData.getIndices())
				{
					// leaves reference a contiguous run of the remap table
					if(!cb.reportLeaf(node->getNbPrimitives(), node->getPrimitives(mData.getIndices())))
						return false;
				}
				else
				{
					// 1-prim-per-leaf tree: index stored directly in the node
					PX_ASSERT(node->getNbPrimitives()==1);
					const PxU32 primIndex = node->getPrimitiveIndex();
					if(!cb.reportLeaf(node->getNbPrimitives(), &primIndex))
						return false;
				}
				break;
			}

			const BVHNode* children = node->getPos(nodeBase);

			node = children;
			stack[stackIndex++] = children + 1;
			if(stackIndex == stack.capacity())
				stack.resizeUninitialized(stack.capacity() * 2);
		}
	}
	return true;
}
#include "geometry/PxMeshQuery.h"
#define GU_BVH_STACK_SIZE 1024 // Default size of local stacks for non-recursive traversals.
// Tests every primitive pair between two leaf nodes and appends overlapping
// pairs to the report callback's buffer, flushing it when full.
// indices0/indices1 are the leaves' remap tables (NULL = direct index).
// Returns true if at least one overlap was found; sets 'abort' when the
// callback's flushResults() requested an early stop.
static bool doLeafVsLeaf(PxReportCallback<PxGeomIndexPair>& callback, const BVHNode* node0, const PxBounds3* bounds0, const PxU32* indices0,
													 const BVHNode* node1, const PxBounds3* bounds1, const PxU32* indices1,
													 bool& abort)
{
	// local copies of the callback's buffer state, written back on exit
	PxGeomIndexPair* dst = callback.mBuffer;
	PxU32 capacity = callback.mCapacity;
	PxU32 currentSize = callback.mSize;
	PX_ASSERT(currentSize<capacity);

	bool foundHit = false;
	abort = false;

	const FloatV halfV = FLoad(0.5f);

	PxU32 nbPrims0 = node0->getNbPrimitives();
	const PxU32* prims0 = indices0 ? node0->getPrimitives(indices0) : NULL;
	while(nbPrims0--)
	{
		const PxU32 primIndex0 = prims0 ? *prims0++ : node0->getPrimitiveIndex();

		// getBoundsTimesTwo returns doubled center/extents; halve them once
		Vec3V center0, extents0;
		{
			Vec4V center2, extents2;
			getBoundsTimesTwo(center2, extents2, bounds0, primIndex0);
			extents0 = Vec3V_From_Vec4V(V4Scale(extents2, halfV));
			center0 = Vec3V_From_Vec4V(V4Scale(center2, halfV));
		}

		PxU32 nbPrims1 = node1->getNbPrimitives();
		const PxU32* prims1 = indices1 ? node1->getPrimitives(indices1) : NULL;
		while(nbPrims1--)
		{
			const PxU32 primIndex1 = prims1 ? *prims1++ : node1->getPrimitiveIndex();

			Vec3V center1, extents1;
			{
				Vec4V center2, extents2;
				getBoundsTimesTwo(center2, extents2, bounds1, primIndex1);
				extents1 = Vec3V_From_Vec4V(V4Scale(extents2, halfV));
				center1 = Vec3V_From_Vec4V(V4Scale(center2, halfV));
			}

			// AABB-vs-AABB: |c1-c0| <= e0+e1 on all three axes
			if(PxIntBool(V3AllGrtrOrEq(V3Add(extents0, extents1), V3Abs(V3Sub(center1, center0)))))
			{
				foundHit = true;

				// PT: TODO: refactor callback management code with BVH34
				dst[currentSize].id0 = primIndex0;
				dst[currentSize].id1 = primIndex1;
				currentSize++;
				if(currentSize==capacity)
				{
					// buffer full: hand results to the user, who may abort
					// or swap in a new buffer
					callback.mSize = 0;
					if(!callback.flushResults(currentSize, dst))
					{
						abort = true;
						return foundHit;
					}
					dst = callback.mBuffer;
					capacity = callback.mCapacity;
					currentSize = callback.mSize;
				}
			}
		}
	}
	callback.mSize = currentSize;
	return foundHit;
}
// Pushes two node-index pairs (a,b) and (c,d) onto the traversal stack.
// The caller is responsible for ensuring enough remaining capacity.
static PX_FORCE_INLINE void pushChildren(PxGeomIndexPair* stack, PxU32& nb, PxU32 a, PxU32 b, PxU32 c, PxU32 d)
{
	PxGeomIndexPair& first = stack[nb++];
	first.id0 = a;
	first.id1 = b;

	PxGeomIndexPair& second = stack[nb++];
	second.id0 = c;
	second.id1 = d;
}
// Cold-path helper for user-requested aborts: discards buffered results and
// signals the abort to the caller. Always returns true (the query "hit").
// PX_NOINLINE keeps this out of the hot traversal loop.
static PX_NOINLINE bool abortQuery(PxReportCallback<PxGeomIndexPair>& callback, bool& abort)
{
	abort = true;
	callback.mSize = 0;
	return true;
}
// Simultaneous traversal of two BVHs, reporting all overlapping primitive
// pairs through 'callback'. Returns true if any pair overlapped; sets _abort
// if the user stopped the query from flushResults().
// NOTE(review): the traversal stack is a fixed GU_BVH_STACK_SIZE (1024) array
// with no overflow check — presumably sized for the worst expected tree
// depth; confirm for very large/degenerate trees.
static bool BVH_BVH(PxReportCallback<PxGeomIndexPair>& callback, const BVH& tree0, const BVH& tree1, bool& _abort)
{
	const BVHNode* PX_RESTRICT node0 = tree0.getNodes();
	const BVHNode* PX_RESTRICT node1 = tree1.getNodes();
	PX_ASSERT(node0 && node1);

	const PxBounds3* bounds0 = tree0.getData().mBounds.getBounds();
	const PxBounds3* bounds1 = tree1.getData().mBounds.getBounds();

	const PxU32* indices0 = tree0.getIndices();
	const PxU32* indices1 = tree1.getIndices();
	{
		// stack of (nodeIndex0, nodeIndex1) pairs still to be tested
		PxU32 nb=1;
		PxGeomIndexPair stack[GU_BVH_STACK_SIZE];
		stack[0].id0 = 0;
		stack[0].id1 = 0;

		bool status = false;

		const BVHNode* const root0 = node0;
		const BVHNode* const root1 = node1;

		do
		{
			const PxGeomIndexPair& childData = stack[--nb];
			node0 = root0 + childData.id0;
			node1 = root1 + childData.id1;

			if(node0->mBV.intersects(node1->mBV))
			{
				const PxU32 isLeaf0 = node0->isLeaf();
				const PxU32 isLeaf1 = node1->isLeaf();

				if(isLeaf0)
				{
					if(isLeaf1)
					{
						// leaf vs leaf: test the primitive pairs
						bool abort;
						if(doLeafVsLeaf(callback, node0, bounds0, indices0, node1, bounds1, indices1, abort))
							status = true;
						if(abort)
							return abortQuery(callback, _abort);
					}
					else
					{
						// descend tree1 only
						const PxU32 posIndex1 = node1->getPosIndex();
						pushChildren(stack, nb, childData.id0, posIndex1, childData.id0, posIndex1 + 1);
					}
				}
				else if(isLeaf1)
				{
					// descend tree0 only
					const PxU32 posIndex0 = node0->getPosIndex();
					pushChildren(stack, nb, posIndex0, childData.id1, posIndex0 + 1, childData.id1);
				}
				else
				{
					// descend both: push the 4 child combinations
					const PxU32 posIndex0 = node0->getPosIndex();
					const PxU32 posIndex1 = node1->getPosIndex();

					pushChildren(stack, nb, posIndex0, posIndex1, posIndex0, posIndex1 + 1);
					pushChildren(stack, nb, posIndex0 + 1, posIndex1, posIndex0 + 1, posIndex1 + 1);
				}
			}
		}while(nb);

		return status;
	}
}
// Public entry point: finds all overlapping primitive pairs between two BVHs.
// If the user callback supplied no buffer, a 256-entry stack buffer is lent
// to it for the duration of the query and detached afterwards.
bool physx::PxFindOverlap(PxReportCallback<PxGeomIndexPair>& callback, const PxBVH& bvh0, const PxBVH& bvh1)
{
	PX_SIMD_GUARD

	// PT: TODO: refactor callback management code with BVH34

	PxGeomIndexPair stackBuffer[256];
	bool mustResetBuffer;
	if(callback.mBuffer)
	{
		// user-provided buffer: use it as-is
		PX_ASSERT(callback.mCapacity);
		mustResetBuffer = false;
	}
	else
	{
		// lend the stack buffer; clamp capacity to its size
		callback.mBuffer = stackBuffer;
		PX_ASSERT(callback.mCapacity<=256);
		if(callback.mCapacity==0 || callback.mCapacity>256)
		{
			callback.mCapacity = 256;
		}
		callback.mSize = 0;
		mustResetBuffer = true;
	}

	bool abort = false;
	const bool status = BVH_BVH(callback, static_cast<const BVH&>(bvh0), static_cast<const BVH&>(bvh1), abort);
	if(!abort)
	{
		// flush any pairs still buffered (on abort the buffer was discarded)
		const PxU32 currentSize = callback.mSize;
		if(currentSize)
		{
			callback.mSize = 0;
			callback.flushResults(currentSize, callback.mBuffer);
		}
	}

	// detach the stack buffer before it goes out of scope
	if(mustResetBuffer)
		callback.mBuffer = NULL;
	return status;
}
| 26,115 | C++ | 27.793826 | 165 | 0.700632 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/GuCookingSDF.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuCookingSDF.h"
#include "cooking/PxTriangleMeshDesc.h"
#include "GuSDF.h"
#include "GuCooking.h"
#include "PxSDFBuilder.h"
namespace physx
{
struct MeshData
{
MeshData(const PxTriangleMeshDesc& desc)
{
m_positions.resize(desc.points.count);
m_indices.resize(desc.triangles.count * 3);
immediateCooking::gatherStrided(desc.points.data, &m_positions[0], desc.points.count, sizeof(PxVec3), desc.points.stride);
immediateCooking::gatherStrided(desc.triangles.data, &m_indices[0], desc.triangles.count, 3 * sizeof(PxU32), desc.triangles.stride);
}
void GetBounds(PxVec3& outMinExtents, PxVec3& outMaxExtents) const
{
PxVec3 minExtents(FLT_MAX);
PxVec3 maxExtents(-FLT_MAX);
for (PxU32 i = 0; i < m_positions.size(); ++i)
{
const PxVec3& a = m_positions[i];
minExtents = a.minimum(minExtents);
maxExtents = a.maximum(maxExtents);
}
outMinExtents = minExtents;
outMaxExtents = maxExtents;
}
PxArray<PxVec3> m_positions;
PxArray<PxU32> m_indices;
};
// Quantizes sparse-SDF subgrid samples into the requested per-pixel format.
// 8/16-bit modes store each value normalized to [subgridsMinSdfValue,
// subgridsMaxSdfValue]; the 32-bit mode stores raw floats.
// Fixes: (1) a zero-width value range previously caused a division by zero
// (s became inf/NaN); (2) samples outside [min,max] produced a normalized
// value outside [0,1], and converting a negative float to an unsigned integer
// is undefined behavior — the normalized value is now clamped.
PX_PHYSX_COMMON_API void quantizeSparseSDF(PxSdfBitsPerSubgridPixel::Enum bitsPerSubgridPixel,
	const PxArray<PxReal>& uncompressedSdfDataSubgrids, PxArray<PxU8>& compressedSdfDataSubgrids,
	PxReal subgridsMinSdfValue, PxReal subgridsMaxSdfValue)
{
	PxU32 bytesPerPixel = PxU32(bitsPerSubgridPixel);
	compressedSdfDataSubgrids.resize(uncompressedSdfDataSubgrids.size() * bytesPerPixel);

	// one output view per target format, all aliasing the same byte buffer
	PxReal* ptr32 = reinterpret_cast<PxReal*>(compressedSdfDataSubgrids.begin());
	PxU16* ptr16 = reinterpret_cast<PxU16*>(compressedSdfDataSubgrids.begin());
	PxU8* ptr8 = compressedSdfDataSubgrids.begin();

	// guard against a degenerate (empty) value range: map everything to 0
	const PxReal range = subgridsMaxSdfValue - subgridsMinSdfValue;
	const PxReal s = range > 0.0f ? 1.0f / range : 0.0f;

	for (PxU32 i = 0; i < uncompressedSdfDataSubgrids.size(); ++i)
	{
		PxReal v = uncompressedSdfDataSubgrids[i];
		PxReal vNormalized = (v - subgridsMinSdfValue) * s;
		// clamp to [0,1]: out-of-range samples would otherwise trigger UB
		// when converted to an unsigned integer below
		if (vNormalized < 0.0f)
			vNormalized = 0.0f;
		else if (vNormalized > 1.0f)
			vNormalized = 1.0f;

		switch (bitsPerSubgridPixel)
		{
		case PxSdfBitsPerSubgridPixel::e8_BIT_PER_PIXEL:
			ptr8[i] = PxU8(255.0f * vNormalized);
			break;
		case PxSdfBitsPerSubgridPixel::e16_BIT_PER_PIXEL:
			ptr16[i] = PxU16(65535.0f * vNormalized);
			break;
		case PxSdfBitsPerSubgridPixel::e32_BIT_PER_PIXEL:
			ptr32[i] = v;	// raw float path: no quantization
			break;
		}
	}
}
// Linearizes a 3D grid coordinate (x fastest, then y, then z) into a flat
// array index for a width*height*depth grid.
PX_FORCE_INLINE PxU32 idxCompact(PxU32 x, PxU32 y, PxU32 z, PxU32 width, PxU32 height)
{
	return x + width * (y + height * z);
}
// Produces a tightly-packed 32-bit index array from the mesh's triangle
// buffer, widening 16-bit indices and/or removing the stride as needed.
void convert16To32Bits(PxSimpleTriangleMesh mesh, PxArray<PxU32>& indices32)
{
	const PxU32 nbTris = mesh.triangles.count;
	indices32.resize(3 * nbTris);

	if (mesh.flags & PxMeshFlag::e16_BIT_INDICES)
	{
		// widen each strided 16-bit triangle to three 32-bit indices
		PxU32* dst = indices32.begin();
		const PxU8* src = reinterpret_cast<const PxU8*>(mesh.triangles.data);
		for (PxU32 i = 0; i < nbTris; i++, src += mesh.triangles.stride)
		{
			const PxU16* tri16 = reinterpret_cast<const PxU16*>(src);
			dst[3 * i + 0] = tri16[0];
			dst[3 * i + 1] = tri16[1];
			dst[3 * i + 2] = tri16[2];
		}
	}
	else
	{
		// already 32-bit: just strip the stride
		immediateCooking::gatherStrided(mesh.triangles.data, indices32.begin(), nbTris, sizeof(PxU32) * 3, mesh.triangles.stride);
	}
}
// Builds a sparse (subgrid-based) signed distance field for the mesh in 'desc'.
// The coarse background samples go to 'sdfCoarse', the dense quantized subgrid
// blocks (arranged in 3D-texture layout) go to 'sdfDataSubgrids', and
// 'sdfSubgridsStartSlots' maps coarse cells to their subgrid blocks.
// On exit the buffers and grid parameters are wired back into 'sdfDesc'.
// Returns true (there is currently no failure path).
static bool createSDFSparse(PxTriangleMeshDesc& desc, PxSDFDesc& sdfDesc, PxArray<PxReal>& sdfCoarse, PxArray<PxU8>& sdfDataSubgrids,
	PxArray<PxU32>& sdfSubgridsStartSlots)
{
	PX_ASSERT(sdfDesc.subgridSize > 0);

	MeshData mesh(desc);

	// SDF domain: user-provided bounds if set, otherwise the mesh AABB.
	PxVec3 meshLower, meshUpper;
	if (sdfDesc.sdfBounds.isEmpty())
		mesh.GetBounds(meshLower, meshUpper);
	else
	{
		meshLower = sdfDesc.sdfBounds.minimum;
		meshUpper = sdfDesc.sdfBounds.maximum;
	}

	PxVec3 edges = meshUpper - meshLower;

	const PxReal spacing = sdfDesc.spacing;

	// tweak spacing to avoid edge cases for vertices laying on the boundary
	// just covers the case where an edge is a whole multiple of the spacing.
	PxReal spacingEps = spacing * (1.0f - 1e-4f);

	// make sure to have at least one particle in each dimension
	PxI32 dx, dy, dz;
	dx = spacing > edges.x ? 1 : PxI32(edges.x / spacingEps);
	dy = spacing > edges.y ? 1 : PxI32(edges.y / spacingEps);
	dz = spacing > edges.z ? 1 : PxI32(edges.z / spacingEps);

	// pad the grid by 4 cells per axis (same padding as the dense path below)
	dx += 4;
	dy += 4;
	dz += 4;

	//Make sure that dx, dy and dz are multiple of subgridSize
	dx = ((dx + sdfDesc.subgridSize - 1) / sdfDesc.subgridSize) * sdfDesc.subgridSize;
	dy = ((dy + sdfDesc.subgridSize - 1) / sdfDesc.subgridSize) * sdfDesc.subgridSize;
	dz = ((dz + sdfDesc.subgridSize - 1) / sdfDesc.subgridSize) * sdfDesc.subgridSize;

	PX_ASSERT(dx % sdfDesc.subgridSize == 0);
	PX_ASSERT(dy % sdfDesc.subgridSize == 0);
	PX_ASSERT(dz % sdfDesc.subgridSize == 0);

	// we shift the voxelization bounds so that the voxel centers
	// lie symmetrically to the center of the object. this reduces the
	// chance of missing features, and also better aligns the particles
	// with the mesh
	PxVec3 meshOffset;
	meshOffset.x = 0.5f * (spacing - (edges.x - (dx - 1)*spacing));
	meshOffset.y = 0.5f * (spacing - (edges.y - (dy - 1)*spacing));
	meshOffset.z = 0.5f * (spacing - (edges.z - (dz - 1)*spacing));
	meshLower -= meshOffset;

	sdfDesc.meshLower = meshLower;

	sdfDesc.dims.x = dx;
	sdfDesc.dims.y = dy;
	sdfDesc.dims.z = dz;

	// If a simplified "base mesh" was supplied, build the SDF from it instead of
	// the cooked mesh; its indices/vertices may need de-striding first.
	PxArray<PxU32> indices32;
	PxArray<PxVec3> vertices;
	const PxVec3* verticesPtr = NULL;
	bool baseMeshSpecified = sdfDesc.baseMesh.triangles.data && sdfDesc.baseMesh.points.data;
	if (baseMeshSpecified)
	{
		convert16To32Bits(sdfDesc.baseMesh, indices32);

		if (sdfDesc.baseMesh.points.stride != sizeof(PxVec3))
		{
			vertices.resize(sdfDesc.baseMesh.points.count);
			immediateCooking::gatherStrided(sdfDesc.baseMesh.points.data, vertices.begin(), sdfDesc.baseMesh.points.count, sizeof(PxVec3), sdfDesc.baseMesh.points.stride);
			verticesPtr = vertices.begin();
		}
		else
			verticesPtr = reinterpret_cast<const PxVec3*>(sdfDesc.baseMesh.points.data);
	}

	PxReal subgridsMinSdfValue = 0.0f;
	PxReal subgridsMaxSdfValue = 1.0f;

	PxReal narrowBandThickness = sdfDesc.narrowBandThicknessRelativeToSdfBoundsDiagonal * edges.magnitude();

	if (sdfDesc.sdfBuilder == NULL)
	{
		// CPU fallback path: sparse SDF, 3D-texture layout and quantization are
		// performed as three separate steps.
		PxArray<PxReal> denseSdf;

		PxArray<PxReal> sparseSdf;
		Gu::SDFUsingWindingNumbersSparse(
			baseMeshSpecified ? verticesPtr : &mesh.m_positions[0],
			baseMeshSpecified ? indices32.begin() : &mesh.m_indices[0],
			baseMeshSpecified ? indices32.size() : mesh.m_indices.size(),
			dx, dy, dz,
			meshLower, meshLower + PxVec3(static_cast<PxReal>(dx), static_cast<PxReal>(dy), static_cast<PxReal>(dz)) * spacing, narrowBandThickness, sdfDesc.subgridSize,
			sdfCoarse, sdfSubgridsStartSlots, sparseSdf, denseSdf, subgridsMinSdfValue, subgridsMaxSdfValue, 16, sdfDesc.sdfBuilder);
			// NOTE(review): the literal 16 is presumably a thread count — the dense
			// path uses sdfDesc.numThreadsForSdfConstruction instead; verify.

		PxArray<PxReal> uncompressedSdfDataSubgrids;
		Gu::convertSparseSDFTo3DTextureLayout(dx, dy, dz, sdfDesc.subgridSize, sdfSubgridsStartSlots.begin(), sparseSdf.begin(), sparseSdf.size(), uncompressedSdfDataSubgrids,
			sdfDesc.sdfSubgrids3DTexBlockDim.x, sdfDesc.sdfSubgrids3DTexBlockDim.y, sdfDesc.sdfSubgrids3DTexBlockDim.z);

		if (sdfDesc.bitsPerSubgridPixel == 4)
		{
			//32bit values are stored as normal floats while 16bit and 8bit values are scaled to 0...1 range and then scaled back to original range
			subgridsMinSdfValue = 0.0f;
			subgridsMaxSdfValue = 1.0f;
		}

		quantizeSparseSDF(sdfDesc.bitsPerSubgridPixel, uncompressedSdfDataSubgrids, sdfDataSubgrids,
			subgridsMinSdfValue, subgridsMaxSdfValue);
	}
	else
	{
		PxU32* indices = baseMeshSpecified ? indices32.begin() : &mesh.m_indices[0];
		PxU32 numTriangleIndices = baseMeshSpecified ? indices32.size() : mesh.m_indices.size();
		const PxVec3* verts = baseMeshSpecified ? verticesPtr : &mesh.m_positions[0];
		PxU32 numVertices = baseMeshSpecified ? sdfDesc.baseMesh.points.count : mesh.m_positions.size();

		PxArray<PxU32> repairedIndices;
		//Analyze the mesh to catch and fix some special cases
		//There are meshes where every triangle is present once with cw and once with ccw orientation. Try to filter out only one set
		Gu::analyzeAndFixMesh(verts, indices, numTriangleIndices, repairedIndices);
		const PxU32* ind = repairedIndices.size() > 0 ? repairedIndices.begin() : indices;
		if (repairedIndices.size() > 0)
			numTriangleIndices = repairedIndices.size();

		//The GPU SDF builder does sparse SDF, 3d texture layout and quantization in one go to best utilize the gpu
		sdfDesc.sdfBuilder->buildSparseSDF(verts,
			numVertices,
			ind,
			numTriangleIndices,
			dx, dy, dz,
			meshLower, meshLower + PxVec3(static_cast<PxReal>(dx), static_cast<PxReal>(dy), static_cast<PxReal>(dz)) * spacing, narrowBandThickness, sdfDesc.subgridSize, sdfDesc.bitsPerSubgridPixel,
			sdfCoarse, sdfSubgridsStartSlots, sdfDataSubgrids, subgridsMinSdfValue, subgridsMaxSdfValue,
			sdfDesc.sdfSubgrids3DTexBlockDim.x, sdfDesc.sdfSubgrids3DTexBlockDim.y, sdfDesc.sdfSubgrids3DTexBlockDim.z, 0);
	}

	// Publish the computed buffers and quantization range through the descriptor.
	sdfDesc.sdf.count = sdfCoarse.size();
	sdfDesc.sdf.stride = sizeof(PxReal);
	sdfDesc.sdf.data = sdfCoarse.begin();
	sdfDesc.sdfSubgrids.count = sdfDataSubgrids.size();
	sdfDesc.sdfSubgrids.stride = sizeof(PxU8);
	sdfDesc.sdfSubgrids.data = sdfDataSubgrids.begin();
	sdfDesc.sdfStartSlots.count = sdfSubgridsStartSlots.size();
	sdfDesc.sdfStartSlots.stride = sizeof(PxU32);
	sdfDesc.sdfStartSlots.data = sdfSubgridsStartSlots.begin();

	sdfDesc.subgridsMinSdfValue = subgridsMinSdfValue;
	sdfDesc.subgridsMaxSdfValue = subgridsMaxSdfValue;

	return true;
}
// Builds a dense signed distance field for the mesh in 'desc' into 'sdf', or
// dispatches to createSDFSparse() when a subgrid size was requested (in which
// case sdfDataSubgrids / sdfSubgridsStartSlots are filled as well; the dense
// path leaves them untouched). On exit the grid parameters and the sdf buffer
// are wired back into 'sdfDesc'. Returns true (no failure path at present).
static bool createSDF(PxTriangleMeshDesc& desc, PxSDFDesc& sdfDesc, PxArray<PxReal>& sdf, PxArray<PxU8>& sdfDataSubgrids, PxArray<PxU32>& sdfSubgridsStartSlots)
{
	if (sdfDesc.subgridSize > 0)
	{
		// Sparse SDF requested: delegate entirely.
		return createSDFSparse(desc, sdfDesc, sdf, sdfDataSubgrids, sdfSubgridsStartSlots);
	}

	MeshData mesh(desc);

	// SDF domain: user-provided bounds if set, otherwise the mesh AABB.
	PxVec3 meshLower, meshUpper;
	if (sdfDesc.sdfBounds.isEmpty())
		mesh.GetBounds(meshLower, meshUpper);
	else
	{
		meshLower = sdfDesc.sdfBounds.minimum;
		meshUpper = sdfDesc.sdfBounds.maximum;
	}

	PxVec3 edges = meshUpper - meshLower;

	const PxReal spacing = sdfDesc.spacing;

	// tweak spacing to avoid edge cases for vertices laying on the boundary
	// just covers the case where an edge is a whole multiple of the spacing.
	PxReal spacingEps = spacing * (1.0f - 1e-4f);

	// make sure to have at least one particle in each dimension
	PxI32 dx, dy, dz;
	dx = spacing > edges.x ? 1 : PxI32(edges.x / spacingEps);
	dy = spacing > edges.y ? 1 : PxI32(edges.y / spacingEps);
	dz = spacing > edges.z ? 1 : PxI32(edges.z / spacingEps);

	// pad the grid by 4 cells per axis
	dx += 4;
	dy += 4;
	dz += 4;

	const PxU32 numVoxels = dx * dy * dz;

	// we shift the voxelization bounds so that the voxel centers
	// lie symmetrically to the center of the object. this reduces the
	// chance of missing features, and also better aligns the particles
	// with the mesh
	PxVec3 meshOffset;
	meshOffset.x = 0.5f * (spacing - (edges.x - (dx - 1)*spacing));
	meshOffset.y = 0.5f * (spacing - (edges.y - (dy - 1)*spacing));
	meshOffset.z = 0.5f * (spacing - (edges.z - (dz - 1)*spacing));
	meshLower -= meshOffset;

	sdfDesc.meshLower = meshLower;

	sdfDesc.dims.x = dx;
	sdfDesc.dims.y = dy;
	sdfDesc.dims.z = dz;

	sdf.resize(numVoxels);

	// If a simplified "base mesh" was supplied, build the SDF from it instead of
	// the cooked mesh; its indices/vertices may need de-striding first.
	PxArray<PxU32> indices32;
	PxArray<PxVec3> vertices;
	const PxVec3* verticesPtr = NULL;
	bool baseMeshSpecified = sdfDesc.baseMesh.triangles.data && sdfDesc.baseMesh.points.data;
	if (baseMeshSpecified)
	{
		convert16To32Bits(sdfDesc.baseMesh, indices32);

		if (sdfDesc.baseMesh.points.stride != sizeof(PxVec3))
		{
			vertices.resize(sdfDesc.baseMesh.points.count);
			immediateCooking::gatherStrided(sdfDesc.baseMesh.points.data, vertices.begin(), sdfDesc.baseMesh.points.count, sizeof(PxVec3), sdfDesc.baseMesh.points.stride);
			verticesPtr = vertices.begin();
		}
		else
			verticesPtr = reinterpret_cast<const PxVec3*>(sdfDesc.baseMesh.points.data);
	}

	PxU32* indices = baseMeshSpecified ? indices32.begin() : &mesh.m_indices[0];
	PxU32 numTriangleIndices = baseMeshSpecified ? indices32.size() : mesh.m_indices.size();
	const PxVec3* verts = baseMeshSpecified ? verticesPtr : &mesh.m_positions[0];
	PxU32 numVertices = baseMeshSpecified ? sdfDesc.baseMesh.points.count : mesh.m_positions.size();

	if (sdfDesc.sdfBuilder == NULL)
	{
		// CPU path.
		Gu::SDFUsingWindingNumbers(verts, indices, numTriangleIndices, dx, dy, dz, &sdf[0], meshLower,
			meshLower + PxVec3(static_cast<PxReal>(dx), static_cast<PxReal>(dy), static_cast<PxReal>(dz)) * spacing, NULL, true,
			sdfDesc.numThreadsForSdfConstruction, sdfDesc.sdfBuilder);
	}
	else
	{
		// GPU path.
		PxArray<PxU32> repairedIndices;
		//Analyze the mesh to catch and fix some special cases
		//There are meshes where every triangle is present once with cw and once with ccw orientation. Try to filter out only one set
		Gu::analyzeAndFixMesh(verts, indices, numTriangleIndices, repairedIndices);
		const PxU32* ind = repairedIndices.size() > 0 ? repairedIndices.begin() : indices;
		if (repairedIndices.size() > 0)
			numTriangleIndices = repairedIndices.size();
		sdfDesc.sdfBuilder->buildSDF(verts, numVertices, ind, numTriangleIndices, dx, dy, dz, meshLower,
			meshLower + PxVec3(static_cast<PxReal>(dx), static_cast<PxReal>(dy), static_cast<PxReal>(dz)) * spacing, true, &sdf[0]);
	}

	// Publish the dense SDF through the descriptor.
	sdfDesc.sdf.count = sdfDesc.dims.x * sdfDesc.dims.y * sdfDesc.dims.z;
	sdfDesc.sdf.stride = sizeof(PxReal);
	sdfDesc.sdf.data = &sdf[0];

	return true;
}
// Entry point for SDF cooking: computes a signed distance field for the mesh
// unless the descriptor already carries user-provided SDF data or the spacing
// is invalid. Always returns true.
bool buildSDF(PxTriangleMeshDesc& desc, PxArray<PxReal>& sdf, PxArray<PxU8>& sdfDataSubgrids, PxArray<PxU32>& sdfSubgridsStartSlots)
{
	PxSDFDesc& sdfDesc = *desc.sdfDesc;

	const bool hasUserProvidedSdf = sdfDesc.sdf.data != NULL;
	const bool hasValidSpacing = sdfDesc.spacing > 0.f;
	if (!hasUserProvidedSdf && hasValidSpacing)
	{
		// Calculate signed distance field here if no sdf data provided.
		createSDF(desc, sdfDesc, sdf, sdfDataSubgrids, sdfSubgridsStartSlots);
		sdfDesc.sdf.stride = sizeof(PxReal);
	}
	return true;
}
}
| 15,609 | C++ | 37.54321 | 190 | 0.72503 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/distance/GuDistanceSegmentBox.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuDistanceSegmentBox.h"
#include "GuDistancePointBox.h"
#include "GuDistanceSegmentSegment.h"
#include "GuDistancePointSegment.h"
#include "GuIntersectionRayBox.h"
using namespace physx;
// Support routine for distanceLineBoxSquared(): handles the case where the
// (reflected) line pierces the region of the +x[i0] box face. (i0,i1,i2) is a
// permutation of (0,1,2) with i0 the face axis.
// rkPnt:         in: line origin in box space; out: closest point on the box
//                (only written when pfLParam is non-NULL).
// rkDir:         line direction in box space (components made non-negative by the caller).
// extents:       box half-extents.
// rkPmE:         precomputed rkPnt - extents.
// pfLParam:      optional out: line parameter of the closest point.
// rfSqrDistance: in/out: squared distance, increased by this routine.
static void face(unsigned int i0, unsigned int i1, unsigned int i2, PxVec3& rkPnt, const PxVec3& rkDir, const PxVec3& extents, const PxVec3& rkPmE, PxReal* pfLParam, PxReal& rfSqrDistance)
{
	PxVec3 kPpE;
	PxReal fLSqr, fInv, fTmp, fParam, fT, fDelta;

	// kPpE = rkPnt + extents for the two in-face axes.
	kPpE[i1] = rkPnt[i1] + extents[i1];
	kPpE[i2] = rkPnt[i2] + extents[i2];
	if(rkDir[i0]*kPpE[i1] >= rkDir[i1]*rkPmE[i0])
	{
		if(rkDir[i0]*kPpE[i2] >= rkDir[i2]*rkPmE[i0])
		{
			// v[i1] >= -e[i1], v[i2] >= -e[i2] (distance = 0)
			// Line intersects the face itself; only the closest point and
			// line parameter need to be computed.
			if(pfLParam)
			{
				rkPnt[i0] = extents[i0];
				fInv = 1.0f/rkDir[i0];
				rkPnt[i1] -= rkDir[i1]*rkPmE[i0]*fInv;
				rkPnt[i2] -= rkDir[i2]*rkPmE[i0]*fInv;
				*pfLParam = -rkPmE[i0]*fInv;
			}
		}
		else
		{
			// v[i1] >= -e[i1], v[i2] < -e[i2]
			fLSqr = rkDir[i0]*rkDir[i0] + rkDir[i2]*rkDir[i2];
			fTmp = fLSqr*kPpE[i1] - rkDir[i1]*(rkDir[i0]*rkPmE[i0] + rkDir[i2]*kPpE[i2]);
			if(fTmp <= 2.0f*fLSqr*extents[i1])
			{
				// Closest to the -e[i2] edge of the face.
				fT = fTmp/fLSqr;
				fLSqr += rkDir[i1]*rkDir[i1];
				fTmp = kPpE[i1] - fT;
				fDelta = rkDir[i0]*rkPmE[i0] + rkDir[i1]*fTmp + rkDir[i2]*kPpE[i2];
				fParam = -fDelta/fLSqr;
				rfSqrDistance += rkPmE[i0]*rkPmE[i0] + fTmp*fTmp + kPpE[i2]*kPpE[i2] + fDelta*fParam;
				if(pfLParam)
				{
					*pfLParam = fParam;
					rkPnt[i0] = extents[i0];
					rkPnt[i1] = fT - extents[i1];
					rkPnt[i2] = -extents[i2];
				}
			}
			else
			{
				// Closest to the (+e[i1], -e[i2]) corner region.
				fLSqr += rkDir[i1]*rkDir[i1];
				fDelta = rkDir[i0]*rkPmE[i0] + rkDir[i1]*rkPmE[i1] + rkDir[i2]*kPpE[i2];
				fParam = -fDelta/fLSqr;
				rfSqrDistance += rkPmE[i0]*rkPmE[i0] + rkPmE[i1]*rkPmE[i1] + kPpE[i2]*kPpE[i2] + fDelta*fParam;
				if(pfLParam)
				{
					*pfLParam = fParam;
					rkPnt[i0] = extents[i0];
					rkPnt[i1] = extents[i1];
					rkPnt[i2] = -extents[i2];
				}
			}
		}
	}
	else
	{
		if ( rkDir[i0]*kPpE[i2] >= rkDir[i2]*rkPmE[i0] )
		{
			// v[i1] < -e[i1], v[i2] >= -e[i2]
			fLSqr = rkDir[i0]*rkDir[i0] + rkDir[i1]*rkDir[i1];
			fTmp = fLSqr*kPpE[i2] - rkDir[i2]*(rkDir[i0]*rkPmE[i0] + rkDir[i1]*kPpE[i1]);
			if(fTmp <= 2.0f*fLSqr*extents[i2])
			{
				// Closest to the -e[i1] edge of the face.
				fT = fTmp/fLSqr;
				fLSqr += rkDir[i2]*rkDir[i2];
				fTmp = kPpE[i2] - fT;
				fDelta = rkDir[i0]*rkPmE[i0] + rkDir[i1]*kPpE[i1] + rkDir[i2]*fTmp;
				fParam = -fDelta/fLSqr;
				rfSqrDistance += rkPmE[i0]*rkPmE[i0] + kPpE[i1]*kPpE[i1] + fTmp*fTmp + fDelta*fParam;
				if(pfLParam)
				{
					*pfLParam = fParam;
					rkPnt[i0] = extents[i0];
					rkPnt[i1] = -extents[i1];
					rkPnt[i2] = fT - extents[i2];
				}
			}
			else
			{
				// Closest to the (-e[i1], +e[i2]) corner region.
				fLSqr += rkDir[i2]*rkDir[i2];
				fDelta = rkDir[i0]*rkPmE[i0] + rkDir[i1]*kPpE[i1] + rkDir[i2]*rkPmE[i2];
				fParam = -fDelta/fLSqr;
				rfSqrDistance += rkPmE[i0]*rkPmE[i0] + kPpE[i1]*kPpE[i1] + rkPmE[i2]*rkPmE[i2] + fDelta*fParam;
				if(pfLParam)
				{
					*pfLParam = fParam;
					rkPnt[i0] = extents[i0];
					rkPnt[i1] = -extents[i1];
					rkPnt[i2] = extents[i2];
				}
			}
		}
		else
		{
			// v[i1] < -e[i1], v[i2] < -e[i2]
			fLSqr = rkDir[i0]*rkDir[i0]+rkDir[i2]*rkDir[i2];
			fTmp = fLSqr*kPpE[i1] - rkDir[i1]*(rkDir[i0]*rkPmE[i0] + rkDir[i2]*kPpE[i2]);
			if(fTmp >= 0.0f)
			{
				// v[i1]-edge is closest
				if ( fTmp <= 2.0f*fLSqr*extents[i1] )
				{
					fT = fTmp/fLSqr;
					fLSqr += rkDir[i1]*rkDir[i1];
					fTmp = kPpE[i1] - fT;
					fDelta = rkDir[i0]*rkPmE[i0] + rkDir[i1]*fTmp + rkDir[i2]*kPpE[i2];
					fParam = -fDelta/fLSqr;
					rfSqrDistance += rkPmE[i0]*rkPmE[i0] + fTmp*fTmp + kPpE[i2]*kPpE[i2] + fDelta*fParam;
					if(pfLParam)
					{
						*pfLParam = fParam;
						rkPnt[i0] = extents[i0];
						rkPnt[i1] = fT - extents[i1];
						rkPnt[i2] = -extents[i2];
					}
				}
				else
				{
					fLSqr += rkDir[i1]*rkDir[i1];
					fDelta = rkDir[i0]*rkPmE[i0] + rkDir[i1]*rkPmE[i1] + rkDir[i2]*kPpE[i2];
					fParam = -fDelta/fLSqr;
					rfSqrDistance += rkPmE[i0]*rkPmE[i0] + rkPmE[i1]*rkPmE[i1] + kPpE[i2]*kPpE[i2] + fDelta*fParam;
					if(pfLParam)
					{
						*pfLParam = fParam;
						rkPnt[i0] = extents[i0];
						rkPnt[i1] = extents[i1];
						rkPnt[i2] = -extents[i2];
					}
				}
				return;
			}

			fLSqr = rkDir[i0]*rkDir[i0] + rkDir[i1]*rkDir[i1];
			fTmp = fLSqr*kPpE[i2] - rkDir[i2]*(rkDir[i0]*rkPmE[i0] + rkDir[i1]*kPpE[i1]);
			if(fTmp >= 0.0f)
			{
				// v[i2]-edge is closest
				if(fTmp <= 2.0f*fLSqr*extents[i2])
				{
					fT = fTmp/fLSqr;
					fLSqr += rkDir[i2]*rkDir[i2];
					fTmp = kPpE[i2] - fT;
					fDelta = rkDir[i0]*rkPmE[i0] + rkDir[i1]*kPpE[i1] + rkDir[i2]*fTmp;
					fParam = -fDelta/fLSqr;
					rfSqrDistance += rkPmE[i0]*rkPmE[i0] + kPpE[i1]*kPpE[i1] + fTmp*fTmp + fDelta*fParam;
					if(pfLParam)
					{
						*pfLParam = fParam;
						rkPnt[i0] = extents[i0];
						rkPnt[i1] = -extents[i1];
						rkPnt[i2] = fT - extents[i2];
					}
				}
				else
				{
					fLSqr += rkDir[i2]*rkDir[i2];
					fDelta = rkDir[i0]*rkPmE[i0] + rkDir[i1]*kPpE[i1] + rkDir[i2]*rkPmE[i2];
					fParam = -fDelta/fLSqr;
					rfSqrDistance += rkPmE[i0]*rkPmE[i0] + kPpE[i1]*kPpE[i1] + rkPmE[i2]*rkPmE[i2] + fDelta*fParam;
					if(pfLParam)
					{
						*pfLParam = fParam;
						rkPnt[i0] = extents[i0];
						rkPnt[i1] = -extents[i1];
						rkPnt[i2] = extents[i2];
					}
				}
				return;
			}

			// (v[i1],v[i2])-corner is closest
			fLSqr += rkDir[i2]*rkDir[i2];
			fDelta = rkDir[i0]*rkPmE[i0] + rkDir[i1]*kPpE[i1] + rkDir[i2]*kPpE[i2];
			fParam = -fDelta/fLSqr;
			rfSqrDistance += rkPmE[i0]*rkPmE[i0] + kPpE[i1]*kPpE[i1] + kPpE[i2]*kPpE[i2] + fDelta*fParam;
			if(pfLParam)
			{
				*pfLParam = fParam;
				rkPnt[i0] = extents[i0];
				rkPnt[i1] = -extents[i1];
				rkPnt[i2] = -extents[i2];
			}
		}
	}
}
// Line direction has all three components > 0 after reflection: decide which
// box face region the line shoots through and delegate to face() with the
// matching axis permutation. The comparisons are the same cross-product sign
// tests as the original, written without the intermediate fProd* temporaries.
static void caseNoZeros(PxVec3& rkPnt, const PxVec3& rkDir, const PxVec3& extents, PxReal* pfLParam, PxReal& rfSqrDistance)
{
	const PxVec3 kPmE(rkPnt.x - extents.x, rkPnt.y - extents.y, rkPnt.z - extents.z);

	if(rkDir.y*kPmE.x >= rkDir.x*kPmE.y)
	{
		if(rkDir.z*kPmE.x >= rkDir.x*kPmE.z)
			face(0, 1, 2, rkPnt, rkDir, extents, kPmE, pfLParam, rfSqrDistance);	// line intersects x = e0
		else
			face(2, 0, 1, rkPnt, rkDir, extents, kPmE, pfLParam, rfSqrDistance);	// line intersects z = e2
	}
	else
	{
		if(rkDir.z*kPmE.y >= rkDir.y*kPmE.z)
			face(1, 2, 0, rkPnt, rkDir, extents, kPmE, pfLParam, rfSqrDistance);	// line intersects y = e1
		else
			face(2, 0, 1, rkPnt, rkDir, extents, kPmE, pfLParam, rfSqrDistance);	// line intersects z = e2
	}
}
// Support routine for distanceLineBoxSquared(): direction component i2 is zero
// (after reflection), so the 2D closest-point problem is solved in the (i0,i1)
// plane and the i2 coordinate is clamped to the box independently.
// Parameters as in face(); rfSqrDistance is accumulated.
static void case0(unsigned int i0, unsigned int i1, unsigned int i2, PxVec3& rkPnt, const PxVec3& rkDir, const PxVec3& extents, PxReal* pfLParam, PxReal& rfSqrDistance)
{
	PxReal fPmE0 = rkPnt[i0] - extents[i0];
	PxReal fPmE1 = rkPnt[i1] - extents[i1];
	PxReal fProd0 = rkDir[i1]*fPmE0;
	PxReal fProd1 = rkDir[i0]*fPmE1;
	PxReal fDelta, fInvLSqr, fInv;

	if(fProd0 >= fProd1)
	{
		// line intersects P[i0] = e[i0]
		rkPnt[i0] = extents[i0];

		PxReal fPpE1 = rkPnt[i1] + extents[i1];
		fDelta = fProd0 - rkDir[i0]*fPpE1;
		if(fDelta >= 0.0f)
		{
			// Closest to the edge at -e[i1]: accumulate the in-plane distance.
			fInvLSqr = 1.0f/(rkDir[i0]*rkDir[i0] + rkDir[i1]*rkDir[i1]);
			rfSqrDistance += fDelta*fDelta*fInvLSqr;
			if(pfLParam)
			{
				rkPnt[i1] = -extents[i1];
				*pfLParam = -(rkDir[i0]*fPmE0+rkDir[i1]*fPpE1)*fInvLSqr;
			}
		}
		else
		{
			// Line crosses the face in-plane: zero in-plane distance.
			if(pfLParam)
			{
				fInv = 1.0f/rkDir[i0];
				rkPnt[i1] -= fProd0*fInv;
				*pfLParam = -fPmE0*fInv;
			}
		}
	}
	else
	{
		// line intersects P[i1] = e[i1]
		rkPnt[i1] = extents[i1];

		PxReal fPpE0 = rkPnt[i0] + extents[i0];
		fDelta = fProd1 - rkDir[i1]*fPpE0;
		if(fDelta >= 0.0f)
		{
			// Closest to the edge at -e[i0].
			fInvLSqr = 1.0f/(rkDir[i0]*rkDir[i0] + rkDir[i1]*rkDir[i1]);
			rfSqrDistance += fDelta*fDelta*fInvLSqr;
			if(pfLParam)
			{
				rkPnt[i0] = -extents[i0];
				*pfLParam = -(rkDir[i0]*fPpE0+rkDir[i1]*fPmE1)*fInvLSqr;
			}
		}
		else
		{
			// Line crosses the face in-plane: zero in-plane distance.
			if(pfLParam)
			{
				fInv = 1.0f/rkDir[i1];
				rkPnt[i0] -= fProd1*fInv;
				*pfLParam = -fPmE1*fInv;
			}
		}
	}

	// Clamp the unconstrained i2 coordinate and add its squared excess.
	if(rkPnt[i2] < -extents[i2])
	{
		fDelta = rkPnt[i2] + extents[i2];
		rfSqrDistance += fDelta*fDelta;
		rkPnt[i2] = -extents[i2];
	}
	else if ( rkPnt[i2] > extents[i2] )
	{
		fDelta = rkPnt[i2] - extents[i2];
		rfSqrDistance += fDelta*fDelta;
		rkPnt[i2] = extents[i2];
	}
}
static void case00(unsigned int i0, unsigned int i1, unsigned int i2, PxVec3& rkPnt, const PxVec3& rkDir, const PxVec3& extents, PxReal* pfLParam, PxReal& rfSqrDistance)
{
PxReal fDelta;
if(pfLParam)
*pfLParam = (extents[i0] - rkPnt[i0])/rkDir[i0];
rkPnt[i0] = extents[i0];
if(rkPnt[i1] < -extents[i1])
{
fDelta = rkPnt[i1] + extents[i1];
rfSqrDistance += fDelta*fDelta;
rkPnt[i1] = -extents[i1];
}
else if(rkPnt[i1] > extents[i1])
{
fDelta = rkPnt[i1] - extents[i1];
rfSqrDistance += fDelta*fDelta;
rkPnt[i1] = extents[i1];
}
if(rkPnt[i2] < -extents[i2])
{
fDelta = rkPnt[i2] + extents[i2];
rfSqrDistance += fDelta*fDelta;
rkPnt[i2] = -extents[i2];
}
else if(rkPnt[i2] > extents[i2])
{
fDelta = rkPnt[i2] - extents[i2];
rfSqrDistance += fDelta*fDelta;
rkPnt[i2] = extents[i2];
}
}
// Support routine for distanceLineBoxSquared(): degenerate zero direction.
// The problem reduces to point-box distance: clamp every coordinate to the box
// extents and accumulate the squared excess per axis.
static void case000(PxVec3& rkPnt, const PxVec3& extents, PxReal& rfSqrDistance)
{
	for(unsigned int axis=0;axis<3;axis++)
	{
		if(rkPnt[axis] < -extents[axis])
		{
			const PxReal excess = rkPnt[axis] + extents[axis];
			rfSqrDistance += excess*excess;
			rkPnt[axis] = -extents[axis];
		}
		else if(rkPnt[axis] > extents[axis])
		{
			const PxReal excess = rkPnt[axis] - extents[axis];
			rfSqrDistance += excess*excess;
			rkPnt[axis] = extents[axis];
		}
	}
}
//! Compute the smallest distance from the (infinite) line to the box.
// lineOrigin/lineDirection: line in world space (direction need not be unit length).
// boxOrigin/boxExtent/boxBase: OBB center, half-extents and orientation (columns = axes).
// lineParam: optional out, line parameter of the closest point.
// boxParam:  optional out, closest point on the box in box-local coordinates.
// Returns the squared distance.
static PxReal distanceLineBoxSquared(const PxVec3& lineOrigin, const PxVec3& lineDirection,
									 const PxVec3& boxOrigin, const PxVec3& boxExtent, const PxMat33& boxBase,
									 PxReal* lineParam,
									 PxVec3* boxParam)
{
	const PxVec3& axis0 = boxBase.column0;
	const PxVec3& axis1 = boxBase.column1;
	const PxVec3& axis2 = boxBase.column2;

	// compute coordinates of line in box coordinate system
	const PxVec3 diff = lineOrigin - boxOrigin;
	PxVec3 pnt(diff.dot(axis0), diff.dot(axis1), diff.dot(axis2));
	PxVec3 dir(lineDirection.dot(axis0), lineDirection.dot(axis1), lineDirection.dot(axis2));

	// Apply reflections so that direction vector has nonnegative components.
	bool reflect[3];
	for(unsigned int i=0;i<3;i++)
	{
		if(dir[i]<0.0f)
		{
			pnt[i] = -pnt[i];
			dir[i] = -dir[i];
			reflect[i] = true;
		}
		else
		{
			reflect[i] = false;
		}
	}

	// Dispatch on which direction components are zero; each helper accumulates
	// the squared distance and (optionally) computes the closest points.
	PxReal sqrDistance = 0.0f;

	if(dir.x>0.0f)
	{
		if(dir.y>0.0f)
		{
			if(dir.z>0.0f)	caseNoZeros(pnt, dir, boxExtent, lineParam, sqrDistance);		// (+,+,+)
			else			case0(0, 1, 2, pnt, dir, boxExtent, lineParam, sqrDistance);	// (+,+,0)
		}
		else
		{
			if(dir.z>0.0f)	case0(0, 2, 1, pnt, dir, boxExtent, lineParam, sqrDistance);	// (+,0,+)
			else			case00(0, 1, 2, pnt, dir, boxExtent, lineParam, sqrDistance);	// (+,0,0)
		}
	}
	else
	{
		if(dir.y>0.0f)
		{
			if(dir.z>0.0f)	case0(1, 2, 0, pnt, dir, boxExtent, lineParam, sqrDistance);	// (0,+,+)
			else			case00(1, 0, 2, pnt, dir, boxExtent, lineParam, sqrDistance);	// (0,+,0)
		}
		else
		{
			if(dir.z>0.0f)	case00(2, 0, 1, pnt, dir, boxExtent, lineParam, sqrDistance);	// (0,0,+)
			else
			{
				case000(pnt, boxExtent, sqrDistance);										// (0,0,0)
				if(lineParam)
					*lineParam = 0.0f;
			}
		}
	}

	if(boxParam)
	{
		// undo reflections
		for(unsigned int i=0;i<3;i++)
		{
			if(reflect[i])
				pnt[i] = -pnt[i];
		}

		*boxParam = pnt;
	}

	return sqrDistance;
}
//! Compute the smallest distance from the (finite) line segment to the box.
// Solves the infinite-line case first, then clamps the line parameter to the
// segment: parameters outside [0,1] fall back to point-box distance from the
// corresponding end point.
PxReal Gu::distanceSegmentBoxSquared(	const PxVec3& segmentPoint0, const PxVec3& segmentPoint1,
										const PxVec3& boxOrigin, const PxVec3& boxExtent, const PxMat33& boxBase,
										PxReal* segmentParam,
										PxVec3* boxParam)
{
	// compute coordinates of line in box coordinate system
	PxReal t;
	PxVec3 boxPt;
	const PxReal sqrDistance = distanceLineBoxSquared(segmentPoint0, segmentPoint1 - segmentPoint0, boxOrigin, boxExtent, boxBase, &t, &boxPt);

	if(t < 0.0f)
	{
		// Closest point on the infinite line lies before the segment start.
		if(segmentParam)
			*segmentParam = 0.0f;
		return Gu::distancePointBoxSquared(segmentPoint0, boxOrigin, boxExtent, boxBase, boxParam);
	}

	if(t > 1.0f)
	{
		// Closest point on the infinite line lies past the segment end.
		if(segmentParam)
			*segmentParam = 1.0f;
		return Gu::distancePointBoxSquared(segmentPoint1, boxOrigin, boxExtent, boxBase, boxParam);
	}

	// Interior solution: the line result is the segment result.
	if(segmentParam)
		*segmentParam = t;
	if(boxParam)
		*boxParam = boxPt;
	return sqrDistance;
}
| 14,831 | C++ | 26.016393 | 188 | 0.621873 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/distance/GuDistancePointSegment.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_DISTANCE_POINT_SEGMENT_H
#define GU_DISTANCE_POINT_SEGMENT_H
#include "common/PxPhysXCommonConfig.h"
#include "GuSegment.h"
namespace physx
{
namespace Gu
{
// dir = p1 - p0
// Squared distance from 'point' to the segment starting at p0 with extent
// vector 'dir' (end point p0 + dir). Optionally returns the clamped parameter
// t in [0,1] of the closest point p0 + t*dir.
PX_FORCE_INLINE PxReal distancePointSegmentSquaredInternal(const PxVec3& p0, const PxVec3& dir, const PxVec3& point, PxReal* param=NULL)
{
	PxVec3 toPoint = point - p0;
	const PxReal proj = toPoint.dot(dir);

	PxReal t;
	if(proj<=0.0f)
	{
		// Projection falls before the start point.
		t = 0.0f;
	}
	else
	{
		const PxReal dirSqrLen = dir.magnitudeSquared();
		if(proj>=dirSqrLen)
		{
			// Projection falls past the end point.
			t = 1.0f;
			toPoint -= dir;
		}
		else
		{
			// Interior projection.
			t = proj / dirSqrLen;
			toPoint -= t*dir;
		}
	}

	if(param)
		*param = t;

	return toPoint.magnitudeSquared();
}
/**
A segment is defined by S(t) = mP0 * (1 - t) + mP1 * t, with 0 <= t <= 1
Alternatively, a segment is S(t) = Origin + t * Direction for 0 <= t <= 1.
Direction is not necessarily unit length. The end points are Origin = mP0 and Origin + Direction = mP1.
*/
PX_FORCE_INLINE PxReal distancePointSegmentSquared(const PxVec3& p0, const PxVec3& p1, const PxVec3& point, PxReal* param=NULL)
{
	// Forward to the internal variant, which works on the extent vector p1 - p0.
	const PxVec3 extent = p1 - p0;
	return distancePointSegmentSquaredInternal(p0, extent, point, param);
}
// Convenience overload: squared point-segment distance for a Gu::Segment,
// forwarding its end points to the two-point variant above.
PX_INLINE PxReal distancePointSegmentSquared(const Gu::Segment& segment, const PxVec3& point, PxReal* param=NULL)
{
	return distancePointSegmentSquared(segment.p0, segment.p1, point, param);
}
} // namespace Gu
}
#endif
| 3,070 | C | 33.122222 | 137 | 0.72215 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/distance/GuDistancePointTetrahedron.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_DISTANCE_POINT_TETRAHEDRON_H
#define GU_DISTANCE_POINT_TETRAHEDRON_H
#include "foundation/PxVec3.h"
#include "foundation/PxVec4.h"
#include "GuDistancePointTriangle.h"
#include "common/PxPhysXCommonConfig.h"
namespace physx
{
namespace Gu
{
PX_PHYSX_COMMON_API PxVec4 PointOutsideOfPlane4(const PxVec3& p, const PxVec3& _a, const PxVec3& _b,
const PxVec3& _c, const PxVec3& _d);
PX_PHYSX_COMMON_API PxVec3 closestPtPointTetrahedron(const PxVec3& p, const PxVec3& a, const PxVec3& b, const PxVec3& c, const PxVec3& d, const PxVec4& result);
// Evaluates closestPtPointTriangle2() on all four tetrahedron faces and keeps
// the candidate with the smallest squared magnitude (the same self-dot metric
// the per-face helper results are compared with in the .cpp variant).
PX_INLINE PX_CUDA_CALLABLE PxVec3 closestPtPointTetrahedron(const PxVec3& p, const PxVec3& a, const PxVec3& b, const PxVec3& c, const PxVec3& d)
{
	const PxVec3 ab = b - a;
	const PxVec3 ac = c - a;
	const PxVec3 ad = d - a;
	const PxVec3 bc = c - b;
	const PxVec3 bd = d - b;

	// Face 0, 1, 2 seeds the search.
	PxVec3 best = closestPtPointTriangle2(p, a, b, c, ab, ac);
	PxReal bestSq = best.dot(best);

	// Face 0, 2, 3.
	PxVec3 candidate = closestPtPointTriangle2(p, a, c, d, ac, ad);
	PxReal candidateSq = candidate.dot(candidate);
	if (candidateSq < bestSq)
	{
		best = candidate;
		bestSq = candidateSq;
	}

	// Face 0, 3, 1.
	candidate = closestPtPointTriangle2(p, a, d, b, ad, ab);
	candidateSq = candidate.dot(candidate);
	if (candidateSq < bestSq)
	{
		best = candidate;
		bestSq = candidateSq;
	}

	// Face 1, 3, 2.
	candidate = closestPtPointTriangle2(p, b, d, c, bd, bc);
	candidateSq = candidate.dot(candidate);
	if (candidateSq < bestSq)
	{
		best = candidate;
		bestSq = candidateSq;
	}

	return best;
}
}
}
#endif
| 3,389 | C | 35.451613 | 162 | 0.720271 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/distance/GuDistancePointTetrahedron.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuDistancePointTetrahedron.h"
#include "GuDistancePointTriangle.h"
using namespace physx;
PxVec4 Gu::PointOutsideOfPlane4(const PxVec3& p, const PxVec3& _a, const PxVec3& _b,
	const PxVec3& _c, const PxVec3& _d)
{
	// Edge vectors from vertex a, plus the query vector a->p.
	const PxVec3 toP = p - _a;
	const PxVec3 ab = _b - _a;
	const PxVec3 ac = _c - _a;
	const PxVec3 ad = _d - _a;

	// Face (a,b,c): side of p vs side of the opposite vertex d.
	const PxVec3 nABC = ab.cross(ac);
	const float pSide0 = nABC.dot(toP);
	const float dSide0 = nABC.dot(ad);

	// Face (a,c,d): opposite vertex is b.
	const PxVec3 nACD = ac.cross(ad);
	const float pSide1 = nACD.dot(toP);
	const float bSide1 = nACD.dot(ab);

	// Face (a,d,b): opposite vertex is c.
	const PxVec3 nADB = ad.cross(ab);
	const float pSide2 = nADB.dot(toP);
	const float cSide2 = nADB.dot(ac);

	// Face (b,d,c): opposite vertex is a.
	const PxVec3 bd = _d - _b;
	const PxVec3 bc = _c - _b;
	const PxVec3 nBDC = bd.cross(bc);
	const float pSide3 = nBDC.dot(p - _b);
	const float aSide3 = nBDC.dot(_a - _b);

	// Each component is negative exactly when p and the face's opposite
	// vertex lie on opposite sides of that face's plane, i.e. when p is
	// outside the tetrahedron across that face.
	return PxVec4(pSide0 * dSide0, pSide1 * bSide1, pSide2 * cSide2, pSide3 * aSide3);
}
PxVec3 Gu::closestPtPointTetrahedron(const PxVec3& p, const PxVec3& a, const PxVec3& b, const PxVec3& c, const PxVec3& d, const PxVec4& result)
{
	// Edge vectors shared by the four face queries below.
	const PxVec3 ab = b - a;
	const PxVec3 ac = c - a;
	const PxVec3 ad = d - a;
	const PxVec3 bc = c - b;
	const PxVec3 bd = d - b;

	// A face is only considered when its entry in 'result' is negative,
	// i.e. when PointOutsideOfPlane4 flagged p as outside across it.
	PxVec3 best(0.f, 0.f, 0.f);
	PxReal bestDist2 = PX_MAX_F32;

	// Keep a face candidate if its squared magnitude beats the current best.
	auto keepIfCloser = [&](const PxVec3& candidate)
	{
		const PxReal dist2 = candidate.dot(candidate);
		if(dist2 < bestDist2)
		{
			best = candidate;
			bestDist2 = dist2;
		}
	};

	if(result.x < 0.f)
	{
		// face (a, b, c) - the first candidate is adopted unconditionally
		best = closestPtPointTriangle2(p, a, b, c, ab, ac);
		bestDist2 = best.dot(best);
	}
	if(result.y < 0.f)
	{
		// face (a, c, d)
		keepIfCloser(closestPtPointTriangle2(p, a, c, d, ac, ad));
	}
	if(result.z < 0.f)
	{
		// face (a, d, b)
		keepIfCloser(closestPtPointTriangle2(p, a, d, b, ad, ab));
	}
	if(result.w < 0.f)
	{
		// face (b, d, c)
		keepIfCloser(closestPtPointTriangle2(p, b, d, c, bd, bc));
	}
	return best;
}
| 4,036 | C++ | 32.090164 | 143 | 0.700942 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/distance/GuDistanceTriangleTriangle.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuDistanceTriangleTriangle.h"
#include "foundation/PxVecMath.h"
using namespace physx;
using namespace Gu;
using namespace aos;
void edgeEdgeDist(PxVec3& x, PxVec3& y, const PxVec3& p, const PxVec3& a, const PxVec3& q, const PxVec3& b);
// Squared distance between triangles p and q; one pair of closest points is
// written to cp/cq. The structure matches the classic PQP "TriDist" routine:
// test all nine edge-edge pairs first, then the two vertex-vs-face cases.
// If neither pass proves the triangles disjoint, they overlap and 0 is
// returned (cp/cq then hold whatever the last edge-edge test produced).
float Gu::distanceTriangleTriangleSquared(PxVec3& cp, PxVec3& cq, const PxVec3p p[3], const PxVec3p q[3])
{
	// Edge vectors of both triangles, built with unaligned 4-wide SIMD
	// loads/stores (PxVec3p provides the 4th float of storage this needs).
	PxVec3p Sv[3];
	V4StoreU(V4Sub(V4LoadU(&p[1].x), V4LoadU(&p[0].x)), &Sv[0].x);
	V4StoreU(V4Sub(V4LoadU(&p[2].x), V4LoadU(&p[1].x)), &Sv[1].x);
	V4StoreU(V4Sub(V4LoadU(&p[0].x), V4LoadU(&p[2].x)), &Sv[2].x);

	PxVec3p Tv[3];
	V4StoreU(V4Sub(V4LoadU(&q[1].x), V4LoadU(&q[0].x)), &Tv[0].x);
	V4StoreU(V4Sub(V4LoadU(&q[2].x), V4LoadU(&q[1].x)), &Tv[1].x);
	V4StoreU(V4Sub(V4LoadU(&q[0].x), V4LoadU(&q[2].x)), &Tv[2].x);

	// Pass 1: closest points over all 9 edge-edge pairs.
	PxVec3 minP, minQ;
	bool shown_disjoint = false;	// set once any test yields a positive-distance witness

	float mindd = PX_MAX_F32;

	for(PxU32 i=0;i<3;i++)
	{
		for(PxU32 j=0;j<3;j++)
		{
			edgeEdgeDist(cp, cq, p[i], Sv[i], q[j], Tv[j]);
			const PxVec3 V = cq - cp;
			const float dd = V.dot(V);

			if(dd<=mindd)
			{
				minP = cp;
				minQ = cq;
				mindd = dd;

				// Check whether the vertices opposite the two edges lie on the
				// "far" sides of the separation vector V. If so, this edge-edge
				// pair realizes the closest points of the triangles: early out.
				PxU32 id = i+2;
				if(id>=3)
					id-=3;
				PxVec3 Z = p[id] - cp;
				float a = Z.dot(V);
				id = j+2;
				if(id>=3)
					id-=3;
				Z = q[id] - cq;
				float b = Z.dot(V);

				if((a<=0.0f) && (b>=0.0f))
					return V.dot(V);

				// Otherwise record whether this pair still witnesses disjointness.
				if(a<=0.0f)	a = 0.0f;
				else if(b>0.0f)	b = 0.0f;

				if((mindd - a + b) > 0.0f)
					shown_disjoint = true;
			}
		}
	}

	// Pass 2a: a vertex of q against the face of p. Tp[k] is (up to scale)
	// the signed distance of q[k] from p's plane along normal Sn.
	PxVec3 Sn = Sv[0].cross(Sv[1]);
	float Snl = Sn.dot(Sn);

	if(Snl>1e-15f)	// skip degenerate (near-zero-area) triangle p
	{
		const PxVec3 Tp((p[0] - q[0]).dot(Sn),
						(p[0] - q[1]).dot(Sn),
						(p[0] - q[2]).dot(Sn));

		// All of q strictly on one side of p's plane? Then pick q's vertex
		// closest to that plane as the candidate.
		int index = -1;
		if((Tp[0]>0.0f) && (Tp[1]>0.0f) && (Tp[2]>0.0f))
		{
			if(Tp[0]<Tp[1])		index = 0; else index = 1;
			if(Tp[2]<Tp[index])	index = 2;
		}
		else if((Tp[0]<0.0f) && (Tp[1]<0.0f) && (Tp[2]<0.0f))
		{
			if(Tp[0]>Tp[1])		index = 0; else index = 1;
			if(Tp[2]>Tp[index])	index = 2;
		}

		if(index >= 0)
		{
			shown_disjoint = true;

			// If that vertex projects inside triangle p (inside all three
			// edge half-planes), the projection is the exact answer.
			const PxVec3& qIndex = q[index];

			PxVec3 V = qIndex - p[0];
			PxVec3 Z = Sn.cross(Sv[0]);
			if(V.dot(Z)>0.0f)
			{
				V = qIndex - p[1];
				Z = Sn.cross(Sv[1]);
				if(V.dot(Z)>0.0f)
				{
					V = qIndex - p[2];
					Z = Sn.cross(Sv[2]);
					if(V.dot(Z)>0.0f)
					{
						cp = qIndex + Sn * Tp[index]/Snl;
						cq = qIndex;
						return (cp - cq).magnitudeSquared();
					}
				}
			}
		}
	}

	// Pass 2b: symmetric case - a vertex of p against the face of q.
	PxVec3 Tn = Tv[0].cross(Tv[1]);
	float Tnl = Tn.dot(Tn);

	if(Tnl>1e-15f)	// skip degenerate triangle q
	{
		const PxVec3 Sp((q[0] - p[0]).dot(Tn),
						(q[0] - p[1]).dot(Tn),
						(q[0] - p[2]).dot(Tn));

		int index = -1;
		if((Sp[0]>0.0f) && (Sp[1]>0.0f) && (Sp[2]>0.0f))
		{
			if(Sp[0]<Sp[1])		index = 0; else index = 1;
			if(Sp[2]<Sp[index])	index = 2;
		}
		else if((Sp[0]<0.0f) && (Sp[1]<0.0f) && (Sp[2]<0.0f))
		{
			if(Sp[0]>Sp[1])		index = 0; else index = 1;
			if(Sp[2]>Sp[index])	index = 2;
		}

		if(index >= 0)
		{
			shown_disjoint = true;

			const PxVec3& pIndex = p[index];

			PxVec3 V = pIndex - q[0];
			PxVec3 Z = Tn.cross(Tv[0]);
			if(V.dot(Z)>0.0f)
			{
				V = pIndex - q[1];
				Z = Tn.cross(Tv[1]);
				if(V.dot(Z)>0.0f)
				{
					V = pIndex - q[2];
					Z = Tn.cross(Tv[2]);
					if(V.dot(Z)>0.0f)
					{
						cp = pIndex;
						cq = pIndex + Tn * Sp[index]/Tnl;
						return (cp - cq).magnitudeSquared();
					}
				}
			}
		}
	}

	// No early-out fired: either some test witnessed disjointness (return the
	// best edge-edge result) or the triangles overlap (distance zero).
	if(shown_disjoint)
	{
		cp = minP;
		cq = minQ;
		return mindd;
	}
	else return 0.0f;
}
| 5,125 | C++ | 25.153061 | 108 | 0.590634 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/distance/GuDistancePointTriangle.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxVec3.h"
#include "GuDistancePointTriangle.h"
using namespace physx;
// Based on Christer Ericson's book
// Closest point on triangle (a, b, c) to point p, via Voronoi-region
// classification (after Christer Ericson's "Real-Time Collision Detection").
// On return, (s, t) are the barycentric weights of b and c, so the result
// equals a + s*(b-a) + t*(c-a).
PxVec3 Gu::closestPtPointTriangle(const PxVec3& p, const PxVec3& a, const PxVec3& b, const PxVec3& c, float& s, float& t)
{
	const PxVec3 ab = b - a;
	const PxVec3 ac = c - a;

	// Region of vertex a?
	const PxVec3 ap = p - a;
	const float abDotAp = ab.dot(ap);
	const float acDotAp = ac.dot(ap);
	if(abDotAp<=0.0f && acDotAp<=0.0f)
	{
		s = 0.0f;
		t = 0.0f;
		return a;	// barycentric coordinates (1,0,0)
	}

	// Region of vertex b?
	const PxVec3 bp = p - b;
	const float abDotBp = ab.dot(bp);
	const float acDotBp = ac.dot(bp);
	if(abDotBp>=0.0f && acDotBp<=abDotBp)
	{
		s = 1.0f;
		t = 0.0f;
		return b;	// barycentric coordinates (0,1,0)
	}

	// Region of edge ab? If so, return the projection of p onto ab.
	const float baryC = abDotAp*acDotBp - abDotBp*acDotAp;
	if(baryC<=0.0f && abDotAp>=0.0f && abDotBp<=0.0f)
	{
		const float u = abDotAp / (abDotAp - abDotBp);
		s = u;
		t = 0.0f;
		return a + u * ab;	// barycentric coordinates (1-u, u, 0)
	}

	// Region of vertex c?
	const PxVec3 cp = p - c;
	const float abDotCp = ab.dot(cp);
	const float acDotCp = ac.dot(cp);
	if(acDotCp>=0.0f && abDotCp<=acDotCp)
	{
		s = 0.0f;
		t = 1.0f;
		return c;	// barycentric coordinates (0,0,1)
	}

	// Region of edge ac? If so, return the projection of p onto ac.
	const float baryB = abDotCp*acDotAp - abDotAp*acDotCp;
	if(baryB<=0.0f && acDotAp>=0.0f && acDotCp<=0.0f)
	{
		const float w = acDotAp / (acDotAp - acDotCp);
		s = 0.0f;
		t = w;
		return a + w * ac;	// barycentric coordinates (1-w, 0, w)
	}

	// Region of edge bc? If so, return the projection of p onto bc.
	const float baryA = abDotBp*acDotCp - abDotCp*acDotBp;
	if(baryA<=0.0f && (acDotBp-abDotBp)>=0.0f && (abDotCp-acDotCp)>=0.0f)
	{
		const float w = (acDotBp-abDotBp) / ((acDotBp - abDotBp) + (abDotCp-acDotCp));
		s = 1.0f-w;
		t = w;
		return b + w * (c-b);	// barycentric coordinates (0, 1-w, w)
	}

	// Interior region: normalize the three unnormalized barycentric weights.
	const float denom = 1.0f / (baryA + baryB + baryC);
	const float v = baryB * denom;
	const float w = baryC * denom;
	s = v;
	t = w;
	return a + ab*v + ac*w;
}
//aos::FloatV Gu::distancePointTriangleSquared( const aos::Vec3VArg p,
// const aos::Vec3VArg a,
// const aos::Vec3VArg b,
// const aos::Vec3VArg c,
// aos::FloatV& u,
// aos::FloatV& v,
// aos::Vec3V& closestP)
//{
// using namespace aos;
//
// const FloatV zero = FZero();
// const FloatV one = FOne();
// //const Vec3V zero = V3Zero();
// const Vec3V ab = V3Sub(b, a);
// const Vec3V ac = V3Sub(c, a);
// const Vec3V bc = V3Sub(c, b);
// const Vec3V ap = V3Sub(p, a);
// const Vec3V bp = V3Sub(p, b);
// const Vec3V cp = V3Sub(p, c);
//
// const FloatV d1 = V3Dot(ab, ap); // snom
// const FloatV d2 = V3Dot(ac, ap); // tnom
// const FloatV d3 = V3Dot(ab, bp); // -sdenom
// const FloatV d4 = V3Dot(ac, bp); // unom = d4 - d3
// const FloatV d5 = V3Dot(ab, cp); // udenom = d5 - d6
// const FloatV d6 = V3Dot(ac, cp); // -tdenom
// const FloatV unom = FSub(d4, d3);
// const FloatV udenom = FSub(d5, d6);
//
// //check if p in vertex region outside a
// const BoolV con00 = FIsGrtr(zero, d1); // snom <= 0
// const BoolV con01 = FIsGrtr(zero, d2); // tnom <= 0
// const BoolV con0 = BAnd(con00, con01); // vertex region a
// const FloatV u0 = zero;
// const FloatV v0 = zero;
//
// //check if p in vertex region outside b
// const BoolV con10 = FIsGrtrOrEq(d3, zero);
// const BoolV con11 = FIsGrtrOrEq(d3, d4);
// const BoolV con1 = BAnd(con10, con11); // vertex region b
// const FloatV u1 = one;
// const FloatV v1 = zero;
//
// //check if p in vertex region outside c
// const BoolV con20 = FIsGrtrOrEq(d6, zero);
// const BoolV con21 = FIsGrtrOrEq(d6, d5);
// const BoolV con2 = BAnd(con20, con21); // vertex region c
// const FloatV u2 = zero;
// const FloatV v2 = one;
//
// //check if p in edge region of AB
// const FloatV vc = FSub(FMul(d1, d4), FMul(d3, d2));
//
// const BoolV con30 = FIsGrtr(zero, vc);
// const BoolV con31 = FIsGrtrOrEq(d1, zero);
// const BoolV con32 = FIsGrtr(zero, d3);
// const BoolV con3 = BAnd(con30, BAnd(con31, con32));
// const FloatV sScale = FDiv(d1, FSub(d1, d3));
// const Vec3V closest3 = V3Add(a, V3Scale(ab, sScale));
// const FloatV u3 = sScale;
// const FloatV v3 = zero;
//
// //check if p in edge region of BC
// const FloatV va = FSub(FMul(d3, d6),FMul(d5, d4));
// const BoolV con40 = FIsGrtr(zero, va);
// const BoolV con41 = FIsGrtrOrEq(d4, d3);
// const BoolV con42 = FIsGrtrOrEq(d5, d6);
// const BoolV con4 = BAnd(con40, BAnd(con41, con42));
// const FloatV uScale = FDiv(unom, FAdd(unom, udenom));
// const Vec3V closest4 = V3Add(b, V3Scale(bc, uScale));
// const FloatV u4 = FSub(one, uScale);
// const FloatV v4 = uScale;
//
// //check if p in edge region of AC
// const FloatV vb = FSub(FMul(d5, d2), FMul(d1, d6));
// const BoolV con50 = FIsGrtr(zero, vb);
// const BoolV con51 = FIsGrtrOrEq(d2, zero);
// const BoolV con52 = FIsGrtr(zero, d6);
// const BoolV con5 = BAnd(con50, BAnd(con51, con52));
// const FloatV tScale = FDiv(d2, FSub(d2, d6));
// const Vec3V closest5 = V3Add(a, V3Scale(ac, tScale));
// const FloatV u5 = zero;
// const FloatV v5 = tScale;
//
// //P must project inside face region. Compute Q using Barycentric coordinates
// const FloatV denom = FRecip(FAdd(va, FAdd(vb, vc)));
// const FloatV t = FMul(vb, denom);
// const FloatV w = FMul(vc, denom);
// const Vec3V bCom = V3Scale(ab, t);
// const Vec3V cCom = V3Scale(ac, w);
// const Vec3V closest6 = V3Add(a, V3Add(bCom, cCom));
// const FloatV u6 = t;
// const FloatV v6 = w;
//
// const Vec3V closest= V3Sel(con0, a, V3Sel(con1, b, V3Sel(con2, c, V3Sel(con3, closest3, V3Sel(con4, closest4, V3Sel(con5, closest5, closest6))))));
// u = FSel(con0, u0, FSel(con1, u1, FSel(con2, u2, FSel(con3, u3, FSel(con4, u4, FSel(con5, u5, u6))))));
// v = FSel(con0, v0, FSel(con1, v1, FSel(con2, v2, FSel(con3, v3, FSel(con4, v4, FSel(con5, v5, v6))))));
// closestP = closest;
//
// const Vec3V vv = V3Sub(p, closest);
//
// return V3Dot(vv, vv);
//}
// Conditioning wrapper around distancePointTriangleSquared: translates and
// uniformly scales the query point and triangle into a unit-sized box around
// the origin, runs the distance query there, then maps the closest point and
// squared distance back to the original scale. This improves floating-point
// robustness for triangles far from the origin or of extreme size.
PX_PHYSX_COMMON_API aos::FloatV Gu::distancePointTriangleSquared2UnitBox(
	const aos::Vec3VArg queryPoint,
	const aos::Vec3VArg triA,
	const aos::Vec3VArg triB,
	const aos::Vec3VArg triC,
	aos::FloatV& u,
	aos::FloatV& v,
	aos::Vec3V& closestP)
{
	using namespace aos;

	// Axis-aligned bounds of the four input points.
	const Vec3V lo = V3Min(V3Min(triA, triB), V3Min(triC, queryPoint));
	const Vec3V hi = V3Max(V3Max(triA, triB), V3Max(triC, queryPoint));
	const Vec3V boxExtent = V3Sub(hi, lo);

	// Largest box dimension, clamped away from zero so the divide is safe.
	FloatV invScale = FMax(FLoad(1e-12f), V3ExtractMax(boxExtent));
	const FloatV one = FOne();
	FloatV scale = FDiv(one, invScale);

	// Bring everything into the unit box.
	const Vec3V pLocal = V3Scale(V3Sub(queryPoint, lo), scale);
	const Vec3V aLocal = V3Scale(V3Sub(triA, lo), scale);
	const Vec3V bLocal = V3Scale(V3Sub(triB, lo), scale);
	const Vec3V cLocal = V3Scale(V3Sub(triC, lo), scale);

	Vec3V cpLocal;
	FloatV dist2Local = Gu::distancePointTriangleSquared(pLocal, aLocal, bLocal, cLocal, u, v, cpLocal);

	// Undo the transform: the barycentric outputs u/v are scale-invariant,
	// the closest point and squared distance are not.
	closestP = V3Add(V3Scale(cpLocal, invScale), lo);
	return FMul(dist2Local, FMul(invScale, invScale));
}
// Vectorized point-triangle squared distance with early-outs per Voronoi
// region. Outputs: u, v are the barycentric weights of b and c (closestP =
// a + u*(b-a) + v*(c-a)); closestP is the closest point on the triangle; the
// return value is the squared distance from p to closestP. Same region walk
// as the scalar closestPtPointTriangle above.
aos::FloatV Gu::distancePointTriangleSquared(	const aos::Vec3VArg p,
												const aos::Vec3VArg a,
												const aos::Vec3VArg b,
												const aos::Vec3VArg c,
												aos::FloatV& u,
												aos::FloatV& v,
												aos::Vec3V& closestP)
{
	using namespace aos;

	const FloatV zero = FZero();
	const FloatV one = FOne();
	//const Vec3V zero = V3Zero();
	const Vec3V ab = V3Sub(b, a);
	const Vec3V ac = V3Sub(c, a);
	const Vec3V bc = V3Sub(c, b);
	const Vec3V ap = V3Sub(p, a);
	const Vec3V bp = V3Sub(p, b);
	const Vec3V cp = V3Sub(p, c);

	const FloatV d1 = V3Dot(ab, ap); // snom
	const FloatV d2 = V3Dot(ac, ap); // tnom
	const FloatV d3 = V3Dot(ab, bp); // -sdenom
	const FloatV d4 = V3Dot(ac, bp); // unom = d4 - d3
	const FloatV d5 = V3Dot(ab, cp); // udenom = d5 - d6
	const FloatV d6 = V3Dot(ac, cp); // -tdenom
	const FloatV unom = FSub(d4, d3);
	const FloatV udenom = FSub(d5, d6);

	//check if p in vertex region outside a
	const BoolV con00 = FIsGrtr(zero, d1); // snom <= 0
	const BoolV con01 = FIsGrtr(zero, d2); // tnom <= 0
	const BoolV con0 = BAnd(con00, con01); // vertex region a
	if(BAllEqTTTT(con0))
	{
		// closest point is vertex a itself: barycentric (1,0,0)
		u = zero;
		v = zero;
		const Vec3V vv = V3Sub(p, a);
		closestP = a;
		return V3Dot(vv, vv);
	}

	//check if p in vertex region outside b
	const BoolV con10 = FIsGrtrOrEq(d3, zero);
	const BoolV con11 = FIsGrtrOrEq(d3, d4);
	const BoolV con1 = BAnd(con10, con11); // vertex region b
	if(BAllEqTTTT(con1))
	{
		// closest point is vertex b: barycentric (0,1,0)
		u = one;
		v = zero;
		const Vec3V vv = V3Sub(p, b);
		closestP = b;
		return V3Dot(vv, vv);
	}

	//check if p in vertex region outside c
	const BoolV con20 = FIsGrtrOrEq(d6, zero);
	const BoolV con21 = FIsGrtrOrEq(d6, d5);
	const BoolV con2 = BAnd(con20, con21); // vertex region c
	if(BAllEqTTTT(con2))
	{
		// closest point is vertex c: barycentric (0,0,1)
		u = zero;
		v = one;
		const Vec3V vv = V3Sub(p, c);
		closestP = c;
		return V3Dot(vv, vv);
	}

	//check if p in edge region of AB
	const FloatV vc = FSub(FMul(d1, d4), FMul(d3, d2));

	const BoolV con30 = FIsGrtr(zero, vc);
	const BoolV con31 = FIsGrtrOrEq(d1, zero);
	const BoolV con32 = FIsGrtr(zero, d3);
	const BoolV con3 = BAnd(con30, BAnd(con31, con32));
	if(BAllEqTTTT(con3))
	{
		// project p onto edge ab: barycentric (1-s, s, 0)
		const FloatV sScale = FDiv(d1, FSub(d1, d3));
		const Vec3V closest3 = V3Add(a, V3Scale(ab, sScale));
		u = sScale;
		v = zero;
		const Vec3V vv = V3Sub(p, closest3);
		closestP = closest3;
		return V3Dot(vv, vv);
	}

	//check if p in edge region of BC
	const FloatV va = FSub(FMul(d3, d6),FMul(d5, d4));
	const BoolV con40 = FIsGrtr(zero, va);
	const BoolV con41 = FIsGrtrOrEq(d4, d3);
	const BoolV con42 = FIsGrtrOrEq(d5, d6);
	const BoolV con4 = BAnd(con40, BAnd(con41, con42));
	if(BAllEqTTTT(con4))
	{
		// project p onto edge bc: barycentric (0, 1-t, t)
		const FloatV uScale = FDiv(unom, FAdd(unom, udenom));
		const Vec3V closest4 = V3Add(b, V3Scale(bc, uScale));
		u = FSub(one, uScale);
		v = uScale;
		const Vec3V vv = V3Sub(p, closest4);
		closestP = closest4;
		return V3Dot(vv, vv);
	}

	//check if p in edge region of AC
	const FloatV vb = FSub(FMul(d5, d2), FMul(d1, d6));
	const BoolV con50 = FIsGrtr(zero, vb);
	const BoolV con51 = FIsGrtrOrEq(d2, zero);
	const BoolV con52 = FIsGrtr(zero, d6);
	const BoolV con5 = BAnd(con50, BAnd(con51, con52));
	if(BAllEqTTTT(con5))
	{
		// project p onto edge ac: barycentric (1-t, 0, t)
		const FloatV tScale = FDiv(d2, FSub(d2, d6));
		const Vec3V closest5 = V3Add(a, V3Scale(ac, tScale));
		u = zero;
		v = tScale;
		const Vec3V vv = V3Sub(p, closest5);
		closestP = closest5;
		return V3Dot(vv, vv);
	}

	//P must project inside face region. Compute Q using Barycentric coordinates
	const FloatV denom = FRecip(FAdd(va, FAdd(vb, vc)));
	const FloatV t = FMul(vb, denom);
	const FloatV w = FMul(vc, denom);
	const Vec3V bCom = V3Scale(ab, t);
	const Vec3V cCom = V3Scale(ac, w);
	const Vec3V closest6 = V3Add(a, V3Add(bCom, cCom));
	u = t;
	v = w;
	closestP = closest6;

	const Vec3V vv = V3Sub(p, closest6);

	return V3Dot(vv, vv);
}
| 12,541 | C++ | 31.746736 | 150 | 0.649709 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/distance/GuDistanceSegmentSegment.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuDistanceSegmentSegment.h"
using namespace physx;
using namespace aos;
static const float ZERO_TOLERANCE = 1e-06f;
// S0 = origin + extent * dir;
// S1 = origin - extent * dir;
// Squared distance between two segments given in centered form:
//	S0 = origin0 + s * dir0, s in [-extent0, +extent0]
//	S1 = origin1 + t * dir1, t in [-extent1, +extent1]
// Optional outputs param0/param1 receive the closest-point parameters s, t,
// still in the centered [-extent, +extent] range.
//
// Classic Wild Magic (Eberly) implementation: solve the unconstrained
// line-line minimization, then classify where the solution lands relative
// to the (s, t) parameter rectangle - one of 9 regions (interior, 4 sides,
// 4 corners) - and clamp/re-minimize accordingly. Parallel segments
// (fDet below tolerance) are handled as a separate 1D case at the end.
PxReal Gu::distanceSegmentSegmentSquared(	const PxVec3& origin0, const PxVec3& dir0, PxReal extent0,
											const PxVec3& origin1, const PxVec3& dir1, PxReal extent1,
											PxReal* param0, PxReal* param1)
{
	const PxVec3 kDiff = origin0 - origin1;
	const PxReal fA01 = -dir0.dot(dir1);
	const PxReal fB0 = kDiff.dot(dir0);
	const PxReal fB1 = -kDiff.dot(dir1);
	const PxReal fC = kDiff.magnitudeSquared();
	// fDet ~ squared sine of the angle between the directions (for unit dirs).
	const PxReal fDet = PxAbs(1.0f - fA01*fA01);
	PxReal fS0, fS1, fSqrDist, fExtDet0, fExtDet1, fTmpS0, fTmpS1;

	if (fDet >= ZERO_TOLERANCE)
	{
		// segments are not parallel
		// fS0/fS1 are the unconstrained line-line minimizers scaled by fDet.
		fS0 = fA01*fB1-fB0;
		fS1 = fA01*fB0-fB1;
		fExtDet0 = extent0*fDet;
		fExtDet1 = extent1*fDet;

		if (fS0 >= -fExtDet0)
		{
			if (fS0 <= fExtDet0)
			{
				if (fS1 >= -fExtDet1)
				{
					if (fS1 <= fExtDet1) // region 0 (interior)
					{
						// minimum at two interior points of 3D lines
						PxReal fInvDet = 1.0f/fDet;
						fS0 *= fInvDet;
						fS1 *= fInvDet;
						fSqrDist = fS0*(fS0+fA01*fS1+2.0f*fB0) + fS1*(fA01*fS0+fS1+2.0f*fB1)+fC;
					}
					else // region 3 (side)
					{
						// clamp t to +extent1, then re-minimize over s
						fS1 = extent1;
						fTmpS0 = -(fA01*fS1+fB0);
						if (fTmpS0 < -extent0)
						{
							fS0 = -extent0;
							fSqrDist = fS0*(fS0-2.0f*fTmpS0) + fS1*(fS1+2.0f*fB1)+fC;
						}
						else if (fTmpS0 <= extent0)
						{
							fS0 = fTmpS0;
							fSqrDist = -fS0*fS0+fS1*(fS1+2.0f*fB1)+fC;
						}
						else
						{
							fS0 = extent0;
							fSqrDist = fS0*(fS0-2.0f*fTmpS0) + fS1*(fS1+2.0f*fB1)+fC;
						}
					}
				}
				else // region 7 (side)
				{
					// clamp t to -extent1, then re-minimize over s
					fS1 = -extent1;
					fTmpS0 = -(fA01*fS1+fB0);
					if (fTmpS0 < -extent0)
					{
						fS0 = -extent0;
						fSqrDist = fS0*(fS0-2.0f*fTmpS0) + fS1*(fS1+2.0f*fB1)+fC;
					}
					else if (fTmpS0 <= extent0)
					{
						fS0 = fTmpS0;
						fSqrDist = -fS0*fS0+fS1*(fS1+2.0f*fB1)+fC;
					}
					else
					{
						fS0 = extent0;
						fSqrDist = fS0*(fS0-2.0f*fTmpS0) + fS1*(fS1+2.0f*fB1)+fC;
					}
				}
			}
			else
			{
				if (fS1 >= -fExtDet1)
				{
					if (fS1 <= fExtDet1) // region 1 (side)
					{
						// clamp s to +extent0, then re-minimize over t
						fS0 = extent0;
						fTmpS1 = -(fA01*fS0+fB1);
						if (fTmpS1 < -extent1)
						{
							fS1 = -extent1;
							fSqrDist = fS1*(fS1-2.0f*fTmpS1) + fS0*(fS0+2.0f*fB0)+fC;
						}
						else if (fTmpS1 <= extent1)
						{
							fS1 = fTmpS1;
							fSqrDist = -fS1*fS1+fS0*(fS0+2.0f*fB0)+fC;
						}
						else
						{
							fS1 = extent1;
							fSqrDist = fS1*(fS1-2.0f*fTmpS1) + fS0*(fS0+2.0f*fB0)+fC;
						}
					}
					else // region 2 (corner)
					{
						// try the t = +extent1 side first; if s also clamps,
						// re-minimize over t on the s = +extent0 side
						fS1 = extent1;
						fTmpS0 = -(fA01*fS1+fB0);
						if (fTmpS0 < -extent0)
						{
							fS0 = -extent0;
							fSqrDist = fS0*(fS0-2.0f*fTmpS0) + fS1*(fS1+2.0f*fB1)+fC;
						}
						else if (fTmpS0 <= extent0)
						{
							fS0 = fTmpS0;
							fSqrDist = -fS0*fS0+fS1*(fS1+2.0f*fB1)+fC;
						}
						else
						{
							fS0 = extent0;
							fTmpS1 = -(fA01*fS0+fB1);
							if (fTmpS1 < -extent1)
							{
								fS1 = -extent1;
								fSqrDist = fS1*(fS1-2.0f*fTmpS1) + fS0*(fS0+2.0f*fB0)+fC;
							}
							else if (fTmpS1 <= extent1)
							{
								fS1 = fTmpS1;
								fSqrDist = -fS1*fS1+fS0*(fS0+2.0f*fB0) + fC;
							}
							else
							{
								fS1 = extent1;
								fSqrDist = fS1*(fS1-2.0f*fTmpS1) + fS0*(fS0+2.0f*fB0)+fC;
							}
						}
					}
				}
				else // region 8 (corner)
				{
					fS1 = -extent1;
					fTmpS0 = -(fA01*fS1+fB0);
					if (fTmpS0 < -extent0)
					{
						fS0 = -extent0;
						fSqrDist = fS0*(fS0-2.0f*fTmpS0) + fS1*(fS1+2.0f*fB1)+fC;
					}
					else if (fTmpS0 <= extent0)
					{
						fS0 = fTmpS0;
						fSqrDist = -fS0*fS0+fS1*(fS1+2.0f*fB1)+fC;
					}
					else
					{
						fS0 = extent0;
						fTmpS1 = -(fA01*fS0+fB1);
						if (fTmpS1 > extent1)
						{
							fS1 = extent1;
							fSqrDist = fS1*(fS1-2.0f*fTmpS1) + fS0*(fS0+2.0f*fB0)+fC;
						}
						else if (fTmpS1 >= -extent1)
						{
							fS1 = fTmpS1;
							fSqrDist = -fS1*fS1+fS0*(fS0+2.0f*fB0) + fC;
						}
						else
						{
							fS1 = -extent1;
							fSqrDist = fS1*(fS1-2.0f*fTmpS1) + fS0*(fS0+2.0f*fB0)+fC;
						}
					}
				}
			}
		}
		else
		{
			if (fS1 >= -fExtDet1)
			{
				if (fS1 <= fExtDet1) // region 5 (side)
				{
					// clamp s to -extent0, then re-minimize over t
					fS0 = -extent0;
					fTmpS1 = -(fA01*fS0+fB1);
					if (fTmpS1 < -extent1)
					{
						fS1 = -extent1;
						fSqrDist = fS1*(fS1-2.0f*fTmpS1) + fS0*(fS0+2.0f*fB0)+fC;
					}
					else if (fTmpS1 <= extent1)
					{
						fS1 = fTmpS1;
						fSqrDist = -fS1*fS1+fS0*(fS0+2.0f*fB0)+fC;
					}
					else
					{
						fS1 = extent1;
						fSqrDist = fS1*(fS1-2.0f*fTmpS1) + fS0*(fS0+2.0f*fB0)+fC;
					}
				}
				else // region 4 (corner)
				{
					fS1 = extent1;
					fTmpS0 = -(fA01*fS1+fB0);
					if (fTmpS0 > extent0)
					{
						fS0 = extent0;
						fSqrDist = fS0*(fS0-2.0f*fTmpS0) + fS1*(fS1+2.0f*fB1)+fC;
					}
					else if (fTmpS0 >= -extent0)
					{
						fS0 = fTmpS0;
						fSqrDist = -fS0*fS0+fS1*(fS1+2.0f*fB1)+fC;
					}
					else
					{
						fS0 = -extent0;
						fTmpS1 = -(fA01*fS0+fB1);
						if (fTmpS1 < -extent1)
						{
							fS1 = -extent1;
							fSqrDist = fS1*(fS1-2.0f*fTmpS1) + fS0*(fS0+2.0f*fB0)+fC;
						}
						else if (fTmpS1 <= extent1)
						{
							fS1 = fTmpS1;
							fSqrDist = -fS1*fS1+fS0*(fS0+2.0f*fB0) + fC;
						}
						else
						{
							fS1 = extent1;
							fSqrDist = fS1*(fS1-2.0f*fTmpS1) + fS0*(fS0+2.0f*fB0)+fC;
						}
					}
				}
			}
			else // region 6 (corner)
			{
				fS1 = -extent1;
				fTmpS0 = -(fA01*fS1+fB0);
				if (fTmpS0 > extent0)
				{
					fS0 = extent0;
					fSqrDist = fS0*(fS0-2.0f*fTmpS0) + fS1*(fS1+2.0f*fB1)+fC;
				}
				else if (fTmpS0 >= -extent0)
				{
					fS0 = fTmpS0;
					fSqrDist = -fS0*fS0+fS1*(fS1+2.0f*fB1)+fC;
				}
				else
				{
					fS0 = -extent0;
					fTmpS1 = -(fA01*fS0+fB1);
					if (fTmpS1 < -extent1)
					{
						fS1 = -extent1;
						fSqrDist = fS1*(fS1-2.0f*fTmpS1) + fS0*(fS0+2.0f*fB0)+fC;
					}
					else if (fTmpS1 <= extent1)
					{
						fS1 = fTmpS1;
						fSqrDist = -fS1*fS1+fS0*(fS0+2.0f*fB0) + fC;
					}
					else
					{
						fS1 = extent1;
						fSqrDist = fS1*(fS1-2.0f*fTmpS1) + fS0*(fS0+2.0f*fB0)+fC;
					}
				}
			}
		}
	}
	else
	{
		// The segments are parallel.
		// Minimize over the combined 1D interval, splitting the resulting
		// parameter back between the two segments proportionally.
		PxReal fE0pE1 = extent0 + extent1;
		PxReal fSign = (fA01 > 0.0f ? -1.0f : 1.0f);
		PxReal b0Avr = 0.5f*(fB0 - fSign*fB1);
		PxReal fLambda = -b0Avr;
		if(fLambda < -fE0pE1)
		{
			fLambda = -fE0pE1;
		}
		else if(fLambda > fE0pE1)
		{
			fLambda = fE0pE1;
		}

		fS1 = -fSign*fLambda*extent1/fE0pE1;
		fS0 = fLambda + fSign*fS1;
		fSqrDist = fLambda*(fLambda + 2.0f*b0Avr) + fC;
	}

	if(param0)
		*param0 = fS0;
	if(param1)
		*param1 = fS1;

	// account for numerical round-off error
	return physx::intrinsics::selectMax(0.0f, fSqrDist);
}
// Adapter overload: here each segment is (start point, end - start).
// The centered worker above expects (center, unit direction, half length),
// so convert both segments, run the centered query, then remap its
// [-halfLen, +halfLen] parameters into this overload's [0, 1] range.
// Zero-length (degenerate) segments are allowed: their direction stays
// unnormalized and their output parameter is reported as 0.
PxReal Gu::distanceSegmentSegmentSquared(	const PxVec3& origin0, const PxVec3& extent0,
											const PxVec3& origin1, const PxVec3& extent1,
											PxReal* param0,
											PxReal* param1)
{
	PxVec3 unitDir0 = extent0;
	const PxVec3 center0 = origin0 + extent0*0.5f;
	PxReal len0 = extent0.magnitude();
	const bool nonZero0 = len0 != 0.0f;
	PxReal invLen0 = 0.0f;
	if(nonZero0)
	{
		invLen0 = 1.0f / len0;
		unitDir0 *= invLen0;
		len0 *= 0.5f;	// from here on, len0 is the half length
	}

	PxVec3 unitDir1 = extent1;
	const PxVec3 center1 = origin1 + extent1*0.5f;
	PxReal len1 = extent1.magnitude();
	const bool nonZero1 = len1 != 0.0f;
	PxReal invLen1 = 0.0f;
	if(nonZero1)
	{
		invLen1 = 1.0f / len1;
		unitDir1 *= invLen1;
		len1 *= 0.5f;	// half length, as above
	}

	// Centered query: returned parameters lie in [-len, +len].
	const PxReal d2 = distanceSegmentSegmentSquared(center0, unitDir0, len0,
													center1, unitDir1, len1,
													param0, param1);

	// Remap t in [-halfLen, +halfLen] to [0, 1] as (halfLen + t) / fullLen.
	// (Note invLen is 1/fullLen while len is the half length, so the two
	// factors combine to exactly this mapping.)
	if(param0)
		*param0 = nonZero0 ? ((len0 + (*param0))*invLen0) : 0.0f;
	if(param1)
		*param1 = nonZero1 ? ((len1 + (*param1))*invLen1) : 0.0f;

	return d2;
}
/*
S0 = origin + extent * dir;
S1 = origin + extent * dir;
dir is the vector from start to end point
p1 is the start point of segment1
d1 is the direction vector(q1 - p1)
p2 is the start point of segment2
d2 is the direction vector(q2 - p2)
*/
// SIMD segment-segment squared distance.
// Segments are (p1, d1) and (p2, d2) with d = end - start. Outputs s and t
// are the closest-point parameters in [0, 1] along the first and second
// segment respectively; the return value is the squared distance between
// the two closest points. Degenerate (zero-length) segments are handled by
// substituting 0 for the corresponding reciprocal dot product.
FloatV Gu::distanceSegmentSegmentSquared(	const Vec3VArg p1,
											const Vec3VArg d1,
											const Vec3VArg p2,
											const Vec3VArg d2,
											FloatV& s,
											FloatV& t)
{
	const FloatV zero = FZero();
	const FloatV one = FOne();
	const FloatV eps = FEps();

	const Vec3V r = V3Sub(p1, p2);

	// One fused 4-way dot: a = d1.d1, e = d2.d2, b = d1.d2, c = d1.r.
	// Reciprocals are guarded: entries <= eps get 0 instead of 1/x.
	const Vec4V combinedDot = V3Dot4(d1, d1, d2, d2, d1, d2, d1, r);
	const Vec4V combinedRecip = V4Sel(V4IsGrtr(combinedDot, V4Splat(eps)), V4Recip(combinedDot), V4Splat(zero));
	const FloatV a = V4GetX(combinedDot);
	const FloatV e = V4GetY(combinedDot);
	const FloatV b = V4GetZ(combinedDot);
	const FloatV c = V4GetW(combinedDot);
	const FloatV aRecip = V4GetX(combinedRecip);//FSel(FIsGrtr(a, eps), FRecip(a), zero);
	const FloatV eRecip = V4GetY(combinedRecip);//FSel(FIsGrtr(e, eps), FRecip(e), zero);

	const FloatV f = V3Dot(d2, r);

	/*
		s = (b*f - c*e)/(a*e - b*b);
		t = (a*f - b*c)/(a*e - b*b);

		s = (b*t - c)/a;
		t = (b*s + f)/e;
	*/

	//if segments not parallel, the general non-degenerated case, compute closest point on two segments and clamp to segment1
	const FloatV denom = FSub(FMul(a, e), FMul(b, b));
	const FloatV temp = FSub(FMul(b, f), FMul(c, e));
	const FloatV s0 = FClamp(FDiv(temp, denom), zero, one);

	//if segments are parallel, denom < eps: fall back to the midpoint of segment1
	const BoolV con2 = FIsGrtr(eps, denom);//FIsEq(denom, zero);
	const FloatV sTmp = FSel(con2, FHalf(), s0);

	//compute point on segment2 closest to segment1
	//const FloatV tTmp = FMul(FAdd(FMul(b, sTmp), f), eRecip);
	const FloatV tTmp = FMul(FScaleAdd(b, sTmp, f), eRecip);

	//if t is in [zero, one], done. otherwise clamp t
	const FloatV t2 = FClamp(tTmp, zero, one);

	//recompute s for the new value
	const FloatV comp = FMul(FSub(FMul(b,t2), c), aRecip);
	const FloatV s2 = FClamp(comp, zero, one);

	s = s2;
	t = t2;

	const Vec3V closest1 = V3ScaleAdd(d1, s2, p1);//V3Add(p1, V3Scale(d1, tempS));
	const Vec3V closest2 = V3ScaleAdd(d2, t2, p2);//V3Add(p2, V3Scale(d2, tempT));
	const Vec3V vv = V3Sub(closest1, closest2);
	return V3Dot(vv, vv);
}
/*
segment (p, d) and segment (p02, d02)
segment (p, d) and segment (p12, d12)
segment (p, d) and segment (p22, d22)
segment (p, d) and segment (p32, d32)
*/
/*
	One-vs-four SIMD segment distance: computes, in a single pass, the squared
	distance between segment (p, d0) and each of the four segments
	(p02, d02), (p12, d12), (p22, d22), (p32, d32). Directions are end - start.
	Outputs s and t pack the four closest-point parameters (each in [0, 1])
	along the shared segment and the respective target segment; the return
	value packs the four squared distances.
*/
Vec4V Gu::distanceSegmentSegmentSquared4(  const Vec3VArg p, const Vec3VArg d0, 
										   const Vec3VArg p02, const Vec3VArg d02, 
										   const Vec3VArg p12, const Vec3VArg d12, 
										   const Vec3VArg p22, const Vec3VArg d22,
										   const Vec3VArg p32, const Vec3VArg d32,
										   Vec4V& s, Vec4V& t)
{
	const Vec4V zero = V4Zero();
	const Vec4V one = V4One();
	const Vec4V eps = V4Eps();
	const Vec4V half = V4Splat(FHalf());

	// Broadcast the shared segment's direction and start point lane-wise.
	const Vec4V d0X = V4Splat(V3GetX(d0));
	const Vec4V d0Y = V4Splat(V3GetY(d0));
	const Vec4V d0Z = V4Splat(V3GetZ(d0));
	const Vec4V pX  = V4Splat(V3GetX(p));
	const Vec4V pY  = V4Splat(V3GetY(p));
	const Vec4V pZ  = V4Splat(V3GetZ(p));

	// Transpose the four target segments into SoA (x/y/z planes).
	Vec4V d024 = Vec4V_From_Vec3V(d02);
	Vec4V d124 = Vec4V_From_Vec3V(d12);
	Vec4V d224 = Vec4V_From_Vec3V(d22);
	Vec4V d324 = Vec4V_From_Vec3V(d32);

	Vec4V p024 = Vec4V_From_Vec3V(p02);
	Vec4V p124 = Vec4V_From_Vec3V(p12);
	Vec4V p224 = Vec4V_From_Vec3V(p22);
	Vec4V p324 = Vec4V_From_Vec3V(p32);

	Vec4V d0123X, d0123Y, d0123Z;
	Vec4V p0123X, p0123Y, p0123Z;

	PX_TRANSPOSE_44_34(d024, d124, d224, d324, d0123X, d0123Y, d0123Z);
	PX_TRANSPOSE_44_34(p024, p124, p224, p324, p0123X, p0123Y, p0123Z);

	const Vec4V rX = V4Sub(pX, p0123X);
	const Vec4V rY = V4Sub(pY, p0123Y);
	const Vec4V rZ = V4Sub(pZ, p0123Z);

	//TODO - store this in a transposed state and avoid so many dot products?
	// Per-lane dot products: a = d0.d0, e = di.di, b = d0.di, c = d0.r, f = di.r.
	const FloatV dd = V3Dot(d0, d0);

	const Vec4V e = V4MulAdd(d0123Z, d0123Z, V4MulAdd(d0123X, d0123X, V4Mul(d0123Y, d0123Y)));
	const Vec4V b = V4MulAdd(d0Z, d0123Z, V4MulAdd(d0X, d0123X, V4Mul(d0Y, d0123Y)));
	const Vec4V c = V4MulAdd(d0Z, rZ, V4MulAdd(d0X, rX, V4Mul(d0Y, rY)));
	const Vec4V f = V4MulAdd(d0123Z, rZ, V4MulAdd(d0123X, rX, V4Mul(d0123Y, rY)));

	const Vec4V a(V4Splat(dd));

	const Vec4V aRecip(V4Recip(a));
	// Guard 1/e for degenerate (zero-length) target segments, mirroring the
	// scalar FloatV overload above which substitutes 0 when e <= eps. Without
	// the guard a zero-length segment propagates inf/NaN into tTmp and t.
	const Vec4V eRecip = V4Sel(V4IsGrtrOrEq(eps, e), zero, V4Recip(e));

	//if segments not parallel, compute closest point on two segments and clamp to segment1
	const Vec4V denom = V4Sub(V4Mul(a, e), V4Mul(b, b));
	const Vec4V temp = V4Sub(V4Mul(b, f), V4Mul(c, e));
	const Vec4V s0 = V4Clamp(V4Div(temp, denom), zero, one);

	// Parallel (or degenerate) pairs have denom ~ 0: fall back to the
	// midpoint of the shared segment for those lanes.
	const BoolV con2 = V4IsGrtrOrEq(eps, denom);
	const Vec4V sTmp = V4Sel(con2, half, s0);

	//compute point on segment2 closest to segment1
	const Vec4V tTmp = V4Mul(V4Add(V4Mul(b, sTmp), f), eRecip);

	//if t is in [zero, one], done. otherwise clamp t
	const Vec4V t2 = V4Clamp(tTmp, zero, one);

	//recompute s for the new value of t
	const Vec4V comp = V4Mul(V4Sub(V4Mul(b,t2), c), aRecip);
	const BoolV aaNearZero = V4IsGrtrOrEq(eps, a); // aRecip is only valid when a > eps
	const Vec4V s2 = V4Sel(aaNearZero, V4Zero(), V4Clamp(comp, zero, one));

	s = s2;
	t = t2;

	// Squared distance between the per-lane closest points.
	const Vec4V closest1X = V4MulAdd(d0X, s2, pX);
	const Vec4V closest1Y = V4MulAdd(d0Y, s2, pY);
	const Vec4V closest1Z = V4MulAdd(d0Z, s2, pZ);

	const Vec4V closest2X = V4MulAdd(d0123X, t2, p0123X);
	const Vec4V closest2Y = V4MulAdd(d0123Y, t2, p0123Y);
	const Vec4V closest2Z = V4MulAdd(d0123Z, t2, p0123Z);

	const Vec4V vvX = V4Sub(closest1X, closest2X);
	const Vec4V vvY = V4Sub(closest1Y, closest2Y);
	const Vec4V vvZ = V4Sub(closest1Z, closest2Z);

	const Vec4V vd = V4MulAdd(vvX, vvX, V4MulAdd(vvY, vvY, V4Mul(vvZ, vvZ)));

	return vd;
}
| 20,431 | C++ | 34.595819 | 122 | 0.484509 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/distance/GuDistanceSegmentTriangle.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxIntrinsics.h"
#include "GuDistanceSegmentTriangle.h"
#include "GuDistancePointTriangle.h"
#include "GuDistanceSegmentSegment.h"
#include "GuBarycentricCoordinates.h"
using namespace physx;
using namespace Gu;
// ptchernev:
// The Magic Software code uses a relative error test for parallel case.
// The Novodex code does not, presumably as an optimization.
// Since the Novodex code is working in the trunk I see no reason
// to reintroduce the relative error test here.
// PT: this might just be because the relative error test has been added
// after we grabbed the code. I don't remember making this change. A good
// idea would be NOT to refactor Magic's code, to easily grab updated
// versions from the website.............................................
// ptchernev:
// The code has been modified to use a relative error test since the absolute
// test would break down for small geometries. (TTP 4021)
// Keeps the candidate result (fSqrDist0, fR0, fS0, fT0) only if it is strictly closer
// than the current best (fSqrDist, fR, fS, fT), overwriting the best in place.
static PX_FORCE_INLINE void updateClosestHit(	PxReal fSqrDist0, PxReal fR0, PxReal fS0, PxReal fT0,
												PxReal& fSqrDist, PxReal& fR, PxReal& fS, PxReal& fT)
{
	if(fSqrDist0 >= fSqrDist)
		return;	// current best is at least as close - keep it

	fSqrDist = fSqrDist0;
	fR = fR0;
	fS = fS0;
	fT = fT0;
}
// Computes the squared distance between segment (origin, origin + dir) and triangle
// (p0, p0 + triEdge0, p0 + triEdge1).
// The closest pair is parameterized as:
//   point on segment  = origin + r * dir                 (r returned via 't')
//   point on triangle = p0 + s * triEdge0 + t * triEdge1 (s, t returned via 'u', 'v')
// Adapted from the Magic Software segment/triangle distance code: the unconstrained
// minimum of the quadratic distance function is found first, then the (r, s, t)
// parameter domain is split into regions and the minimum is searched on the relevant
// boundary faces. A relative-error test handles the parallel case (see TTP 4021 above).
PxReal Gu::distanceSegmentTriangleSquared(	const PxVec3& origin, const PxVec3& dir,
											const PxVec3& p0, const PxVec3& triEdge0, const PxVec3& triEdge1,
											PxReal* t, PxReal* u, PxReal* v)
{
	const PxReal fA00 = dir.magnitudeSquared();

	// Degenerate (near zero-length) segment: fall back to point-triangle distance.
	if(fA00 < 1e-6f*1e-6f)
	{
		if(t)
			*t = 0.0f;
		return distancePointTriangleSquared(origin, p0, triEdge0, triEdge1, u, v);
	}

	// Coefficients of the quadratic distance function in (r, s, t).
	const PxVec3 kDiff = p0 - origin;
	const PxReal fA01 = -(dir.dot(triEdge0));
	const PxReal fA02 = -(dir.dot(triEdge1));
	const PxReal fA11 = triEdge0.magnitudeSquared();
	const PxReal fA12 = triEdge0.dot(triEdge1);
	const PxReal fA22 = triEdge1.dot(triEdge1);
	const PxReal fB0 = -(kDiff.dot(dir));
	const PxReal fB1 = kDiff.dot(triEdge0);
	const PxReal fB2 = kDiff.dot(triEdge1);
	const PxReal fCof00 = fA11*fA22-fA12*fA12;
	const PxReal fCof01 = fA02*fA12-fA01*fA22;
	const PxReal fCof02 = fA01*fA12-fA02*fA11;
	const PxReal fDet = fA00*fCof00+fA01*fCof01+fA02*fCof02;

	PxReal fSqrDist, fSqrDist0, fR, fS, fT, fR0, fS0, fT0;

	// Set up for a relative error test on the angle between ray direction
	// and triangle normal to determine parallel/nonparallel status.
	const PxVec3 kNormal = triEdge0.cross(triEdge1);
	const PxReal fDot = kNormal.dot(dir);
	if(fDot*fDot >= 1e-6f*dir.magnitudeSquared()*kNormal.magnitudeSquared())
	{
		const PxReal fCof11 = fA00*fA22-fA02*fA02;
		const PxReal fCof12 = fA02*fA01-fA00*fA12;
		const PxReal fCof22 = fA00*fA11-fA01*fA01;
		const PxReal fInvDet = fDet == 0.0f ? 0.0f : 1.0f/fDet;
		const PxReal fRhs0 = -fB0*fInvDet;
		const PxReal fRhs1 = -fB1*fInvDet;
		const PxReal fRhs2 = -fB2*fInvDet;

		// Unconstrained minimum of the quadratic (may lie outside the valid domain).
		fR = fCof00*fRhs0+fCof01*fRhs1+fCof02*fRhs2;
		fS = fCof01*fRhs0+fCof11*fRhs1+fCof12*fRhs2;
		fT = fCof02*fRhs0+fCof12*fRhs1+fCof22*fRhs2;

		if(fR < 0.0f)
		{
			if(fS+fT <= 1.0f)
			{
				if(fS < 0.0f)
				{
					if(fT < 0.0f)  // region 4m
					{
						// minimum on face s=0 or t=0 or r=0
						fSqrDist = distanceSegmentSegmentSquared(origin, dir, p0, triEdge1, &fR, &fT);
						fS = 0.0f;
						fSqrDist0 = distanceSegmentSegmentSquared(origin, dir, p0, triEdge0, &fR0, &fS0);
						fT0 = 0.0f;
						updateClosestHit(fSqrDist0, fR0, fS0, fT0, fSqrDist, fR, fS, fT);
					}
					else  // region 3m
					{
						// minimum on face s=0 or r=0
						fSqrDist = distanceSegmentSegmentSquared(origin, dir, p0, triEdge1, &fR, &fT);
						fS = 0.0f;
					}
					fSqrDist0 = distancePointTriangleSquared(origin, p0, triEdge0, triEdge1, &fS0, &fT0);
					fR0 = 0.0f;
					updateClosestHit(fSqrDist0, fR0, fS0, fT0, fSqrDist, fR, fS, fT);
				}
				else if(fT < 0.0f)  // region 5m
				{
					// minimum on face t=0 or r=0
					fSqrDist = distanceSegmentSegmentSquared(origin, dir, p0, triEdge0, &fR, &fS);
					fT = 0.0f;
					fSqrDist0 = distancePointTriangleSquared(origin, p0, triEdge0, triEdge1, &fS0, &fT0);
					fR0 = 0.0f;
					updateClosestHit(fSqrDist0, fR0, fS0, fT0, fSqrDist, fR, fS, fT);
				}
				else  // region 0m
				{
					// minimum on face r=0
					fSqrDist = distancePointTriangleSquared(origin, p0, triEdge0, triEdge1, &fS, &fT);
					fR = 0.0f;
				}
			}
			else
			{
				if(fS < 0.0f)  // region 2m
				{
					// minimum on face s=0 or s+t=1 or r=0
					fSqrDist = distanceSegmentSegmentSquared(origin, dir, p0, triEdge1, &fR, &fT);
					fS = 0.0f;
					const PxVec3 kTriSegOrig = p0+triEdge0;
					const PxVec3 kTriSegDir = triEdge1-triEdge0;
					fSqrDist0 = distanceSegmentSegmentSquared(origin, dir, kTriSegOrig, kTriSegDir, &fR0, &fT0);
					fS0 = 1.0f-fT0;
					updateClosestHit(fSqrDist0, fR0, fS0, fT0, fSqrDist, fR, fS, fT);
				}
				else if(fT < 0.0f)  // region 6m
				{
					// minimum on face t=0 or s+t=1 or r=0
					fSqrDist = distanceSegmentSegmentSquared(origin, dir, p0, triEdge0, &fR, &fS);
					fT = 0.0f;
					const PxVec3 kTriSegOrig = p0+triEdge0;
					const PxVec3 kTriSegDir = triEdge1-triEdge0;
					fSqrDist0 = distanceSegmentSegmentSquared(origin, dir, kTriSegOrig, kTriSegDir, &fR0, &fT0);
					fS0 = 1.0f-fT0;
					updateClosestHit(fSqrDist0, fR0, fS0, fT0, fSqrDist, fR, fS, fT);
				}
				else  // region 1m
				{
					// minimum on face s+t=1 or r=0
					const PxVec3 kTriSegOrig = p0+triEdge0;
					const PxVec3 kTriSegDir = triEdge1-triEdge0;
					fSqrDist = distanceSegmentSegmentSquared(origin, dir, kTriSegOrig, kTriSegDir, &fR, &fT);
					fS = 1.0f-fT;
				}
				fSqrDist0 = distancePointTriangleSquared(origin, p0, triEdge0, triEdge1, &fS0, &fT0);
				fR0 = 0.0f;
				updateClosestHit(fSqrDist0, fR0, fS0, fT0, fSqrDist, fR, fS, fT);
			}
		}
		else if(fR <= 1.0f)
		{
			if(fS+fT <= 1.0f)
			{
				if(fS < 0.0f)
				{
					if(fT < 0.0f)  // region 4
					{
						// minimum on face s=0 or t=0
						fSqrDist = distanceSegmentSegmentSquared(origin, dir, p0, triEdge1, &fR, &fT);
						fS = 0.0f;
						fSqrDist0 = distanceSegmentSegmentSquared(origin, dir, p0, triEdge0, &fR0, &fS0);
						fT0 = 0.0f;
						updateClosestHit(fSqrDist0, fR0, fS0, fT0, fSqrDist, fR, fS, fT);
					}
					else  // region 3
					{
						// minimum on face s=0
						fSqrDist = distanceSegmentSegmentSquared(origin, dir, p0, triEdge1, &fR, &fT);
						fS = 0.0f;
					}
				}
				else if(fT < 0.0f)  // region 5
				{
					// minimum on face t=0
					fSqrDist = distanceSegmentSegmentSquared(origin, dir, p0, triEdge0, &fR, &fS);
					fT = 0.0f;
				}
				else  // region 0
				{
					// global minimum is interior, done
					fSqrDist = fR*(fA00*fR+fA01*fS+fA02*fT+2.0f*fB0)
							  +fS*(fA01*fR+fA11*fS+fA12*fT+2.0f*fB1)
							  +fT*(fA02*fR+fA12*fS+fA22*fT+2.0f*fB2)
							  +kDiff.magnitudeSquared();
				}
			}
			else
			{
				if(fS < 0.0f)  // region 2
				{
					// minimum on face s=0 or s+t=1
					fSqrDist = distanceSegmentSegmentSquared(origin, dir, p0, triEdge1, &fR, &fT);
					fS = 0.0f;
					const PxVec3 kTriSegOrig = p0+triEdge0;
					const PxVec3 kTriSegDir = triEdge1-triEdge0;
					fSqrDist0 = distanceSegmentSegmentSquared(origin, dir, kTriSegOrig, kTriSegDir, &fR0, &fT0);
					fS0 = 1.0f-fT0;
					updateClosestHit(fSqrDist0, fR0, fS0, fT0, fSqrDist, fR, fS, fT);
				}
				else if(fT < 0.0f)  // region 6
				{
					// minimum on face t=0 or s+t=1
					fSqrDist = distanceSegmentSegmentSquared(origin, dir, p0, triEdge0, &fR, &fS);
					fT = 0.0f;
					const PxVec3 kTriSegOrig = p0+triEdge0;
					const PxVec3 kTriSegDir = triEdge1-triEdge0;
					fSqrDist0 = distanceSegmentSegmentSquared(origin, dir, kTriSegOrig, kTriSegDir, &fR0, &fT0);
					fS0 = 1.0f-fT0;
					updateClosestHit(fSqrDist0, fR0, fS0, fT0, fSqrDist, fR, fS, fT);
				}
				else  // region 1
				{
					// minimum on face s+t=1
					const PxVec3 kTriSegOrig = p0+triEdge0;
					const PxVec3 kTriSegDir = triEdge1-triEdge0;
					fSqrDist = distanceSegmentSegmentSquared(origin, dir, kTriSegOrig, kTriSegDir, &fR, &fT);
					fS = 1.0f-fT;
				}
			}
		}
		else  // fR > 1
		{
			if(fS+fT <= 1.0f)
			{
				if(fS < 0.0f)
				{
					if(fT < 0.0f)  // region 4p
					{
						// minimum on face s=0 or t=0 or r=1
						fSqrDist = distanceSegmentSegmentSquared(origin, dir, p0, triEdge1, &fR, &fT);
						fS = 0.0f;
						fSqrDist0 = distanceSegmentSegmentSquared(origin, dir, p0, triEdge0, &fR0, &fS0);
						fT0 = 0.0f;
						updateClosestHit(fSqrDist0, fR0, fS0, fT0, fSqrDist, fR, fS, fT);
					}
					else  // region 3p
					{
						// minimum on face s=0 or r=1
						fSqrDist = distanceSegmentSegmentSquared(origin, dir, p0, triEdge1, &fR, &fT);
						fS = 0.0f;
					}
					const PxVec3 kPt = origin+dir;
					fSqrDist0 = distancePointTriangleSquared(kPt, p0, triEdge0, triEdge1, &fS0, &fT0);
					fR0 = 1.0f;
					updateClosestHit(fSqrDist0, fR0, fS0, fT0, fSqrDist, fR, fS, fT);
				}
				else if(fT < 0.0f)  // region 5p
				{
					// minimum on face t=0 or r=1
					fSqrDist = distanceSegmentSegmentSquared(origin, dir, p0, triEdge0, &fR, &fS);
					fT = 0.0f;
					const PxVec3 kPt = origin+dir;
					fSqrDist0 = distancePointTriangleSquared(kPt, p0, triEdge0, triEdge1, &fS0, &fT0);
					fR0 = 1.0f;
					updateClosestHit(fSqrDist0, fR0, fS0, fT0, fSqrDist, fR, fS, fT);
				}
				else  // region 0p
				{
					// minimum face on r=1
					const PxVec3 kPt = origin+dir;
					fSqrDist = distancePointTriangleSquared(kPt, p0, triEdge0, triEdge1, &fS, &fT);
					fR = 1.0f;
				}
			}
			else
			{
				if(fS < 0.0f)  // region 2p
				{
					// minimum on face s=0 or s+t=1 or r=1
					fSqrDist = distanceSegmentSegmentSquared(origin, dir, p0, triEdge1, &fR, &fT);
					fS = 0.0f;
					const PxVec3 kTriSegOrig = p0+triEdge0;
					const PxVec3 kTriSegDir = triEdge1-triEdge0;
					fSqrDist0 = distanceSegmentSegmentSquared(origin, dir, kTriSegOrig, kTriSegDir, &fR0, &fT0);
					fS0 = 1.0f-fT0;
					updateClosestHit(fSqrDist0, fR0, fS0, fT0, fSqrDist, fR, fS, fT);
				}
				else if(fT < 0.0f)  // region 6p
				{
					// minimum on face t=0 or s+t=1 or r=1
					fSqrDist = distanceSegmentSegmentSquared(origin, dir, p0, triEdge0, &fR, &fS);
					fT = 0.0f;
					const PxVec3 kTriSegOrig = p0+triEdge0;
					const PxVec3 kTriSegDir = triEdge1-triEdge0;
					fSqrDist0 = distanceSegmentSegmentSquared(origin, dir, kTriSegOrig, kTriSegDir, &fR0, &fT0);
					fS0 = 1.0f-fT0;
					updateClosestHit(fSqrDist0, fR0, fS0, fT0, fSqrDist, fR, fS, fT);
				}
				else  // region 1p
				{
					// minimum on face s+t=1 or r=1
					const PxVec3 kTriSegOrig = p0+triEdge0;
					const PxVec3 kTriSegDir = triEdge1-triEdge0;
					fSqrDist = distanceSegmentSegmentSquared(origin, dir, kTriSegOrig, kTriSegDir, &fR, &fT);
					fS = 1.0f-fT;
				}
				const PxVec3 kPt = origin+dir;
				fSqrDist0 = distancePointTriangleSquared(kPt, p0, triEdge0, triEdge1, &fS0, &fT0);
				fR0 = 1.0f;
				updateClosestHit(fSqrDist0, fR0, fS0, fT0, fSqrDist, fR, fS, fT);
			}
		}
	}
	else
	{
		// segment and triangle are parallel: test the segment against all three triangle
		// edges and both segment endpoints against the triangle, keep the closest.
		fSqrDist = distanceSegmentSegmentSquared(origin, dir, p0, triEdge0, &fR, &fS);
		fT = 0.0f;

		fSqrDist0 = distanceSegmentSegmentSquared(origin, dir, p0, triEdge1, &fR0, &fT0);
		fS0 = 0.0f;
		updateClosestHit(fSqrDist0, fR0, fS0, fT0, fSqrDist, fR, fS, fT);

		const PxVec3 kTriSegOrig = p0+triEdge0;
		const PxVec3 kTriSegDir = triEdge1 - triEdge0;
		fSqrDist0 = distanceSegmentSegmentSquared(origin, dir, kTriSegOrig, kTriSegDir, &fR0, &fT0);
		fS0 = 1.0f-fT0;
		updateClosestHit(fSqrDist0, fR0, fS0, fT0, fSqrDist, fR, fS, fT);

		fSqrDist0 = distancePointTriangleSquared(origin, p0, triEdge0, triEdge1, &fS0, &fT0);
		fR0 = 0.0f;
		updateClosestHit(fSqrDist0, fR0, fS0, fT0, fSqrDist, fR, fS, fT);

		const PxVec3 kPt = origin+dir;
		fSqrDist0 = distancePointTriangleSquared(kPt, p0, triEdge0, triEdge1, &fS0, &fT0);
		fR0 = 1.0f;
		updateClosestHit(fSqrDist0, fR0, fS0, fT0, fSqrDist, fR, fS, fT);
	}

	if(t) *t = fR;
	if(u) *u = fS;
	if(v) *v = fT;

	// account for numerical round-off error
	return physx::intrinsics::selectMax(0.0f, fSqrDist);
}
// Returns the squared distance between segment (p, q) and triangle (a, b, c).
// closest0 is the closest point on segment pq
// closest1 is the closest point on triangle abc
aos::FloatV Gu::distanceSegmentTriangleSquared(	const aos::Vec3VArg p, const aos::Vec3VArg q,
												const aos::Vec3VArg a, const aos::Vec3VArg b, const aos::Vec3VArg c,
												aos::Vec3V& closest0, aos::Vec3V& closest1)
{
	using namespace aos;
	const FloatV zero = FZero();
	//const FloatV one = FOne();
	//const FloatV parallelTolerance  = FloatV_From_F32(PX_PARALLEL_TOLERANCE);

	const Vec3V pq = V3Sub(q, p);
	const Vec3V ab = V3Sub(b, a);
	const Vec3V ac = V3Sub(c, a);
	const Vec3V bc = V3Sub(c, b);
	const Vec3V ap = V3Sub(p, a);
	const Vec3V aq = V3Sub(q, a);

	//This is used to calculate the barycentric coordinate
	const FloatV d00 = V3Dot(ab,ab);
	const FloatV d01 = V3Dot(ab, ac);
	const FloatV d11 = V3Dot(ac, ac);
	const FloatV tDenom = FSub(FMul(d00, d11), FMul(d01, d01));
	// zero-area (degenerate) triangle => bdenom = 0 instead of a division by zero
	const FloatV bdenom = FSel(FIsGrtr(tDenom, zero), FRecip(tDenom), zero);

	const Vec3V n = V3Normalize(V3Cross(ab, ac)); // normalize vector

	//compute the closest point of p and triangle plane abc
	const FloatV dist3 = V3Dot(ap, n);
	const FloatV sqDist3 = FMul(dist3, dist3);

	//compute the closest point of q and triangle plane abc
	const FloatV dist4 = V3Dot(aq, n);
	const FloatV sqDist4 = FMul(dist4, dist4);

	// dist3*dist4 < 0 means p and q lie on opposite sides of the plane
	const FloatV dMul = FMul(dist3, dist4);
	const BoolV con = FIsGrtr(zero, dMul);

	// intersect with the plane
	if(BAllEqTTTT(con))
	{
		//compute the intersect point
		const FloatV nom = FNeg(V3Dot(n, ap));
		const FloatV denom = FRecip(V3Dot(n, pq));
		const FloatV t = FMul(nom, denom);
		const Vec3V ip = V3ScaleAdd(pq, t, p);//V3Add(p, V3Scale(pq, t));
		const Vec3V v2 = V3Sub(ip, a);
		const FloatV d20 = V3Dot(v2, ab);
		const FloatV d21 = V3Dot(v2, ac);
		const FloatV v0 = FMul(FSub(FMul(d11, d20), FMul(d01, d21)), bdenom);
		const FloatV w0 = FMul(FSub(FMul(d00, d21), FMul(d01, d20)), bdenom);
		const BoolV con0 = isValidTriangleBarycentricCoord(v0, w0);
		if(BAllEqTTTT(con0))
		{
			// segment pierces the triangle: distance is zero
			closest0 = closest1 = ip;
			return zero;
		}
	}

	// Test the segment against the three triangle edges (ab, bc, ac) in one SIMD call;
	// the 4th lane duplicates the first edge and is ignored.
	Vec4V t40, t41;
	const Vec4V sqDist44 = distanceSegmentSegmentSquared4(p,pq,a,ab, b,bc, a,ac, a,ab, t40, t41);

	const FloatV t00 = V4GetX(t40);
	const FloatV t10 = V4GetY(t40);
	const FloatV t20 = V4GetZ(t40);

	const FloatV t01 = V4GetX(t41);
	const FloatV t11 = V4GetY(t41);
	const FloatV t21 = V4GetZ(t41);

	const FloatV sqDist0(V4GetX(sqDist44));
	const FloatV sqDist1(V4GetY(sqDist44));
	const FloatV sqDist2(V4GetZ(sqDist44));

	const Vec3V closestP00 = V3ScaleAdd(pq, t00, p);
	const Vec3V closestP01 = V3ScaleAdd(ab, t01, a);

	const Vec3V closestP10 = V3ScaleAdd(pq, t10, p);
	const Vec3V closestP11 = V3ScaleAdd(bc, t11, b);

	const Vec3V closestP20 = V3ScaleAdd(pq, t20, p);
	const Vec3V closestP21 = V3ScaleAdd(ac, t21, a);

	//Get the closest point of all edges
	const BoolV con20 = FIsGrtr(sqDist1, sqDist0);
	const BoolV con21 = FIsGrtr(sqDist2, sqDist0);
	const BoolV con2 = BAnd(con20,con21);
	const BoolV con30 = FIsGrtrOrEq(sqDist0, sqDist1);
	const BoolV con31 = FIsGrtr(sqDist2, sqDist1);
	const BoolV con3 = BAnd(con30, con31);
	const FloatV sqDistPE = FSel(con2, sqDist0, FSel(con3, sqDist1, sqDist2));
	//const FloatV tValue = FSel(con2, t00, FSel(con3, t10, t20));
	const Vec3V closestPE0 = V3Sel(con2, closestP00, V3Sel(con3, closestP10, closestP20)); // closestP on segment
	const Vec3V closestPE1 = V3Sel(con2, closestP01, V3Sel(con3, closestP11, closestP21)); // closestP on triangle

	// closestP3x: p and its projection onto the triangle plane
	const Vec3V closestP31 = V3NegScaleSub(n, dist3, p);//V3Sub(p, V3Scale(n, dist3));
	const Vec3V closestP30 = p;

	//Compute the barycentric coordinate for the projected point of p
	const Vec3V pV20 = V3Sub(closestP31, a);
	const FloatV pD20 = V3Dot(pV20, ab);
	const FloatV pD21 = V3Dot(pV20, ac);
	const FloatV v0 = FMul(FSub(FMul(d11, pD20), FMul(d01, pD21)), bdenom);
	const FloatV w0 = FMul(FSub(FMul(d00, pD21), FMul(d01, pD20)), bdenom);

	//check closestP3 is inside the triangle
	const BoolV con0 = isValidTriangleBarycentricCoord(v0, w0);

	// closestP4x: q and its projection onto the triangle plane
	const Vec3V closestP41 = V3NegScaleSub(n, dist4, q);// V3Sub(q, V3Scale(n, dist4));
	const Vec3V closestP40 = q;

	//Compute the barycentric coordinate for the projected point of q
	const Vec3V qV20 = V3Sub(closestP41, a);
	const FloatV qD20 = V3Dot(qV20, ab);
	const FloatV qD21 = V3Dot(qV20, ac);
	const FloatV v1 = FMul(FSub(FMul(d11, qD20), FMul(d01, qD21)), bdenom);
	const FloatV w1 = FMul(FSub(FMul(d00, qD21), FMul(d01, qD20)), bdenom);
	const BoolV con1 = isValidTriangleBarycentricCoord(v1, w1);

	// p projects inside the triangle but q doesn't
	const BoolV d0 = FIsGrtr(sqDistPE, sqDist3);
	const Vec3V c00 = V3Sel(d0, closestP30, closestPE0);
	const Vec3V c01 = V3Sel(d0, closestP31, closestPE1);

	// q projects inside the triangle but p doesn't
	const BoolV d1 = FIsGrtr(sqDistPE, sqDist4);
	const Vec3V c10 = V3Sel(d1, closestP40, closestPE0);
	const Vec3V c11 = V3Sel(d1, closestP41, closestPE1);

	// both project inside the triangle: pick the endpoint closer to the plane
	const BoolV d2 = FIsGrtr(sqDist4, sqDist3);
	const Vec3V c20 = V3Sel(d2, closestP30, closestP40);
	const Vec3V c21 = V3Sel(d2, closestP31, closestP41);

	const BoolV cond2 = BAnd(con0, con1);

	const Vec3V closestP0 = V3Sel(cond2, c20, V3Sel(con0, c00, V3Sel(con1, c10, closestPE0)));
	const Vec3V closestP1 = V3Sel(cond2, c21, V3Sel(con0, c01, V3Sel(con1, c11, closestPE1)));
	const Vec3V vv = V3Sub(closestP1, closestP0);
	closest0 = closestP0;
	closest1 = closestP1;
	return V3Dot(vv, vv);
}
| 19,157 | C++ | 35.701149 | 111 | 0.671295 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/sweep/GuSweepSphereSphere.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuSweepSphereSphere.h"
#include "foundation/PxUtilities.h"
using namespace physx;
using namespace Gu;
// Adapted from Gamasutra (Gomez article)
// Return true if r1 and r2 are real
// Adapted from Gamasutra (Gomez article)
// Solves a*x^2 + b*x + c = 0. Writes the two real roots to r1/r2 and returns true;
// returns false (r1/r2 untouched) when the roots are complex.
static PX_FORCE_INLINE bool quadraticFormula(const PxReal a, const PxReal b, const PxReal c, PxReal& r1, PxReal& r2)
{
	const PxReal discriminant = b*b - 4*a*c;
	if(discriminant < 0.0f)
		return false;	// complex roots - no real solution

	PX_ASSERT(a!=0.0f);	// caller must reject the degenerate (linear) case
	const PxReal root = PxSqrt(discriminant);
	const PxReal invDenom = 1.0f / (2.0f*a);
	r1 = (-b + root) * invDenom;
	r2 = (-b - root) * invDenom;
	return true;
}
// Sweeps sphere A from A0 to A1 and sphere B from B0 to B1 over the same (normalized)
// time interval [0,1]. Returns true if the spheres touch during that interval, with
// u0 <= u1 the normalized times of first and second contact (both 0 if initially
// overlapping). Adapted from Gamasutra (Gomez article).
static bool sphereSphereSweep(	const PxReal ra,	//radius of sphere A
								const PxVec3& A0,	//previous position of sphere A
								const PxVec3& A1,	//current position of sphere A
								const PxReal rb,	//radius of sphere B
								const PxVec3& B0,	//previous position of sphere B
								const PxVec3& B1,	//current position of sphere B
								PxReal& u0,			//normalized time of first collision
								PxReal& u1			//normalized time of second collision
								)
{
	const PxVec3 va = A1 - A0;
	const PxVec3 vb = B1 - B0;
	const PxVec3 AB = B0 - A0;
	const PxVec3 vab = vb - va;	// relative velocity (in normalized time)
	const PxReal rab = ra + rb;

	const PxReal a = vab.dot(vab);			//u*u coefficient
	const PxReal b = 2.0f*(vab.dot(AB));	//u coefficient
	const PxReal c = (AB.dot(AB)) - rab*rab;	//constant term

	//check if they're currently overlapping
	if(c<=0.0f)
	{
		u0 = 0.0f;
		u1 = 0.0f;
		return true;
	}

	// BUGFIX: no relative motion and not overlapping => the spheres can never touch.
	// The previous merged test (c<=0 || a==0) wrongly reported a hit at t=0 here.
	// This also keeps quadraticFormula's 1/(2a) division well-defined.
	if(a==0.0f)
		return false;

	//check if they hit each other during the frame
	if(quadraticFormula(a, b, c, u0, u1))
	{
		if(u0>u1)
			PxSwap(u0, u1);

		// reject when the contact interval lies entirely outside [0,1]
//		if(u0<0.0f || u1>1.0f)	return false;
		if(u1<0.0f || u0>1.0f)	return false;

		return true;
	}
	return false;
}
bool Gu::sweepSphereSphere(const PxVec3& center0, PxReal radius0, const PxVec3& center1, PxReal radius1, const PxVec3& motion, PxReal& d, PxVec3& nrm)
{
const PxVec3 movedCenter = center1 + motion;
PxReal tmp;
if(!sphereSphereSweep(radius0, center0, center0, radius1, center1, movedCenter, d, tmp))
return false;
// Compute normal
// PT: if spheres initially overlap, the convention is that returned normal = -sweep direction
if(d==0.0f)
nrm = -motion;
else
nrm = (center1 + d * motion) - center0;
nrm.normalize();
return true;
}
| 4,000 | C++ | 33.491379 | 150 | 0.7035 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/sweep/GuSweepCapsuleTriangle.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuSweepCapsuleTriangle.h"
#include "GuIntersectionCapsuleTriangle.h"
#include "GuDistanceSegmentTriangle.h"
#include "GuIntersectionTriangleBox.h"
#include "GuSweepSphereTriangle.h"
#include "GuInternal.h"
using namespace physx;
using namespace Gu;
using namespace physx::aos;
// Tolerance on |cos(angle)| used to detect a sweep direction (anti)parallel to the capsule axis.
#define COLINEARITY_EPSILON	0.00001f

///////////////////////////////////////////////////////////////////////////////

// Appends triangle (pp0, pp1, pp2) to the local 'extrudedTris' array and caches its
// denormalized normal in 'extrudedTrisNormals'. Relies on the locals nbExtrudedTris,
// extrudedTris and extrudedTrisNormals being in scope at the expansion site.
#define OUTPUT_TRI(pp0, pp1, pp2){										\
	extrudedTris[nbExtrudedTris].verts[0] = pp0;						\
	extrudedTris[nbExtrudedTris].verts[1] = pp1;						\
	extrudedTris[nbExtrudedTris].verts[2] = pp2;						\
	extrudedTris[nbExtrudedTris].denormalizedNormal(extrudedTrisNormals[nbExtrudedTris]);	\
	nbExtrudedTris++;}

// Same as OUTPUT_TRI but flips the winding when the triangle faces along sweep direction
// 'd' (normal.dot(d) > 0), so the stored normal always satisfies normal.dot(d) <= 0.
#define OUTPUT_TRI2(p0, p1, p2, d){		\
	PxTriangle& tri = extrudedTris[nbExtrudedTris];	\
	tri.verts[0] = p0;					\
	tri.verts[1] = p1;					\
	tri.verts[2] = p2;					\
	PxVec3 nrm;							\
	tri.denormalizedNormal(nrm);		\
	if(nrm.dot(d)>0.0f) {				\
	PxVec3 tmp = tri.verts[1];			\
	tri.verts[1] = tri.verts[2];		\
	tri.verts[2] = tmp;					\
	nrm = -nrm;							\
	}									\
	extrudedTrisNormals[nbExtrudedTris] = nrm;	\
	nbExtrudedTris++; }
//#define NEW_VERSION
bool Gu::sweepCapsuleTriangles_Precise( PxU32 nbTris, const PxTriangle* PX_RESTRICT triangles, // Triangle data
const Capsule& capsule, // Capsule data
const PxVec3& unitDir, const PxReal distance, // Ray data
const PxU32* PX_RESTRICT cachedIndex, // Cache data
PxGeomSweepHit& hit, PxVec3& triNormalOut, // Results
PxHitFlags hitFlags, bool isDoubleSided, // Query modifiers
const BoxPadded* cullBox) // Cull data
{
if(!nbTris)
return false;
const bool meshBothSides = hitFlags & PxHitFlag::eMESH_BOTH_SIDES;
const bool doBackfaceCulling = !isDoubleSided && !meshBothSides;
const bool anyHit = hitFlags & PxHitFlag::eMESH_ANY;
const bool testInitialOverlap = !(hitFlags & PxHitFlag::eASSUME_NO_INITIAL_OVERLAP);
// PT: we can fallback to sphere sweep:
// - if the capsule is degenerate (i.e. it's a sphere)
// - if the sweep direction is the same as the capsule axis, in which case we can just sweep the top or bottom sphere
const PxVec3 extrusionDir = (capsule.p0 - capsule.p1)*0.5f; // Extrusion dir = capsule segment
const PxReal halfHeight = extrusionDir.magnitude();
bool mustExtrude = halfHeight!=0.0f;
if(!mustExtrude)
{
// PT: capsule is a sphere. Switch to sphere path (intersectCapsuleTriangle doesn't work for degenerate capsules)
return sweepSphereTriangles(nbTris, triangles, capsule.p0, capsule.radius, unitDir, distance, cachedIndex, hit, triNormalOut, isDoubleSided, meshBothSides, anyHit, testInitialOverlap);
}
else
{
const PxVec3 capsuleAxis = extrusionDir/halfHeight;
const PxReal colinearity = PxAbs(capsuleAxis.dot(unitDir));
mustExtrude = (colinearity < (1.0f - COLINEARITY_EPSILON));
}
const PxVec3 capsuleCenter = capsule.computeCenter();
if(!mustExtrude)
{
CapsuleTriangleOverlapData params;
params.init(capsule);
// PT: unfortunately we need to do IO test with the *capsule*, even though we're in the sphere codepath. So we
// can't directly reuse the sphere function.
const PxVec3 sphereCenter = capsuleCenter + unitDir * halfHeight;
// PT: this is a copy of 'sweepSphereTriangles' but with a capsule IO test. Saves double backface culling....
{
PxU32 index = PX_INVALID_U32;
const PxU32 initIndex = getInitIndex(cachedIndex, nbTris);
PxReal curT = distance;
const PxReal dpc0 = sphereCenter.dot(unitDir);
PxReal bestAlignmentValue = 2.0f;
PxVec3 bestTriNormal(0.0f);
for(PxU32 ii=0; ii<nbTris; ii++) // We need i for returned triangle index
{
const PxU32 i = getTriangleIndex(ii, initIndex);
const PxTriangle& currentTri = triangles[i];
if(rejectTriangle(sphereCenter, unitDir, curT, capsule.radius, currentTri.verts, dpc0))
continue;
PxVec3 triNormal;
currentTri.denormalizedNormal(triNormal);
// Backface culling
if(doBackfaceCulling && (triNormal.dot(unitDir) > 0.0f))
continue;
if(testInitialOverlap && intersectCapsuleTriangle(triNormal, currentTri.verts[0], currentTri.verts[1], currentTri.verts[2], capsule, params))
{
triNormalOut = -unitDir;
return setInitialOverlapResults(hit, unitDir, i);
}
const PxReal magnitude = triNormal.magnitude();
if(magnitude==0.0f)
continue;
triNormal /= magnitude;
PxReal currentDistance;
bool unused;
if(!sweepSphereVSTri(currentTri.verts, triNormal, sphereCenter, capsule.radius, unitDir, currentDistance, unused, false))
continue;
const PxReal hitDot = computeAlignmentValue(triNormal, unitDir);
if(keepTriangle(currentDistance, hitDot, curT, bestAlignmentValue, distance))
{
curT = PxMin(curT, currentDistance); // exact lower bound
index = i;
bestAlignmentValue = hitDot;
bestTriNormal = triNormal;
if(anyHit)
break;
}
//
else if(keepTriangleBasic(currentDistance, curT, distance))
{
curT = PxMin(curT, currentDistance); // exact lower bound
}
//
}
return computeSphereTriangleImpactData(hit, triNormalOut, index, curT, sphereCenter, unitDir, bestTriNormal, triangles, isDoubleSided, meshBothSides);
}
}
// PT: extrude mesh on the fly. This is a modified copy of sweepSphereTriangles, unfortunately
PxTriangle extrudedTris[7];
PxVec3 extrudedTrisNormals[7]; // Not normalized
hit.faceIndex = PX_INVALID_U32;
const PxU32 initIndex = getInitIndex(cachedIndex, nbTris);
const PxReal radius = capsule.radius;
PxReal curT = distance;
const PxReal dpc0 = capsuleCenter.dot(unitDir);
// PT: we will copy the best triangle here. Using indices alone doesn't work
// since we extrude on-the-fly (and we don't want to re-extrude later)
PxTriangle bestTri;
PxVec3 bestTriNormal(0.0f);
PxReal mostOpposingHitDot = 2.0f;
CapsuleTriangleOverlapData params;
params.init(capsule);
for(PxU32 ii=0; ii<nbTris; ii++) // We need i for returned triangle index
{
const PxU32 i = getTriangleIndex(ii, initIndex);
const PxTriangle& currentSrcTri = triangles[i]; // PT: src tri, i.e. non-extruded
///////////// PT: this part comes from "ExtrudeMesh"
// Create triangle normal
PxVec3 denormalizedNormal;
currentSrcTri.denormalizedNormal(denormalizedNormal);
// Backface culling
if(doBackfaceCulling && (denormalizedNormal.dot(unitDir) > 0.0f))
continue;
if(cullBox)
{
if(!intersectTriangleBox(*cullBox, currentSrcTri.verts[0], currentSrcTri.verts[1], currentSrcTri.verts[2]))
continue;
}
if(testInitialOverlap && intersectCapsuleTriangle(denormalizedNormal, currentSrcTri.verts[0], currentSrcTri.verts[1], currentSrcTri.verts[2], capsule, params))
{
triNormalOut = -unitDir;
return setInitialOverlapResults(hit, unitDir, i);
}
// Extrude mesh on the fly
PxU32 nbExtrudedTris=0;
const PxVec3 p0 = currentSrcTri.verts[0] - extrusionDir;
const PxVec3 p1 = currentSrcTri.verts[1] - extrusionDir;
const PxVec3 p2 = currentSrcTri.verts[2] - extrusionDir;
const PxVec3 p0b = currentSrcTri.verts[0] + extrusionDir;
const PxVec3 p1b = currentSrcTri.verts[1] + extrusionDir;
const PxVec3 p2b = currentSrcTri.verts[2] + extrusionDir;
if(denormalizedNormal.dot(extrusionDir) >= 0.0f) OUTPUT_TRI(p0b, p1b, p2b)
else OUTPUT_TRI(p0, p1, p2)
// ### it's probably useless to extrude all the shared edges !!!!!
//if(CurrentFlags & TriangleCollisionFlag::eACTIVE_EDGE12)
{
OUTPUT_TRI2(p1, p1b, p2b, unitDir)
OUTPUT_TRI2(p1, p2b, p2, unitDir)
}
//if(CurrentFlags & TriangleCollisionFlag::eACTIVE_EDGE20)
{
OUTPUT_TRI2(p0, p2, p2b, unitDir)
OUTPUT_TRI2(p0, p2b, p0b, unitDir)
}
//if(CurrentFlags & TriangleCollisionFlag::eACTIVE_EDGE01)
{
OUTPUT_TRI2(p0b, p1b, p1, unitDir)
OUTPUT_TRI2(p0b, p1, p0, unitDir)
}
/////////////
// PT: TODO: this one is new, to fix the tweak issue. However this wasn't
// here before so the perf hit should be analyzed.
denormalizedNormal.normalize();
const PxReal hitDot1 = computeAlignmentValue(denormalizedNormal, unitDir);
#ifdef NEW_VERSION
float localDistance = FLT_MAX;
PxU32 localIndex = 0xffffffff;
#endif
for(PxU32 j=0;j<nbExtrudedTris;j++)
{
const PxTriangle& currentTri = extrudedTris[j];
PxVec3& triNormal = extrudedTrisNormals[j];
// Backface culling
if(doBackfaceCulling && (triNormal.dot(unitDir)) > 0.0f)
continue;
// PT: beware, culling is only ok on the sphere I think
if(rejectTriangle(capsuleCenter, unitDir, curT, radius, currentTri.verts, dpc0))
continue;
const PxReal magnitude = triNormal.magnitude();
if(magnitude==0.0f)
continue;
triNormal /= magnitude;
PxReal currentDistance;
bool unused;
if(!sweepSphereVSTri(currentTri.verts, triNormal, capsuleCenter, radius, unitDir, currentDistance, unused, false))
continue;
#ifndef NEW_VERSION
if(keepTriangle(currentDistance, hitDot1, curT, mostOpposingHitDot, distance))
{
curT = PxMin(curT, currentDistance); // exact lower bound
hit.faceIndex = i;
mostOpposingHitDot = hitDot1; // arbitrary bias. works for hitDot1=-1, prevHitDot=0
bestTri = currentTri;
bestTriNormal = denormalizedNormal;
if(anyHit)
goto Exit; // PT: using goto to have one test per hit, not test per triangle ('break' doesn't work here)
}
//
else if(keepTriangleBasic(currentDistance, curT, distance))
{
curT = PxMin(curT, currentDistance); // exact lower bound
}
//
#endif
#ifdef NEW_VERSION
if(keepTriangleBasic(currentDistance, localDistance, distance))
{
localDistance = currentDistance;
localIndex = j;
}
#endif
}
#ifdef NEW_VERSION
if(localIndex!=0xffffffff)
{
if(keepTriangle(localDistance, hitDot1, curT, mostOpposingHitDot, distance))
{
curT = PxMin(curT, localDistance); // exact lower bound
hit.faceIndex = i;
mostOpposingHitDot = hitDot1; // arbitrary bias. works for hitDot1=-1, prevHitDot=0
bestTri = currentSrcTri;
bestTriNormal = denormalizedNormal;
if(anyHit)
goto Exit; // PT: using goto to have one test per hit, not test per triangle ('break' doesn't work here)
}
//
else if(keepTriangleBasic(localDistance, curT, distance))
{
curT = PxMin(curT, localDistance); // exact lower bound
}
}
#endif
}
Exit:
if(hit.faceIndex==PX_INVALID_U32)
return false; // We didn't touch any triangle
hit.distance = curT;
triNormalOut = bestTriNormal;
// Compute impact data only once, using best triangle
computeSphereTriImpactData(hit.position, hit.normal, capsuleCenter, unitDir, hit.distance, bestTri);
// PT: by design, returned normal is opposed to the sweep direction.
if(shouldFlipNormal(hit.normal, meshBothSides, isDoubleSided, bestTriNormal, unitDir))
hit.normal = -hit.normal;
// PT: revisit this
if(hit.faceIndex!=PX_INVALID_U32)
{
// PT: we need to recompute a hit here because the hit between the *capsule* and the source mesh can be very
// different from the hit between the *sphere* and the extruded mesh.
// Touched tri
const PxVec3& p0 = triangles[hit.faceIndex].verts[0];
const PxVec3& p1 = triangles[hit.faceIndex].verts[1];
const PxVec3& p2 = triangles[hit.faceIndex].verts[2];
// AP: measured to be a bit faster than the scalar version
const PxVec3 delta = unitDir*hit.distance;
Vec3V pointOnSeg, pointOnTri;
distanceSegmentTriangleSquared(
V3LoadU(capsule.p0 + delta), V3LoadU(capsule.p1 + delta),
V3LoadU(p0), V3LoadU(p1), V3LoadU(p2),
pointOnSeg, pointOnTri);
V3StoreU(pointOnTri, hit.position);
hit.flags = PxHitFlag::eNORMAL|PxHitFlag::ePOSITION;
}
return true;
}
| 13,508 | C++ | 34.088312 | 186 | 0.708543 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/sweep/GuSweepBoxTriangle_FeatureBased.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_SWEEP_BOX_TRIANGLE_FEATURE_BASED_H
#define GU_SWEEP_BOX_TRIANGLE_FEATURE_BASED_H

#include "foundation/PxVec3.h"
#include "foundation/PxPlane.h"

namespace physx
{
	class PxTriangle;

namespace Gu
{
	/**
	Sweeps a box against a triangle, using a 'feature-based' approach.

	This is currently only used for computing the box-sweep impact data, in a second pass,
	after the best triangle has been identified using faster approaches (SAT/GJK).

	\warning	Returned impact normal is not normalized

	\param		tri				[in] the triangle
	\param		box				[in] the box
	\param		motion			[in] (box) motion vector
	\param		oneOverMotion	[in] precomputed inverse of motion vector
	\param		hit				[out] impact point
	\param		normal			[out] impact normal (warning: not normalized)
	\param		d				[in/out] impact distance (please initialize with best current distance)
	\param		isDoubleSided	[in] whether triangle is double-sided or not
	\return		true if an impact has been found
	*/
	// NOTE(review): the declaration below uses PxBounds3, which is neither included nor
	// forward-declared in this header — presumably it is pulled in transitively by one of
	// the includes above. TODO: confirm, or add the explicit include.
	bool sweepBoxTriangle(	const PxTriangle& tri, const PxBounds3& box,
							const PxVec3& motion, const PxVec3& oneOverMotion,
							PxVec3& hit, PxVec3& normal, PxReal& d, bool isDoubleSided=false);

} // namespace Gu
}

#endif
| 2,916 | C | 43.196969 | 88 | 0.75 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/sweep/GuSweepSphereTriangle.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuSweepSphereTriangle.h"
#include "GuIntersectionRaySphere.h"
#include "GuIntersectionRayCapsule.h"
#include "GuIntersectionRayTriangle.h"
#include "GuCapsule.h"
#include "GuInternal.h"
#include "foundation/PxUtilities.h"
#include "GuDistancePointTriangle.h"
// File-scope configuration for the sphere-vs-triangle/quad sweep code below.
//#define PX_2413_FIX // Works in VT, but UT fails
#ifdef PX_2413_FIX
	// When the ray misses the (extruded) triangle, nudge (u,v) by the offset-induced
	// deltas before classifying which edge/vertex region the intersection point lies in.
	#define FIXUP_UVS	u += du;	v += dv;
#else
	#define FIXUP_UVS
#endif

// When true, the sweep functions cross-check their edge-region selection against a
// brute-force "test all edge capsules" reference implementation (debug only).
static const bool gSanityCheck = false;
//static const float gEpsilon = 0.1f;
#define gEpsilon 0.1f // PT: because otherwise compiler complains that this is unused

// PT: alternative version that checks 2 capsules max and avoids the questionable heuristic and the whole du/dv fix
static const bool gUseAlternativeImplementation = true;

using namespace physx;
using namespace Gu;

// PT: using GU_CULLING_EPSILON_RAY_TRIANGLE fails here, in capsule-vs-mesh's triangle extrusion, when
// the sweep dir is almost the same as the capsule's dir (i.e. when we usually fallback to the sphere codepath).
// I suspect det becomes so small that we lose all accuracy when dividing by det and using the result in computing
// impact distance.
#define LOCAL_EPSILON 0.00001f
// PT: special version of the Moller-Trumbore ray/triangle test: it computes the barycentric
// coordinates (u,v) even when the ray misses the triangle, so callers can classify which
// edge/vertex region the plane intersection falls into. Works on precomputed edges.
// Returns 0 when the ray is (nearly) parallel to the triangle plane, 1 when (u,v) lies
// outside the triangle (t is NOT computed in that case), 2 on a direct hit (t is valid).
static PX_FORCE_INLINE PxU32 rayTriSpecial(const PxVec3& orig, const PxVec3& dir, const PxVec3& vert0, const PxVec3& edge1, const PxVec3& edge2, PxReal& t, PxReal& u, PxReal& v)
{
	// Determinant, also reused for the U parameter
	const PxVec3 p = dir.cross(edge2);
	const PxReal determinant = edge1.dot(p);

	// Near-zero determinant => ray lies in the triangle plane (non-culling branch)
	// if(determinant>-GU_CULLING_EPSILON_RAY_TRIANGLE && determinant<GU_CULLING_EPSILON_RAY_TRIANGLE)
	if(determinant>-LOCAL_EPSILON && determinant<LOCAL_EPSILON)
		return 0;

	const PxReal invDet = 1.0f / determinant;

	// Vector from vert0 to the ray origin
	const PxVec3 originToVert = orig - vert0;

	// Barycentric U
	u = originToVert.dot(p) * invDet;

	// Barycentric V
	const PxVec3 q = originToVert.cross(edge1);
	v = dir.dot(q) * invDet;

	// Outside the triangle: (u,v) are still valid for region classification
	if(u<0.0f || u>1.0f || v<0.0f || u+v>1.0f)
		return 1;

	// Inside: compute the impact distance along the ray
	t = edge2.dot(q) * invDet;
	return 2;
}
#ifdef PX_2413_FIX
// Variant of rayTriSpecial used by the PX_2413_FIX path: on a miss it additionally returns
// the (du,dv) deltas induced by 'offset' (the radius-scaled normal), which FIXUP_UVS then
// applies to (u,v) before region classification.
static PX_FORCE_INLINE PxU32 rayTriSpecial3(const PxVec3& orig, const PxVec3& offset, const PxVec3& dir, const PxVec3& vert0, const PxVec3& edge1, const PxVec3& edge2, PxReal& t, PxReal& u, PxReal& v, PxReal& du, PxReal& dv)
{
	// Begin calculating determinant - also used to calculate U parameter
	const PxVec3 pvec = dir.cross(edge2);

	// If determinant is near zero, ray lies in plane of triangle
	const PxReal det = edge1.dot(pvec);

	// the non-culling branch
	// if(det>-GU_CULLING_EPSILON_RAY_TRIANGLE && det<GU_CULLING_EPSILON_RAY_TRIANGLE)
	if(det>-LOCAL_EPSILON && det<LOCAL_EPSILON)
		return 0;

	const PxReal oneOverDet = 1.0f / det;

	// Calculate distance from vert0 to ray origin
	const PxVec3 tvec = orig - vert0;

	// Calculate U parameter
	u = (tvec.dot(pvec)) * oneOverDet;

	// prepare to test V parameter
	const PxVec3 qvec = tvec.cross(edge1);

	// Calculate V parameter
	v = (dir.dot(qvec)) * oneOverDet;

	if(u<0.0f || u>1.0f || v<0.0f || u+v>1.0f)
	{
		// Miss: report how (u,v) change when the origin moves by 'offset'
		du = (offset.dot(pvec)) * oneOverDet;
		const PxVec3 qvec = offset.cross(edge1);	// NOTE: intentionally shadows the outer qvec
		dv = (dir.dot(qvec)) * oneOverDet;
		return 1;
	}

	// Calculate t, ray intersects triangle
	t = (edge2.dot(qvec)) * oneOverDet;
	return 2;
}
#endif
// Returns true if the sphere can be tested against the candidate triangle vertex,
// false if an edge test should be performed instead (secondEdgeVert then receives
// the other endpoint of the edge to test).
//
// Uses a conservative approach so it also works for "sliver triangles" (long & thin):
// the plane intersection point is projected onto each adjacent edge, and the vertex
// test is only kept when the projection lies beyond the candidate vertex on both edges.
static PX_FORCE_INLINE bool edgeOrVertexTest(const PxVec3& planeIntersectPoint, const PxVec3* PX_RESTRICT tri, PxU32 vertIntersectCandidate, PxU32 vert0, PxU32 vert1, PxU32& secondEdgeVert)
{
	const PxVec3& candidate = tri[vertIntersectCandidate];

	// First adjacent edge: vert0 -> candidate.
	// Comparing against the squared edge length avoids normalizing the edge vector.
	const PxVec3 edgeA = candidate - tri[vert0];
	if(edgeA.dot(planeIntersectPoint - tri[vert0]) < edgeA.dot(edgeA))
	{
		secondEdgeVert = vert0;
		return false;
	}

	// Second adjacent edge: vert1 -> candidate.
	const PxVec3 edgeB = candidate - tri[vert1];
	if(edgeB.dot(planeIntersectPoint - tri[vert1]) < edgeB.dot(edgeB))
	{
		secondEdgeVert = vert1;
		return false;
	}

	return true;
}
// Final narrow-phase test once the edge/vertex region has been classified:
// either casts the ray against a sphere centered on verts[e0] (vertex region),
// or against the capsule wrapping edge (verts[e0], verts[e1]) (edge region).
// Writes the impact distance and returns true on a hit.
static PX_FORCE_INLINE bool testRayVsSphereOrCapsule(PxReal& impactDistance, bool testSphere, const PxVec3& center, PxReal radius, const PxVec3& dir, const PxVec3* PX_RESTRICT verts, PxU32 e0, PxU32 e1)
{
	PxReal t;
	if(testSphere)
	{
		// Vertex region: ray-vs-sphere around verts[e0]
		if(intersectRaySphere(center, dir, PX_MAX_F32, verts[e0], radius, t))
		{
			impactDistance = t;
			return true;
		}
	}
	else if(intersectRayCapsule(center, dir, verts[e0], verts[e1], radius, t))
	{
		// Edge region: ray-vs-capsule around the edge.
		// Only the capsule path filters out negative t (kept as in the original).
		if(t>=0.0f/* && t<MinDist*/)
		{
			impactDistance = t;
			return true;
		}
	}
	return false;
}
// Sweeps a sphere against a single triangle.
//
// \param triVerts				[in] the 3 triangle vertices
// \param normal				[in] triangle normal (assumed normalized by the caller — TODO confirm; it is only used scaled by radius and for orientation)
// \param center				[in] sphere center
// \param radius				[in] sphere radius
// \param dir					[in] sweep direction
// \param impactDistance		[out] impact distance along dir
// \param directHit				[out] true if the swept sphere hit the triangle's face (as opposed to an edge/vertex)
// \param testInitialOverlap	[in] when true, a sphere initially overlapping the triangle returns a hit at distance 0
// \return						true if an impact has been found
bool Gu::sweepSphereVSTri(const PxVec3* PX_RESTRICT triVerts, const PxVec3& normal, const PxVec3& center, PxReal radius, const PxVec3& dir, PxReal& impactDistance, bool& directHit, bool testInitialOverlap)
{
	// Ok, this new version is now faster than the original code. Needs more testing though.

	directHit = false;
	const PxVec3 edge10 = triVerts[1] - triVerts[0];
	const PxVec3 edge20 = triVerts[2] - triVerts[0];

	if(testInitialOverlap)	// ### brute force version that always works, but we can probably do better
	{
		// Closest-point query: overlap iff the closest triangle point is within the radius
		const PxVec3 cp = closestPtPointTriangle2(center, triVerts[0], triVerts[1], triVerts[2], edge10, edge20);
		if((cp - center).magnitudeSquared() <= radius*radius)
		{
			impactDistance = 0.0f;
			return true;
		}
	}

	// Reconstructs the triangle-plane intersection point from the barycentric (u,v)
	#define INTERSECT_POINT (triVerts[1]*u) + (triVerts[2]*v) + (triVerts[0] * (1.0f-u-v))

	PxReal u,v;
#ifdef PX_2413_FIX
	float du, dv;
#endif
	{
		// Offset the ray by radius along the triangle normal, towards the triangle
		PxVec3 R = normal * radius;
		if(dir.dot(R) >= 0.0f)
			R = -R;

		// The first point of the sphere to hit the triangle plane is the point of the sphere nearest to
		// the triangle plane. Hence, we use center - (normal*radius) below.

		// PT: casting against the extruded triangle in direction R is the same as casting from a ray moved by -R
		PxReal t;
#ifdef PX_2413_FIX
		const PxU32 r = rayTriSpecial3(center-R, R, dir, triVerts[0], edge10, edge20, t, u, v, du, dv);
#else
		const PxU32 r = rayTriSpecial(center-R, dir, triVerts[0], edge10, edge20, t, u, v);
#endif
		if(!r)
			return false;	// ray parallel to the triangle plane

		if(r==2)	// direct face hit
		{
			if(t<0.0f)
				return false;	// triangle behind the ray
			impactDistance = t;
			directHit = true;
			return true;
		}
		// r==1: missed the face, fall through to edge/vertex tests using (u,v)
	}

	// Debug-only reference result: brute-force test of all three edge capsules
	float referenceMinDist = PX_MAX_F32;
	bool referenceHit = false;
	if(gSanityCheck)
	{
		PxReal t;
		if(intersectRayCapsule(center, dir, triVerts[0], triVerts[1], radius, t) && t>=0.0f)
		{
			referenceHit = true;
			referenceMinDist = PxMin(referenceMinDist, t);
		}
		if(intersectRayCapsule(center, dir, triVerts[1], triVerts[2], radius, t) && t>=0.0f)
		{
			referenceHit = true;
			referenceMinDist = PxMin(referenceMinDist, t);
		}
		if(intersectRayCapsule(center, dir, triVerts[2], triVerts[0], radius, t) && t>=0.0f)
		{
			referenceHit = true;
			referenceMinDist = PxMin(referenceMinDist, t);
		}
		if(!gUseAlternativeImplementation)
		{
			if(referenceHit)
				impactDistance = referenceMinDist;
			return referenceHit;
		}
	}

	//
	// Let's do some art!
	//
	// The triangle gets divided into the following areas (based on the barycentric coordinates (u,v)):
	//
	//               \ A0 /
	//                \  /
	//                 \/ 0
	//            A02  *  A01
	//   u       /    / \    \   v
	//          *    /   \    *
	//              /     \
	//        2    /       \    1
	//      ------*---------*-------
	//           /           \
	//    A2    /     A12     \    A1
	//
	//
	// Based on the area where the computed triangle plane intersection point lies in, a different sweep test will be applied.
	//
	// A) A01, A02, A12  : Test sphere against the corresponding edge
	// B) A0, A1, A2     : Test sphere against the corresponding vertex
	//
	// Unfortunately, B) does not work for long, thin triangles. Hence there is some extra code which does a conservative check and
	// switches to edge tests if necessary.
	//

	if(gUseAlternativeImplementation)
	{
		// Alternative path: pick 1 or 2 candidate edges from the (u,v) region and
		// test their capsules directly (no vertex-sphere test, no du/dv fix needed).
		bool testTwoEdges = false;
		PxU32 e0,e1,e2=0;
		if(u<0.0f)
		{
			if(v<0.0f)
			{
				// 0 or 0-1 or 0-2
				testTwoEdges = true;
				e0 = 0;
				e1 = 1;
				e2 = 2;
			}
			else if(u+v>1.0f)
			{
				// 2 or 2-0 or 2-1
				testTwoEdges = true;
				e0 = 2;
				e1 = 0;
				e2 = 1;
			}
			else
			{
				// 0-2
				e0 = 0;
				e1 = 2;
			}
		}
		else
		{
			if(v<0.0f)
			{
				if(u+v>1.0f)
				{
					// 1 or 1-0 or 1-2
					testTwoEdges = true;
					e0 = 1;
					e1 = 0;
					e2 = 2;
				}
				else
				{
					// 0-1
					e0 = 0;
					e1 = 1;
				}
			}
			else
			{
				PX_ASSERT(u+v>=1.0f);	// Else hit triangle
				// 1-2
				e0 = 1;
				e1 = 2;
			}
		}

		// Test the selected capsule(s) and keep the closest hit
		bool hit = false;
		PxReal t;
		if(intersectRayCapsule(center, dir, triVerts[e0], triVerts[e1], radius, t) && t>=0.0f)
		{
			impactDistance = t;
			hit = true;
		}
		if(testTwoEdges && intersectRayCapsule(center, dir, triVerts[e0], triVerts[e2], radius, t) && t>=0.0f)
		{
			if(!hit || t<impactDistance)
			{
				impactDistance = t;
				hit = true;
			}
		}

		if(gSanityCheck)
		{
			PX_ASSERT(referenceHit==hit);
			if(referenceHit==hit)
				PX_ASSERT(fabsf(referenceMinDist-impactDistance)<gEpsilon);
		}
		return hit;
	}
	else
	{
		// Original path: region classification with edgeOrVertexTest (conservative
		// fallback to an edge test for sliver triangles), then a single sphere or
		// capsule cast via testRayVsSphereOrCapsule.
		bool TestSphere;
		PxU32 e0,e1;
		if(u<0.0f)
		{
			if(v<0.0f)
			{
				// 0 or 0-1 or 0-2
				e0 = 0;
				FIXUP_UVS
				const PxVec3 intersectPoint = INTERSECT_POINT;
				TestSphere = edgeOrVertexTest(intersectPoint, triVerts, 0, 1, 2, e1);
			}
			else if(u+v>1.0f)
			{
				// 2 or 2-0 or 2-1
				e0 = 2;
				FIXUP_UVS
				const PxVec3 intersectPoint = INTERSECT_POINT;
				TestSphere = edgeOrVertexTest(intersectPoint, triVerts, 2, 0, 1, e1);
			}
			else
			{
				// 0-2
				TestSphere = false;
				e0 = 0;
				e1 = 2;
			}
		}
		else
		{
			if(v<0.0f)
			{
				if(u+v>1.0f)
				{
					// 1 or 1-0 or 1-2
					e0 = 1;
					FIXUP_UVS
					const PxVec3 intersectPoint = INTERSECT_POINT;
					TestSphere = edgeOrVertexTest(intersectPoint, triVerts, 1, 0, 2, e1);
				}
				else
				{
					// 0-1
					TestSphere = false;
					e0 = 0;
					e1 = 1;
				}
			}
			else
			{
				PX_ASSERT(u+v>=1.0f);	// Else hit triangle
				// 1-2
				TestSphere = false;
				e0 = 1;
				e1 = 2;
			}
		}
		return testRayVsSphereOrCapsule(impactDistance, TestSphere, center, radius, dir, triVerts, e0, e1);
	}
}
// Sweeps a sphere against a set of triangles, keeping the best hit.
// "Best" means closest distance, biased towards triangles whose normal most opposes the
// sweep direction (see keepTriangle); curT still tracks the exact minimum distance so
// later triangles are rejected/accepted against a tight bound.
bool Gu::sweepSphereTriangles(	PxU32 nbTris, const PxTriangle* PX_RESTRICT triangles,	// Triangle data
								const PxVec3& center, const PxReal radius,				// Sphere data
								const PxVec3& unitDir, PxReal distance,					// Ray data
								const PxU32* PX_RESTRICT cachedIndex,					// Cache data
								PxGeomSweepHit& h, PxVec3& triNormalOut,				// Results
								bool isDoubleSided, bool meshBothSides, bool anyHit, bool testInitialOverlap)	// Query modifiers
{
	if(!nbTris)
		return false;

	const bool doBackfaceCulling = !isDoubleSided && !meshBothSides;

	PxU32 index = PX_INVALID_U32;
	// Start iteration at the cached triangle (if any) so a previously-hit triangle is tested first
	const PxU32 initIndex = getInitIndex(cachedIndex, nbTris);

	PxReal curT = distance;					// current best (exact) impact distance
	const PxReal dpc0 = center.dot(unitDir);

	PxReal bestAlignmentValue = 2.0f;		// best normal/dir alignment so far (lower is better)
	PxVec3 bestTriNormal(0.0f);

	for(PxU32 ii=0; ii<nbTris; ii++)	// We need i for returned triangle index
	{
		const PxU32 i = getTriangleIndex(ii, initIndex);

		const PxTriangle& currentTri = triangles[i];

		// Cheap conservative rejection against the current best distance
		if(rejectTriangle(center, unitDir, curT, radius, currentTri.verts, dpc0))
			continue;

		PxVec3 triNormal;
		currentTri.denormalizedNormal(triNormal);

		// Backface culling
		if(doBackfaceCulling && (triNormal.dot(unitDir) > 0.0f))
			continue;

		// Skip degenerate triangles (zero-length normal)
		const PxReal magnitude = triNormal.magnitude();
		if(magnitude==0.0f)
			continue;

		triNormal /= magnitude;

		PxReal currentDistance;
		bool unused;
		if(!sweepSphereVSTri(currentTri.verts, triNormal, center, radius, unitDir, currentDistance, unused, testInitialOverlap))
			continue;

		const PxReal hitDot = computeAlignmentValue(triNormal, unitDir);

		if(keepTriangle(currentDistance, hitDot, curT, bestAlignmentValue, distance))
		{
			// Initial overlap: early out with the canonical overlap result
			if(currentDistance==0.0f)
			{
				triNormalOut = -unitDir;
				return setInitialOverlapResults(h, unitDir, i);
			}
			curT = PxMin(curT, currentDistance);	// exact lower bound
			index = i;
			bestAlignmentValue = hitDot;
			bestTriNormal = triNormal;
			if(anyHit)
				break;
		}
		//
		else if(keepTriangleBasic(currentDistance, curT, distance))
		{
			// Not a better "best triangle", but still tightens the distance bound
			curT = PxMin(curT, currentDistance);	// exact lower bound
		}
		//
	}
	return computeSphereTriangleImpactData(h, triNormalOut, index, curT, center, unitDir, bestTriNormal, triangles, isDoubleSided, meshBothSides);
}
// Quad counterpart of rayTriSpecial: ray-vs-parallelogram test that computes (u,v)
// even on a miss so the caller can classify the edge/vertex region. The quad is
// spanned by vert0 + u*edge1 + v*edge2 with u,v each in [0,1] (no u+v<=1 constraint).
// Returns 0 when the ray is (nearly) parallel to the plane, 1 on a miss (t not
// computed), 2 on a direct hit (t valid).
static PX_FORCE_INLINE PxU32 rayQuadSpecial2(const PxVec3& orig, const PxVec3& dir, const PxVec3& vert0, const PxVec3& edge1, const PxVec3& edge2, float& t, float& u, float& v)
{
	// Determinant, also reused for the U parameter
	const PxVec3 p = dir.cross(edge2);
	const float determinant = edge1.dot(p);

	// Near-zero determinant => ray lies in the quad plane (non-culling branch)
	if(determinant>-LOCAL_EPSILON && determinant<LOCAL_EPSILON)
		return 0;

	const float invDet = 1.0f / determinant;

	// Vector from vert0 to the ray origin
	const PxVec3 originToVert = orig - vert0;

	// Parametric U
	u = originToVert.dot(p) * invDet;

	// Parametric V
	const PxVec3 q = originToVert.cross(edge1);
	v = dir.dot(q) * invDet;

	// Outside the quad: (u,v) remain valid for region classification
	if(u<0.0f || u>1.0f || v<0.0f || v>1.0f)
		return 1;

	// Inside: compute the impact distance along the ray
	t = edge2.dot(q) * invDet;
	return 2;
}
#ifdef PX_2413_FIX
// Variant of rayQuadSpecial2 used by the PX_2413_FIX path: on a miss it additionally
// returns the (du,dv) deltas induced by 'offset', which FIXUP_UVS then applies to (u,v)
// before region classification.
static PX_FORCE_INLINE PxU32 rayQuadSpecial3(const PxVec3& orig, const PxVec3& offset, const PxVec3& dir, const PxVec3& vert0, const PxVec3& edge1, const PxVec3& edge2, float& t, float& u, float& v, float& du, float& dv)
{
	// Begin calculating determinant - also used to calculate U parameter
	const PxVec3 pvec = dir.cross(edge2);

	// If determinant is near zero, ray lies in plane of triangle
	const float det = edge1.dot(pvec);

	// the non-culling branch
	if(det>-LOCAL_EPSILON && det<LOCAL_EPSILON)
		return 0;

	const float oneOverDet = 1.0f / det;

	// Calculate distance from vert0 to ray origin
	const PxVec3 tvec = orig - vert0;

	// Calculate U parameter
	u = tvec.dot(pvec) * oneOverDet;

	// prepare to test V parameter
	const PxVec3 qvec = tvec.cross(edge1);

	// Calculate V parameter
	v = dir.dot(qvec) * oneOverDet;

	if(u<0.0f || u>1.0f || v<0.0f || v>1.0f)
	{
		// Miss: report how (u,v) change when the origin moves by 'offset'
		du = (offset.dot(pvec)) * oneOverDet;
		const PxVec3 qvec = offset.cross(edge1);	// NOTE: intentionally shadows the outer qvec
		dv = (dir.dot(qvec)) * oneOverDet;
		return 1;
	}

	// Calculate t, ray intersects triangle
	t = edge2.dot(qvec) * oneOverDet;
	return 2;
}
#endif
// Sweeps a sphere against a quad (two triangles sharing the p1-p2 edge, see diagram below).
//
// \param quadVerts			[in] the 4 quad vertices (p0,p1,p2,p3 as laid out below)
// \param normal			[in] quad normal (used scaled by radius and for orientation)
// \param center			[in] sphere center
// \param radius			[in] sphere radius
// \param dir				[in] sweep direction
// \param impactDistance	[out] impact distance along dir (0 on initial overlap)
// \return					true if an impact has been found
bool Gu::sweepSphereVSQuad(const PxVec3* PX_RESTRICT quadVerts, const PxVec3& normal, const PxVec3& center, float radius, const PxVec3& dir, float& impactDistance)
{
	// Quad formed by 2 tris:
	// p0 p1 p2
	// p2 p1 p3 = p3 p2 p1
	//
	// p0___p2
	// |   /|
	// |  / |
	// | /  |
	// |/   |
	// p1---p3
	//
	// Edge10 = p1 - p0
	// Edge20 = p2 - p0
	// Impact point = Edge10*u + Edge20*v + p0
	// => u is along Y, between 0.0 (p0;p2) and 1.0 (p1;p3)
	// => v is along X, between 0.0 (p0;p1) and 1.0 (p2;p3)
	//
	// For the second triangle,
	// Edge10b = p2 - p3 = -Edge10
	// Edge20b = p1 - p3 = -Edge20
	const PxVec3 Edge10 = quadVerts[1] - quadVerts[0];
	const PxVec3 Edge20 = quadVerts[2] - quadVerts[0];

	if(1)	// ### brute force version that always works, but we can probably do better
	{
		// Initial-overlap test: closest point on each of the two triangles vs radius
		const float r2 = radius*radius;
		{
			const PxVec3 Cp = closestPtPointTriangle2(center, quadVerts[0], quadVerts[1], quadVerts[2], Edge10, Edge20);
			if((Cp - center).magnitudeSquared() <= r2)
			{
				impactDistance = 0.0f;
				return true;
			}
		}
		{
			const PxVec3 Cp = closestPtPointTriangle2(center, quadVerts[3], quadVerts[2], quadVerts[1], -Edge10, -Edge20);
			if((Cp - center).magnitudeSquared() <= r2)
			{
				impactDistance = 0.0f;
				return true;
			}
		}
	}

	float u,v;
#ifdef PX_2413_FIX
	float du, dv;
#endif
	if(1)
	{
		// Offset the ray by radius along the quad normal, towards the quad
		PxVec3 R = normal * radius;
		if(dir.dot(R) >= 0.0f)
			R = -R;

		// The first point of the sphere to hit the quad plane is the point of the sphere nearest to
		// the quad plane. Hence, we use center - (normal*radius) below.

		// PT: casting against the extruded quad in direction R is the same as casting from a ray moved by -R
		float t;
#ifdef PX_2413_FIX
		const PxU32 r = rayQuadSpecial3(center-R, R, dir, quadVerts[0], Edge10, Edge20, t, u, v, du, dv);
#else
		PxU32 r = rayQuadSpecial2(center-R, dir, quadVerts[0], Edge10, Edge20, t, u, v);
#endif
		if(!r)
			return false;	// ray parallel to the quad plane
		if(r==2)	// direct face hit
		{
			if(t<0.0f)
				return false;	// quad behind the ray
			impactDistance = t;
			return true;
		}
		// r==1: missed the face, fall through to edge/vertex tests using (u,v)
	}

	// Debug-only reference result: brute-force test of all four border-edge capsules
	bool referenceHit = false;
	float referenceMinDist = PX_MAX_F32;
	if(gSanityCheck)
	{
		PxReal t;
		if(intersectRayCapsule(center, dir, quadVerts[0], quadVerts[1], radius, t) && t>=0.0f)
		{
			referenceHit = true;
			referenceMinDist = PxMin(referenceMinDist, t);
		}
		if(intersectRayCapsule(center, dir, quadVerts[1], quadVerts[3], radius, t) && t>=0.0f)
		{
			referenceHit = true;
			referenceMinDist = PxMin(referenceMinDist, t);
		}
		if(intersectRayCapsule(center, dir, quadVerts[3], quadVerts[2], radius, t) && t>=0.0f)
		{
			referenceHit = true;
			referenceMinDist = PxMin(referenceMinDist, t);
		}
		if(intersectRayCapsule(center, dir, quadVerts[2], quadVerts[0], radius, t) && t>=0.0f)
		{
			referenceHit = true;
			referenceMinDist = PxMin(referenceMinDist, t);
		}
		if(!gUseAlternativeImplementation)
		{
			if(referenceHit)
				impactDistance = referenceMinDist;
			return referenceHit;
		}
	}

	// Swap so the region tests below read u along one quad axis and v along the other
	PxSwap(u, v);

	if(gUseAlternativeImplementation)
	{
		// Alternative path: pick 1 or 2 candidate border edges from the (u,v) region
		// and test their capsules directly.
		bool testTwoEdges = false;
		PxU32 e0,e1,e2=0;
		if(u<0.0f)
		{
			if(v<0.0f)
			{
				// 0 or 0-1 or 0-2
				testTwoEdges = true;
				e0 = 0;
				e1 = 1;
				e2 = 2;
			}
			else if(v>1.0f)
			{
				// 1 or 1-0 or 1-3
				testTwoEdges = true;
				e0 = 1;
				e1 = 0;
				e2 = 3;
			}
			else
			{
				// 0-1
				e0 = 0;
				e1 = 1;
			}
		}
		else if(u>1.0f)
		{
			if(v<0.0f)
			{
				// 2 or 2-0 or 2-3
				testTwoEdges = true;
				e0 = 2;
				e1 = 0;
				e2 = 3;
			}
			else if(v>1.0f)
			{
				// 3 or 3-1 or 3-2
				testTwoEdges = true;
				e0 = 3;
				e1 = 1;
				e2 = 2;
			}
			else
			{
				// 2-3
				e0 = 2;
				e1 = 3;
			}
		}
		else
		{
			if(v<0.0f)
			{
				// 0-2
				e0 = 0;
				e1 = 2;
			}
			else
			{
				PX_ASSERT(v>=1.0f);	// Else hit quad
				// 1-3
				e0 = 1;
				e1 = 3;
			}
		}

		// Test the selected capsule(s) and keep the closest hit
		bool hit = false;
		PxReal t;
		if(intersectRayCapsule(center, dir, quadVerts[e0], quadVerts[e1], radius, t) && t>=0.0f)
		{
			impactDistance = t;
			hit = true;
		}
		if(testTwoEdges && intersectRayCapsule(center, dir, quadVerts[e0], quadVerts[e2], radius, t) && t>=0.0f)
		{
			if(!hit || t<impactDistance)
			{
				impactDistance = t;
				hit = true;
			}
		}

		if(gSanityCheck)
		{
			PX_ASSERT(referenceHit==hit);
			if(referenceHit==hit)
				PX_ASSERT(fabsf(referenceMinDist-impactDistance)<gEpsilon);
		}
		return hit;
	}
	else
	{
		// Original path: region classification with edgeOrVertexTest, then a single
		// sphere or capsule cast via testRayVsSphereOrCapsule.
	#define INTERSECT_POINT_Q (quadVerts[1]*u) + (quadVerts[2]*v) + (quadVerts[0] * (1.0f-u-v))

		bool TestSphere;
		PxU32 e0,e1;
		if(u<0.0f)
		{
			if(v<0.0f)
			{
				// 0 or 0-1 or 0-2
				e0 = 0;
				FIXUP_UVS
				const PxVec3 intersectPoint = INTERSECT_POINT_Q;
				TestSphere = edgeOrVertexTest(intersectPoint, quadVerts, 0, 1, 2, e1);
			}
			else if(v>1.0f)
			{
				// 1 or 1-0 or 1-3
				e0 = 1;
				FIXUP_UVS
				const PxVec3 intersectPoint = INTERSECT_POINT_Q;
				TestSphere = edgeOrVertexTest(intersectPoint, quadVerts, 1, 0, 3, e1);
			}
			else
			{
				// 0-1
				TestSphere = false;
				e0 = 0;
				e1 = 1;
			}
		}
		else if(u>1.0f)
		{
			if(v<0.0f)
			{
				// 2 or 2-0 or 2-3
				e0 = 2;
				FIXUP_UVS
				const PxVec3 intersectPoint = INTERSECT_POINT_Q;
				TestSphere = edgeOrVertexTest(intersectPoint, quadVerts, 2, 0, 3, e1);
			}
			else if(v>1.0f)
			{
				// 3 or 3-1 or 3-2
				e0 = 3;
				FIXUP_UVS
				const PxVec3 intersectPoint = INTERSECT_POINT_Q;
				TestSphere = edgeOrVertexTest(intersectPoint, quadVerts, 3, 1, 2, e1);
			}
			else
			{
				// 2-3
				TestSphere = false;
				e0 = 2;
				e1 = 3;
			}
		}
		else
		{
			if(v<0.0f)
			{
				// 0-2
				TestSphere = false;
				e0 = 0;
				e1 = 2;
			}
			else
			{
				PX_ASSERT(v>=1.0f);	// Else hit quad
				// 1-3
				TestSphere = false;
				e0 = 1;
				e1 = 3;
			}
		}
		return testRayVsSphereOrCapsule(impactDistance, TestSphere, center, radius, dir, quadVerts, e0, e1);
	}
}
| 22,757 | C++ | 24.202658 | 224 | 0.643011 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/sweep/GuSweepBoxSphere.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuSweepBoxSphere.h"
#include "GuOverlapTests.h"
#include "GuSphere.h"
#include "GuBoxConversion.h"
#include "GuCapsule.h"
#include "GuIntersectionRayCapsule.h"
#include "GuIntersectionRayBox.h"
#include "GuIntersectionSphereBox.h"
#include "GuDistancePointSegment.h"
#include "GuInternal.h"
using namespace physx;
using namespace Gu;
namespace
{
	// PT: TODO: get rid of this copy
	// Outward face normals of an axis-aligned box in local space: +X, +Y, +Z, -X, -Y, -Z.
	// Indexed below by the plane index returned from intersectRayAABB().
	static const PxVec3 gNearPlaneNormal[] =
	{
		PxVec3(1.0f, 0.0f, 0.0f),
		PxVec3(0.0f, 1.0f, 0.0f),
		PxVec3(0.0f, 0.0f, 1.0f),
		PxVec3(-1.0f, 0.0f, 0.0f),
		PxVec3(0.0f, -1.0f, 0.0f),
		PxVec3(0.0f, 0.0f, -1.0f)
	};
}
// Sweeps a sphere against an oriented box (the sphere moves along 'dir' by up to 'length').
//
// \param box			[in] the box
// \param sphereRadius	[in] sphere radius
// \param spherePos		[in] sphere center
// \param dir			[in] sweep direction (applied to the sphere)
// \param length		[in] max sweep distance
// \param min_dist		[out] impact distance (0 on initial overlap)
// \param normal		[out] impact normal (normalized; -dir on initial overlap)
// \param hitFlags		[in] query modifiers (eASSUME_NO_INITIAL_OVERLAP skips the overlap test)
// \return				true if an impact has been found
bool Gu::sweepBoxSphere(const Box& box, PxReal sphereRadius, const PxVec3& spherePos, const PxVec3& dir, PxReal length, PxReal& min_dist, PxVec3& normal, PxHitFlags hitFlags)
{
	if(!(hitFlags & PxHitFlag::eASSUME_NO_INITIAL_OVERLAP))
	{
		// PT: test if shapes initially overlap
		if(intersectSphereBox(Sphere(spherePos, sphereRadius), box))
		{
			// Overlap
			min_dist = 0.0f;
			normal = -dir;
			return true;
		}
	}

	// Part 1: cast the sphere against the 12 box edges, each treated as a capsule
	// of radius sphereRadius (equivalent to casting a ray against the inflated box edges).
	PxVec3 boxPts[8];
	box.computeBoxPoints(boxPts);
	const PxU8* PX_RESTRICT edges = getBoxEdges();

	PxReal MinDist = length;
	bool Status = false;
	for(PxU32 i=0; i<12; i++)
	{
		const PxU8 e0 = *edges++;
		const PxU8 e1 = *edges++;
		const Capsule capsule(boxPts[e0], boxPts[e1], sphereRadius);

		PxReal t;
		if(intersectRayCapsule(spherePos, dir, capsule, t))
		{
			if(t>=0.0f && t<=MinDist)
			{
				MinDist = t;

				// Normal points from the impact point towards the closest point on the edge
				const PxVec3 ip = spherePos + t*dir;
				distancePointSegmentSquared(capsule, ip, &t);

				PxVec3 ip2;
				capsule.computePoint(ip2, t);

				normal = (ip2 - ip);
				normal.normalize();
				Status = true;
			}
		}
	}

	// Part 2: cast against the box faces, working in box-local space
	PxVec3 localPt;
	{
		PxMat34 M2;
		buildMatrixFromBox(M2, box);
		localPt = M2.rotateTranspose(spherePos - M2.p);
	}
	const PxVec3* boxNormals = gNearPlaneNormal;
	const PxVec3 localDir = box.rotateInv(dir);

	// PT: when the box exactly touches the sphere, the test for initial overlap can fail on some platforms.
	// In this case we reach the sweep code below, which may return a slightly negative time of impact (it should be 0.0
	// but it ends up a bit negative because of limited FPU accuracy). The epsilon ensures that we correctly detect a hit
	// in this case.
	const PxReal epsilon = -1e-5f;

	// Cast the ray against the box inflated by the sphere radius along each axis in turn.
	// This replaces the three verbatim-duplicated x/y/z slab tests of the original code;
	// the axis order (x, y, z) and the progressive tightening of MinDist are preserved.
	for(PxU32 axis=0; axis<3; axis++)
	{
		PxVec3 extents = box.extents;
		extents[axis] += sphereRadius;

		PxReal tnear, tfar;
		const int plane = intersectRayAABB(-extents, extents, localPt, localDir, tnear, tfar);
		if(plane!=-1 && tnear>=epsilon && tnear <= MinDist)
		{
			MinDist = PxMax(tnear, 0.0f);
			normal = box.rotate(boxNormals[plane]);
			Status = true;
		}
	}

	min_dist = MinDist;
	return Status;
}
| 4,893 | C++ | 30.171974 | 174 | 0.712855 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/sweep/GuSweepSphereTriangle.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_SWEEP_SPHERE_TRIANGLE_H
#define GU_SWEEP_SPHERE_TRIANGLE_H
#include "GuSweepTriangleUtils.h"
namespace physx
{
namespace Gu
{
/**
Sweeps a sphere against a triangle.
All input parameters (sphere, triangle, sweep direction) must be in the same space. Sweep length is assumed to be infinite.
By default, 'testInitialOverlap' must be set to true to properly handle the case where the sphere already overlaps the triangle
at the start of the sweep. In such a case, returned impact distance is exactly 0.0f. If it is known ahead of time that the sphere
cannot overlap the triangle at t=0.0, then 'testInitialOverlap' can be set to false to skip the initial overlap test and make the
function run faster.
If the ray defined by the sphere's center and the unit direction directly intersects the triangle-related part of the TSS (*) (i.e.
the prism from the Minkowski sum of the inflated triangle) then 'directHit' is set to true. Otherwise it is set to false.
(*) For Triangle Swept Sphere, see http://gamma.cs.unc.edu/SSV/ssv.pdf for the origin of these names.
\param triVerts [in] triangle vertices
\param triUnitNormal [in] triangle's normalized normal
\param sphereCenter [in] sphere's center
\param sphereRadius [in] sphere's radius
\param unitDir [in] normalized sweep direction.
\param impactDistance [out] impact distance, if a hit has been found. Does not need to be initialized before calling the function.
\param directHit [out] true if a direct hit has been found, see comments above.
\param testInitialOverlap [in] true if an initial sphere-vs-triangle overlap test must be performed, see comments above.
\return true if an impact has been found (in which case returned result values are valid)
*/
bool sweepSphereVSTri( const PxVec3* PX_RESTRICT triVerts, const PxVec3& triUnitNormal,// Triangle data
const PxVec3& sphereCenter, PxReal sphereRadius, // Sphere data
const PxVec3& unitDir, // Ray data
PxReal& impactDistance, bool& directHit, // Results
bool testInitialOverlap); // Query modifier
/**
Sweeps a sphere against a quad.
All input parameters (sphere, quad, sweep direction) must be in the same space. Sweep length is assumed to be infinite.
Quad must be formed by 2 tris like this:
p0___p2
| /|
| / |
| / |
|/ |
p1---p3
\param quadVerts [in] quad vertices
\param quadUnitNormal [in] quad's normalized normal
\param sphereCenter [in] sphere's center
\param sphereRadius [in] sphere's radius
\param unitDir [in] normalized sweep direction.
\param impactDistance [out] impact distance, if a hit has been found. Does not need to be initialized before calling the function.
\return true if an impact has been found (in which case returned result values are valid)
*/
bool sweepSphereVSQuad( const PxVec3* PX_RESTRICT quadVerts, const PxVec3& quadUnitNormal, // Quad data
const PxVec3& sphereCenter, float sphereRadius, // Sphere data
const PxVec3& unitDir, // Ray data
float& impactDistance); // Results
// PT: computes proper impact data for sphere-sweep-vs-tri, after the closest tri has been found
PX_FORCE_INLINE bool computeSphereTriangleImpactData(PxGeomSweepHit& h, PxVec3& triNormalOut, PxU32 index, PxReal curT,
	const PxVec3& center, const PxVec3& unitDir, const PxVec3& bestTriNormal,
	const PxTriangle* PX_RESTRICT triangles,
	bool isDoubleSided, bool meshBothSides)
{
	// No triangle was touched during the sweep
	if(index==PX_INVALID_U32)
		return false;

	// Compute impact data only once, using the best (closest) triangle
	PxVec3 impactPos, impactNormal;
	computeSphereTriImpactData(impactPos, impactNormal, center, unitDir, curT, triangles[index]);

	// PT: by design, the returned normal must oppose the sweep direction.
	if(shouldFlipNormal(impactNormal, meshBothSides, isDoubleSided, bestTriNormal, unitDir))
		impactNormal = -impactNormal;

	h.faceIndex	= index;
	h.distance	= curT;
	h.position	= impactPos;
	h.normal	= impactNormal;
	h.flags		= PxHitFlag::eNORMAL|PxHitFlag::ePOSITION;
	triNormalOut = bestTriNormal;
	return true;
}
/**
Sweeps a sphere against a set of triangles.
\param nbTris [in] number of triangles in input array
\param triangles [in] array of input triangles
\param center [in] sphere's center
\param radius [in] sphere's radius
\param unitDir [in] sweep's unit direcion
\param distance [in] sweep's length
\param cachedIndex [in] cached triangle index, or NULL. Cached triangle will be tested first.
\param hit [out] results
\param triNormalOut [out] triangle normal
\param isDoubleSided [in] true if input triangles are double-sided
\param meshBothSides [in] true if PxHitFlag::eMESH_BOTH_SIDES is used
\param anyHit [in] true if PxHitFlag::eMESH_ANY is used
\param testInitialOverlap [in] true if PxHitFlag::eASSUME_NO_INITIAL_OVERLAP is not used
\return true if an impact has been found
*/
bool sweepSphereTriangles( PxU32 nbTris, const PxTriangle* PX_RESTRICT triangles, // Triangle data
const PxVec3& center, const PxReal radius, // Sphere data
const PxVec3& unitDir, PxReal distance, // Ray data
const PxU32* PX_RESTRICT cachedIndex, // Cache data
PxGeomSweepHit& hit, PxVec3& triNormalOut, // Results
bool isDoubleSided, bool meshBothSides, bool anyHit, bool testInitialOverlap); // Query modifiers
} // namespace Gu
}
#endif
| 7,221 | C | 45.294872 | 132 | 0.729816 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/sweep/GuSweepCapsuleBox.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxBounds3.h"
#include "foundation/PxTransform.h"
#include "foundation/PxSIMDHelpers.h"
#include "geometry/PxTriangle.h"
#include "GuSweepCapsuleBox.h"
#include "GuSweepSphereTriangle.h"
#include "GuCapsule.h"
#include "GuDistanceSegmentBox.h"
#include "GuInternal.h"
#include "foundation/PxAlloca.h"
using namespace physx;
using namespace Gu;
namespace
{
	/**
	 * Returns triangles.
	 * \return 36 indices (12 triangles) indexing the list returned by ComputePoints()
	 */
	static const PxU8* getBoxTriangles()
	{
		// Index table is read-only and returned through a const pointer: declare it const
		// so accidental writes are rejected and the data can live in read-only storage.
		static const PxU8 Indices[] = {
			0,2,1, 0,3,2,
			1,6,5, 1,2,6,
			5,7,4, 5,6,7,
			4,3,0, 4,7,3,
			3,6,2, 3,7,6,
			5,0,1, 5,4,0
		};
		return Indices;
	}
}
// Writes triangle (p0, p1, p2) to the cursor 't' and advances it.
#define OUTPUT_TRI(t, p0, p1, p2){ \
t->verts[0] = p0; \
t->verts[1] = p1; \
t->verts[2] = p2; \
t++;}
// Writes triangle (p0, p1, p2) to the cursor 't', flipping its winding when its
// denormalized normal points along 'd', then advances 't' and records the source
// triangle index. NOTE: relies on 'denormalizedNormal', 'ids' and 'i' being in
// scope at the expansion site (see extrudeMesh below).
#define OUTPUT_TRI2(t, p0, p1, p2, d){ \
t->verts[0] = p0; \
t->verts[1] = p1; \
t->verts[2] = p2; \
t->denormalizedNormal(denormalizedNormal); \
if((denormalizedNormal.dot(d))>0.0f) { \
PxVec3 Tmp = t->verts[1]; \
t->verts[1] = t->verts[2]; \
t->verts[2] = Tmp; \
} \
t++; *ids++ = i; }
// Extrudes a set of triangles along 'extrusionDir': each non-culled source triangle
// produces one offset copy of itself plus 6 side triangles (2 per edge), i.e. up to
// 7 output triangles per input triangle. For every output triangle, the index of the
// source triangle is appended to 'ids' (partly via the OUTPUT_TRI2 macro).
// \param triangles    input triangles
// \param extrusionDir extrusion vector (applied as +/- offset around each triangle)
// \param tris         [out] output triangles (caller must provide room for nbTris*7)
// \param ids          [out] source-triangle index for each output triangle
// \param dir          sweep direction, used for backface culling and side-tri winding
// \return number of output triangles written
static PxU32 extrudeMesh(	PxU32 nbTris, const PxTriangle* triangles,
const PxVec3& extrusionDir, PxTriangle* tris, PxU32* ids, const PxVec3& dir)
{
const PxU32* base = ids;
for(PxU32 i=0; i<nbTris; i++)
{
const PxTriangle& currentTriangle = triangles[i];
// Create triangle normal
PxVec3 denormalizedNormal;
currentTriangle.denormalizedNormal(denormalizedNormal);
// Backface culling
const bool culled = (denormalizedNormal.dot(dir)) > 0.0f;
if(culled)	continue;
PxVec3 p0 = currentTriangle.verts[0];
PxVec3 p1 = currentTriangle.verts[1];
PxVec3 p2 = currentTriangle.verts[2];
// Offset copies of the triangle on both sides of the extrusion axis
PxVec3 p0b = p0 + extrusionDir;
PxVec3 p1b = p1 + extrusionDir;
PxVec3 p2b = p2 + extrusionDir;
p0 -= extrusionDir;
p1 -= extrusionDir;
p2 -= extrusionDir;
// Keep the offset copy facing away from the extrusion direction
if(denormalizedNormal.dot(extrusionDir) >= 0.0f)	OUTPUT_TRI(tris, p0b, p1b, p2b)
else	OUTPUT_TRI(tris, p0, p1, p2)
*ids++ = i;
// ### it's probably useless to extrude all the shared edges !!!!!
//if(CurrentFlags & TriangleCollisionFlag::eACTIVE_EDGE12)
{
OUTPUT_TRI2(tris, p1, p1b, p2b, dir)
OUTPUT_TRI2(tris, p1, p2b, p2, dir)
}
//if(CurrentFlags & TriangleCollisionFlag::eACTIVE_EDGE20)
{
OUTPUT_TRI2(tris, p0, p2, p2b, dir)
OUTPUT_TRI2(tris, p0, p2b, p0b, dir)
}
//if(CurrentFlags & TriangleCollisionFlag::eACTIVE_EDGE01)
{
OUTPUT_TRI2(tris, p0b, p1b, p1, dir)
OUTPUT_TRI2(tris, p0b, p1, p0, dir)
}
}
// 'ids' advanced once per output triangle, so the difference is the output count
return PxU32(ids-base);
}
// Tessellates a box into 12 triangles (optionally transformed to world space) and
// extrudes them along 'extrusionDir'. Writes up to 12*7 triangles to 'tris' and
// returns the number actually written.
static PxU32 extrudeBox(const PxBounds3& localBox, const PxTransform* world, const PxVec3& extrusionDir, PxTriangle* tris, const PxVec3& dir)
{
	// Handle the box as a mesh
	PxVec3 pts[8];
	computeBoxPoints(localBox, pts);

	const PxU8* PX_RESTRICT indices = getBoxTriangles();

	PxTriangle boxTris[12];
	for(PxU32 t=0; t<12; t++)
	{
		// Gather the triangle's vertices...
		PxVec3 v0 = pts[indices[t*3+0]];
		PxVec3 v1 = pts[indices[t*3+1]];
		PxVec3 v2 = pts[indices[t*3+2]];

		// ...and move them to world space if a pose was provided
		if(world)
		{
			v0 = world->transform(v0);
			v1 = world->transform(v1);
			v2 = world->transform(v2);
		}

		boxTris[t].verts[0] = v0;
		boxTris[t].verts[1] = v1;
		boxTris[t].verts[2] = v2;
	}

	// Extrude the 12 box triangles; the per-triangle ids are not needed by our callers
	PxU32 unusedIds[12*7];
	return extrudeMesh(12, boxTris, extrusionDir, tris, unusedIds, dir);
}
//
// The problem of testing a swept capsule against a box is transformed into sweeping a sphere (lying at the center
// of the capsule) against the extruded triangles of the box. The box triangles are extruded along the
// capsule segment axis.
//
bool Gu::sweepCapsuleBox(const Capsule& capsule, const PxTransform& boxWorldPose, const PxVec3& boxDim, const PxVec3& dir, PxReal length, PxVec3& hit, PxReal& min_dist, PxVec3& normal, PxHitFlags hitFlags)
{
if(!(hitFlags & PxHitFlag::eASSUME_NO_INITIAL_OVERLAP))
{
// PT: test if shapes initially overlap
if(distanceSegmentBoxSquared(capsule.p0, capsule.p1, boxWorldPose.p, boxDim, PxMat33Padded(boxWorldPose.q)) < capsule.radius*capsule.radius)
{
min_dist = 0.0f;
normal = -dir;
return true;
}
}
// Extrusion dir = capsule segment
const PxVec3 extrusionDir = (capsule.p1 - capsule.p0)*0.5f;
// Extrude box
PxReal MinDist = length;
bool Status = false;
{
const PxBounds3 aabb(-boxDim, boxDim);
PX_ALLOCA(triangles, PxTriangle, 12*7);
const PxU32 nbTris = extrudeBox(aabb, &boxWorldPose, extrusionDir, triangles, dir);
PX_ASSERT(nbTris<=12*7);
// Sweep sphere vs extruded box
PxGeomSweepHit h; // PT: TODO: ctor!
PxVec3 bestNormal;
if(sweepSphereTriangles(nbTris, triangles, capsule.computeCenter(), capsule.radius, dir, length, NULL, h, bestNormal, false, false, false, false))
{
hit = h.position;
MinDist = h.distance;
normal = h.normal;
Status = true;
}
}
min_dist = MinDist;
return Status;
}
| 6,614 | C++ | 29.625 | 205 | 0.695192 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/sweep/GuSweepBoxTriangle_FeatureBased.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxBounds3.h"
#include "geometry/PxTriangle.h"
#include "GuSweepBoxTriangle_FeatureBased.h"
#include "GuIntersectionRayBox.h"
#include "GuSweepTriangleUtils.h"
#include "GuInternal.h"
using namespace physx;
using namespace Gu;
#define LOCAL_EPSILON 0.00001f // PT: this value makes the 'basicAngleTest' pass. Fails because of a ray almost parallel to a triangle
namespace
{
// Inflation coefficient for the fat-triangle test: each vertex is pushed away from
// the triangle's centroid by this fraction of its centroid distance (see inflateTriangle).
static const PxReal gFatTriangleCoeff = 0.02f;
// Box face normals in box local space, indexed by the plane id returned by the
// ray-vs-AABB tests (see the triangle-vertices-vs-box phase of sweepBoxTriangle).
static const PxVec3 gNearPlaneNormal[] =
{
PxVec3(1.0f, 0.0f, 0.0f),
PxVec3(0.0f, 1.0f, 0.0f),
PxVec3(0.0f, 0.0f, 1.0f),
PxVec3(-1.0f, 0.0f, 0.0f),
PxVec3(0.0f, -1.0f, 0.0f),
PxVec3(0.0f, 0.0f, -1.0f)
};
}
#define INVSQRT3 0.577350269189f	//!< 1 / sqrt(3)
/**
Returns vertex normals.
\return 24 floats (8 normals), one unit diagonal normal per box corner
*/
static const PxF32* getBoxVertexNormals()
{
	//     7+------+6			0 = ---
	//     /|     /|			1 = +--
	//    / |    / |			2 = ++-
	//   / 4+---/--+5			3 = -+-
	// 3+------+2 /    y   z	4 = --+
	//  | /    | /     |  /		5 = +-+
	//  |/     |/      |/		6 = +++
	// 0+------+1      *---x	7 = -++
	// Table is read-only and returned through a const pointer: declare it const so
	// accidental writes are rejected and the data can live in read-only storage.
	static const PxF32 VertexNormals[] =
	{
		-INVSQRT3,	-INVSQRT3,	-INVSQRT3,
		INVSQRT3,	-INVSQRT3,	-INVSQRT3,
		INVSQRT3,	INVSQRT3,	-INVSQRT3,
		-INVSQRT3,	INVSQRT3,	-INVSQRT3,
		-INVSQRT3,	-INVSQRT3,	INVSQRT3,
		INVSQRT3,	-INVSQRT3,	INVSQRT3,
		INVSQRT3,	INVSQRT3,	INVSQRT3,
		-INVSQRT3,	INVSQRT3,	INVSQRT3
	};
	return VertexNormals;
}
// Returns a copy of 'triangle' inflated about its centroid: each vertex is pushed
// away from the centroid by 'fat_coeff' times its centroid distance.
static PxTriangle inflateTriangle(const PxTriangle& triangle, PxReal fat_coeff)
{
	PxTriangle fatTri = triangle;

	// Compute triangle center
	const PxVec3 center = (triangle.verts[0] + triangle.verts[1] + triangle.verts[2])*0.333333333f;

	// Don't normalize?
	// Normalize => add a constant border, regardless of triangle size
	// Don't => add more to big triangles
	for(PxU32 i=0;i<3;i++)
		fatTri.verts[i] += (fatTri.verts[i] - center) * fat_coeff;

	return fatTri;
}
// PT: special version to fire N parallel rays against the same tri
// Moller-Trumbore-style ray-vs-triangle test with precomputed, direction-dependent
// data: 'edge1' = vert1 - vert0, 'edge2' = vert2 - vert0, 'pvec' = dir x edge2,
// 'det' = edge1 . pvec, 'oneOverDet' = 1/det (see caller in sweepBoxTriangle).
// Backface-culling variant: u and v are compared against the unscaled determinant,
// which rejects hits when det is negative.
// \param orig        ray origin
// \param dir         ray direction
// \param t           [out] impact distance along the ray (may be negative; caller filters)
// \return 1 if the ray hits the triangle, 0 otherwise
static PX_FORCE_INLINE PxIntBool rayTriPrecaCull(	const PxVec3& orig, const PxVec3& dir,
const PxVec3& vert0, const PxVec3& edge1, const PxVec3& edge2, const PxVec3& pvec,
PxReal det, PxReal oneOverDet, PxReal& t)
{
// Calculate distance from vert0 to ray origin
const PxVec3 tvec = orig - vert0;
// Calculate U parameter and test bounds
PxReal u = tvec.dot(pvec);
if((u < 0.0f) || u>det)
return 0;
// Prepare to test V parameter
const PxVec3 qvec = tvec.cross(edge1);
// Calculate V parameter and test bounds
PxReal v = dir.dot(qvec);
if((v < 0.0f) || u+v>det)
return 0;
// Calculate t, scale parameters, ray intersects triangle
t = edge2.dot(qvec);
t *= oneOverDet;
return 1;
}
// Non-culling counterpart of rayTriPrecaCull: u and v are scaled by 'oneOverDet'
// and tested against [0,1], so both triangle windings are accepted. The 'det'
// parameter is kept for signature symmetry but unused here.
// \param t  [out] impact distance along the ray (may be negative; caller filters)
// \return 1 if the ray hits the triangle, 0 otherwise
static PX_FORCE_INLINE PxIntBool rayTriPrecaNoCull(	const PxVec3& orig, const PxVec3& dir,
const PxVec3& vert0, const PxVec3& edge1, const PxVec3& edge2, const PxVec3& pvec,
PxReal /*det*/, PxReal oneOverDet, PxReal& t)
{
// Calculate distance from vert0 to ray origin
const PxVec3 tvec = orig - vert0;
// Calculate U parameter and test bounds
PxReal u = (tvec.dot(pvec)) * oneOverDet;
if((u < 0.0f) || u>1.0f)
return 0;
// prepare to test V parameter
const PxVec3 qvec = tvec.cross(edge1);
// Calculate V parameter and test bounds
PxReal v = (dir.dot(qvec)) * oneOverDet;
if((v < 0.0f) || u+v>1.0f)
return 0;
// Calculate t, ray intersects triangle
t = (edge2.dot(qvec)) * oneOverDet;
return 1;
}
// PT: specialized version where oneOverDir is available
// PT: why did we change the initial epsilon value?
#define LOCAL_EPSILON_RAY_BOX PX_EPS_F32
//#define LOCAL_EPSILON_RAY_BOX 0.0001f
// Ray-vs-AABB slab test, specialized for the case where 1/dir is precomputed.
// Returns the index of the entry plane, or -1 on a miss; the caller asserts that
// the index matches intersectRayAABB's convention and uses it to look up
// gNearPlaneNormal (see sweepBoxTriangle).
// 'fbx/fby/fbz' are precomputed by the caller: true when the corresponding
// direction component is (nearly) zero, in which case that slab degenerates to a
// containment test on the ray origin.
static PX_FORCE_INLINE int intersectRayAABB2(const PxVec3& minimum, const PxVec3& maximum,
const PxVec3& ro, const PxVec3& /*rd*/, const PxVec3& oneOverDir,
float& tnear, float& tfar,
bool fbx, bool fby, bool fbz)
{
// PT: this unrolled loop is a lot faster on Xbox
// Degenerate axes: the ray can only hit if its origin lies inside the slab
if(fbx)
if(ro.x<minimum.x || ro.x>maximum.x)
{
// tnear = FLT_MAX;
return -1;
}
if(fby)
if(ro.y<minimum.y || ro.y>maximum.y)
{
// tnear = FLT_MAX;
return -1;
}
if(fbz)
if(ro.z<minimum.z || ro.z>maximum.z)
{
// tnear = FLT_MAX;
return -1;
}
// Entry/exit distances for each slab
PxReal t1x = (minimum.x - ro.x) * oneOverDir.x;
PxReal t2x = (maximum.x - ro.x) * oneOverDir.x;
PxReal t1y = (minimum.y - ro.y) * oneOverDir.y;
PxReal t2y = (maximum.y - ro.y) * oneOverDir.y;
PxReal t1z = (minimum.z - ro.z) * oneOverDir.z;
PxReal t2z = (maximum.z - ro.z) * oneOverDir.z;
// Candidate plane index per axis: swapped slabs pick the opposite face (3/4/5)
int bx;
int by;
int bz;
if(t1x>t2x)
{
PxReal t=t1x; t1x=t2x; t2x=t;
bx = 3;
}
else
{
bx = 0;
}
if(t1y>t2y)
{
PxReal t=t1y; t1y=t2y; t2y=t;
by = 4;
}
else
{
by = 1;
}
if(t1z>t2z)
{
PxReal t=t1z; t1z=t2z; t2z=t;
bz = 5;
}
else
{
bz = 2;
}
// Intersect the three slab intervals, keeping the plane that defines the latest entry
int ret;
if(!fbx)
{
// if(t1x>tnear)	// PT: no need to test for the first value
{
tnear = t1x;
ret = bx;
}
// tfar = Px::intrinsics::selectMin(tfar, t2x);
tfar = t2x;	// PT: no need to test for the first value
}
else
{
// x axis degenerate: start with an unbounded interval
ret=-1;
tnear = -PX_MAX_F32;
tfar = PX_MAX_F32;
}
if(!fby)
{
if(t1y>tnear)
{
tnear = t1y;
ret = by;
}
tfar = physx::intrinsics::selectMin(tfar, t2y);
}
if(!fbz)
{
if(t1z>tnear)
{
tnear = t1z;
ret = bz;
}
tfar = physx::intrinsics::selectMin(tfar, t2z);
}
// Empty interval, or box entirely behind the ray origin => miss
if(tnear>tfar || tfar<LOCAL_EPSILON_RAY_BOX)
return -1;
return ret;
}
// PT: force-inlining this saved 500.000 cycles in the benchmark. Ok to inline, only used once anyway.
static PX_FORCE_INLINE bool intersectEdgeEdge3(const PxPlane& plane, const PxVec3& p1, const PxVec3& p2, const PxVec3& dir, const PxVec3& v1,
const PxVec3& p3, const PxVec3& p4,
PxReal& dist, PxVec3& ip, PxU32 i, PxU32 j, const PxReal coeff)
{
// if colliding edge (p3,p4) does not cross plane return no collision
// same as if p3 and p4 on same side of plane return 0
//
// Derivation:
// d3 = d(p3, P) = (p3 | plane.n) - plane.d; Reversed sign compared to Plane::Distance() because plane.d is negated.
// d4 = d(p4, P) = (p4 | plane.n) - plane.d; Reversed sign compared to Plane::Distance() because plane.d is negated.
// if d3 and d4 have the same sign, they're on the same side of the plane => no collision
// We test both sides at the same time by only testing Sign(d3 * d4).
// ### put that in the Plane class
// ### also check that code in the triangle class that might be similar
const PxReal d3 = plane.distance(p3);
const PxReal temp = d3 * plane.distance(p4);
if(temp>0.0f) return false;
// if colliding edge (p3,p4) and plane are parallel return no collision
const PxVec3 v2 = p4 - p3;
const PxReal temp2 = plane.n.dot(v2);
if(temp2==0.0f) return false; // ### epsilon would be better
// compute intersection point of plane and colliding edge (p3,p4)
ip = p3-v2*(d3/temp2);
// compute distance of intersection from line (ip, -dir) to line (p1,p2)
dist = (v1[i]*(ip[j]-p1[j])-v1[j]*(ip[i]-p1[i])) * coeff;
if(dist<0.0f) return false;
// compute intersection point on edge (p1,p2) line
ip -= dist*dir;
// check if intersection point (ip) is between edge (p1,p2) vertices
const PxReal temp3 = (p1.x-ip.x)*(p2.x-ip.x)+(p1.y-ip.y)*(p2.y-ip.y)+(p1.z-ip.z)*(p2.z-ip.z);
return temp3<0.0f;
}
namespace
{
// Amount by which box edges are fattened before the edge-vs-edge tests (see makeFatEdge usage)
static const PxReal gFatBoxEdgeCoeff = 0.01f;
#define INVSQRT2 0.707106781188f	//!< 1 / sqrt(2)
// Unit diagonal normals associated with the 12 box edges, in box local space.
// Used in lockstep with the edge list from getBoxEdges() (the "a-b" comments give
// the vertex indices of each edge) - see the edge loop in sweepBoxTriangle.
static const PxVec3 EdgeNormals[] =
{
PxVec3(0, -INVSQRT2, -INVSQRT2),	// 0-1
PxVec3(INVSQRT2, 0, -INVSQRT2),	// 1-2
PxVec3(0, INVSQRT2, -INVSQRT2),	// 2-3
PxVec3(-INVSQRT2, 0, -INVSQRT2),	// 3-0
PxVec3(0, INVSQRT2, INVSQRT2),	// 7-6
PxVec3(INVSQRT2, 0, INVSQRT2),	// 6-5
PxVec3(0, -INVSQRT2, INVSQRT2),	// 5-4
PxVec3(-INVSQRT2, 0, INVSQRT2),	// 4-7
PxVec3(INVSQRT2, -INVSQRT2, 0),	// 1-5
PxVec3(INVSQRT2, INVSQRT2, 0),	// 6-2
PxVec3(-INVSQRT2, INVSQRT2, 0),	// 3-7
PxVec3(-INVSQRT2, -INVSQRT2, 0)	// 4-0
};
// Accessor for the table above
static const PxVec3* getBoxLocalEdgeNormals()
{
return EdgeNormals;
}
}
// Finds the 2D plane on which 'v' has the largest projection: outputs the indices
// (j, k) of the two axes spanning that plane (the dominant axis itself is dropped).
static PX_FORCE_INLINE void closestAxis2(const PxVec3& v, PxU32& j, PxU32& k)
{
	const PxF32 ax = physx::intrinsics::abs(v.x);
	const PxF32 ay = physx::intrinsics::abs(v.y);
	const PxF32 az = physx::intrinsics::abs(v.z);

	if(ay > ax && ay > az)
	{
		// y is the biggest axis: project on the (z, x) plane
		j = 2;
		k = 0;
	}
	else if(az > ax)
	{
		// z is the biggest axis: project on the (x, y) plane
		j = 0;
		k = 1;
	}
	else
	{
		// x is the biggest axis: project on the (y, z) plane
		j = 1;
		k = 2;
	}
}
// Feature-based box-vs-triangle sweep, in three phases:
//   0) box vertices raycast against the (inflated) triangle
//   1) triangle vertices raycast against the box (backwards, along -motion)
//   2) box edges swept against triangle edges
// 'col' records which phase produced the current best hit (-1 = none, 0/1/2 as above).
// 'd' is both an input (current best distance, for early rejection) and an output.
bool Gu::sweepBoxTriangle(	const PxTriangle& tri, const PxBounds3& box,
const PxVec3& motion, const PxVec3& oneOverMotion,
PxVec3& hit, PxVec3& normal, PxReal& d, bool isDoubleSided)
{
// Create triangle normal
PxVec3 denormalizedTriNormal;
tri.denormalizedNormal(denormalizedTriNormal);
// Backface culling
const bool doBackfaceCulling = !isDoubleSided;
if(doBackfaceCulling && (denormalizedTriNormal.dot(motion)) >= 0.0f)	// ">=" is important !
return false;
/////////////////////////
PxVec3 boxVertices[8];
computeBoxPoints(box, boxVertices);
/////////////////////////
// Make fat triangle
const PxTriangle fatTri = inflateTriangle(tri, gFatTriangleCoeff);
PxReal minDist = d;	// Initialize with current best distance
int col = -1;
// Box vertices VS triangle
{
// ### cull using box-plane distance ?
// Precompute the direction-dependent part of the ray-tri test once for all 8 rays
const PxVec3 edge1 = fatTri.verts[1] - fatTri.verts[0];
const PxVec3 edge2 = fatTri.verts[2] - fatTri.verts[0];
const PxVec3 PVec = motion.cross(edge2);
const PxReal Det = edge1.dot(PVec);
// We can't use stamps here since we can still find a better TOI for a given vertex,
// even if that vertex has already been tested successfully against another triangle.
const PxVec3* VN = reinterpret_cast<const PxVec3*>(getBoxVertexNormals());
const PxReal oneOverDet = Det!=0.0f ? 1.0f / Det : 0.0f;
PxU32 hitIndex=0;	// Index of the best box vertex, used to compute the hit point once at the end
if(doBackfaceCulling)
{
if(Det>=LOCAL_EPSILON)
{
for(PxU32 i=0;i<8;i++)
{
// Orientation culling
if((VN[i].dot(denormalizedTriNormal) >= 0.0f))	// Can't rely on triangle normal for double-sided faces
continue;
// ### test this
// ### ok, this causes the bug in level3's v-shaped desk. Not really a real "bug", it just happens
// that this VF test fixes this case, so it's a bad idea to cull it. Oh, well.
// If we use a penetration-depth code to fixup bad cases, we can enable this culling again. (also
// if we find a better way to handle that desk)
// Discard back vertices
// if(VN[i].dot(motion)<0.0f)
// continue;
// Shoot a ray from vertex against triangle, in direction "motion"
PxReal t;
if(!rayTriPrecaCull(boxVertices[i], motion, fatTri.verts[0], edge1, edge2, PVec, Det, oneOverDet, t))
continue;
//if(t<=OffsetLength)	t=0.0f;
// Only consider positive distances, closer than current best
// ### we could test that first on tri vertices & discard complete tri if it's further than current best (or equal!)
if(t < 0.0f || t > minDist)
continue;
minDist = t;
col = 0;
// hit = boxVertices[i] + t * motion;
hitIndex = i;
}
}
}
else
{
if(Det<=-LOCAL_EPSILON || Det>=LOCAL_EPSILON)
{
for(PxU32 i=0;i<8;i++)
{
// ### test this
// ### ok, this causes the bug in level3's v-shaped desk. Not really a real "bug", it just happens
// that this VF test fixes this case, so it's a bad idea to cull it. Oh, well.
// If we use a penetration-depth code to fixup bad cases, we can enable this culling again. (also
// if we find a better way to handle that desk)
// Discard back vertices
// if(!VN[i].SameDirection(motion))
// continue;
// Shoot a ray from vertex against triangle, in direction "motion"
PxReal t;
if(!rayTriPrecaNoCull(boxVertices[i], motion, fatTri.verts[0], edge1, edge2, PVec, Det, oneOverDet, t))
continue;
//if(t<=OffsetLength)	t=0.0f;
// Only consider positive distances, closer than current best
// ### we could test that first on tri vertices & discard complete tri if it's further than current best (or equal!)
if(t < 0.0f || t > minDist)
continue;
minDist = t;
col = 0;
// hit = boxVertices[i] + t * motion;
hitIndex = i;
}
}
}
// Only copy this once, if needed
if(col==0)
{
// PT: hit point on triangle
hit = boxVertices[hitIndex] + minDist * motion;
normal = denormalizedTriNormal;
}
}
// Triangle vertices VS box
{
// The triangle moves towards the box along -motion, which is equivalent to the box moving along motion
const PxVec3 negMotion = -motion;
const PxVec3 negInvMotion = -oneOverMotion;
// PT: precompute fabs-test for ray-box
// - doing this outside of the ray-box function gets rid of 3 fabs/fcmp per call
// - doing this with integer code removes the 3 remaining fabs/fcmps totally
// - doing this outside reduces the LHS
const bool b0 = physx::intrinsics::abs(negMotion.x)<LOCAL_EPSILON_RAY_BOX;
const bool b1 = physx::intrinsics::abs(negMotion.y)<LOCAL_EPSILON_RAY_BOX;
const bool b2 = physx::intrinsics::abs(negMotion.z)<LOCAL_EPSILON_RAY_BOX;
// ### have this as a param ?
const PxVec3& Min = box.minimum;
const PxVec3& Max = box.maximum;
const PxVec3* boxNormals = gNearPlaneNormal;
// ### use stamps not to shoot shared vertices multiple times
// ### discard non-convex verts
for(PxU32 i=0;i<3;i++)
{
PxReal tnear, tfar;
const int plane = ::intersectRayAABB2(Min, Max, tri.verts[i], negMotion, negInvMotion, tnear, tfar, b0, b1, b2);
PX_ASSERT(plane == intersectRayAABB(Min, Max, tri.verts[i], negMotion, tnear, tfar));
// The following works as well but we need to call "intersectRayAABB" to get a plane index compatible with BoxNormals.
// We could fix this by unifying the plane indices returned by the different ray-aabb functions...
//PxVec3 coord;
//PxReal t;
//PxU32 status = rayAABBIntersect2(Min, Max, tri.verts[i], -motion, coord, t);
// ### don't test -1 ?
if(plane==-1 || tnear<0.0f)	continue;
// if(tnear<0.0f)	continue;
if(tnear <= minDist)
{
minDist = tnear;	// ### warning, tnear => flips normals
normal = boxNormals[plane];
col = 1;
// PT: hit point on triangle
hit = tri.verts[i];
}
}
}
// Saved state of the best edge-edge hit; the normal is only computed once at the end
PxU32 saved_j = PX_INVALID_U32;
PxU32 saved_k = PX_INVALID_U32;
PxVec3 p1s;
PxVec3 v1s;
// Edge-vs-edge
{
// Loop through box edges
const PxU8* PX_RESTRICT edges = getBoxEdges();
const PxVec3* PX_RESTRICT edgeNormals = getBoxLocalEdgeNormals();
for(PxU32 i=0;i<12;i++)	// 12 edges
{
// PT: TODO: skip this if edge is culled
PxVec3 p1 = boxVertices[*edges++];
PxVec3 p2 = boxVertices[*edges++];
makeFatEdge(p1, p2, gFatBoxEdgeCoeff);
// Skip edges whose normal points away from the motion
if(edgeNormals[i].dot(motion) < 0.0f)
continue;
// While we're at it, precompute some more data for EE tests
const PxVec3 v1 = p2 - p1;
// Build plane P based on edge (p1, p2) and direction (dir)
const PxVec3 planeNormal = v1.cross(motion);
const PxPlane plane(planeNormal, -(planeNormal.dot(p1)));
// find largest 2D plane projection
PxU32 closest_i, closest_j;
// closestAxis(plane.normal, ii, jj);
closestAxis2(planeNormal, closest_i, closest_j);
const PxReal coeff = 1.0f / (v1[closest_i]*motion[closest_j] - v1[closest_j]*motion[closest_i]);
// Loop through triangle edges
for(PxU32 j=0; j<3; j++)
{
// Catch current triangle edge
// j=0 => 0-1
// j=1 => 1-2
// j=2 => 2-0
// => this is compatible with EdgeList
const PxU32 k = PxGetNextIndex3(j);
PxReal dist;
PxVec3 ip;
if(intersectEdgeEdge3(plane, p1, p2, motion, v1, tri.verts[j], tri.verts[k], dist, ip, closest_i, closest_j, coeff))
{
if(dist<=minDist)
{
p1s = p1;
v1s = v1;
saved_j = j;
saved_k = k;
col = 2;
minDist = dist;
// PT: hit point on triangle
hit = ip + motion*dist;
}
}
}
}
}
if(col==-1)
return false;
// Edge-edge hits need an explicit normal computation, done once for the best pair
if(col==2)
{
PX_ASSERT(saved_j != PX_INVALID_U32);
PX_ASSERT(saved_k != PX_INVALID_U32);
const PxVec3& p3 = tri.verts[saved_j];
const PxVec3& p4 = tri.verts[saved_k];
computeEdgeEdgeNormal(normal, p1s, v1s, p3, p4-p3, motion, minDist);
}
d = minDist;
return true;
}
| 17,961 | C++ | 27.831461 | 141 | 0.649964 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/sweep/GuSweepTriangleUtils.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxBounds3.h"
#include "GuSweepTriangleUtils.h"
#include "GuDistancePointTriangle.h"
#include "GuVecTriangle.h"
#include "GuVecBox.h"
#include "GuSweepBoxTriangle_FeatureBased.h"
#include "GuInternal.h"
#include "GuGJK.h"
using namespace physx;
using namespace Gu;
using namespace physx::aos;
#define GU_SAFE_DISTANCE_FOR_NORMAL_COMPUTATION 0.1f
// Derives the impact point and normal for a sphere swept against a triangle,
// given the time of impact 't' along sweep direction 'dir'.
void Gu::computeSphereTriImpactData(PxVec3& hit, PxVec3& normal, const PxVec3& center, const PxVec3& dir, float t, const PxTriangle& tri)
{
	const PxVec3 movedCenter = center + dir*t;

	// The sweep code does not provide the impact point, so recover it as the
	// triangle point closest to the sphere center at the time of impact.
	PxReal u, v;
	const PxVec3 contactPoint = closestPtPointTriangle(movedCenter, tri.verts[0], tri.verts[1], tri.verts[2], u, v);
	PX_UNUSED(u);
	PX_UNUSED(v);

	// This is responsible for the cap-vs-box stuck while jumping. However it's needed to slide on box corners!
	// PT: this one is also dubious since the sphere/capsule center can be far away from the hit point when the radius is big!
	PxVec3 contactNormal = movedCenter - contactPoint;
	if(contactNormal.normalize() < 1e-3f)
		tri.normal(contactNormal);	// degenerate direction: fall back to the triangle normal

	hit = contactPoint;
	normal = contactNormal;
}
// PT: not inlining this rarely-run function makes the benchmark ~500.000 cycles faster...
// PT: using this version all the time makes the benchmark ~300.000 cycles slower. So we just use it as a backup.
static bool runBackupProcedure(PxVec3& hit, PxVec3& normal, const PxVec3& localMotion, const PxVec3& boxExtents, const PxTriangle& triInBoxSpace)
{
	// GJK-based fallback: computes the closest points between the (already moved)
	// triangle and the origin-centered box, both expressed in box space, and derives
	// the impact point/normal from them. Returns false if GJK reports contact
	// (shapes overlapping => no meaningful closest points/normal).
	const Vec3V v0 = V3LoadU(triInBoxSpace.verts[0]);
	const Vec3V v1 = V3LoadU(triInBoxSpace.verts[1]);
	const Vec3V v2 = V3LoadU(triInBoxSpace.verts[2]);
	const TriangleV triangleV(v0, v1, v2);

	// PT: the box is in the triangle's space already
	//BoxV boxV(V3LoadU(PxVec3(0.0f)), V3LoadU(boxExtents),
	//	V3LoadU(PxVec3(1.0f, 0.0f, 0.0f)), V3LoadU(PxVec3(0.0f, 1.0f, 0.0f)), V3LoadU(PxVec3(0.0f, 0.0f, 1.0f)));
	const BoxV boxV(V3Zero(), V3LoadU(boxExtents));

	Vec3V closestA;	// closest point on the triangle
	Vec3V closestB;	// closest point on the box
	Vec3V normalV;
	FloatV distV;

	const LocalConvex<TriangleV> convexA(triangleV);
	const LocalConvex<BoxV> convexB(boxV);
	const Vec3V initialSearchDir = V3Sub(triangleV.getCenter(), boxV.getCenter());
	const FloatV contactDist = FMax();	// no distance cap for this query

	GjkStatus status_ = gjk<LocalConvex<TriangleV>, LocalConvex<BoxV> >(convexA, convexB, initialSearchDir, contactDist, closestA, closestB, normalV, distV);

	if(status_==GJK_CONTACT)
		return false;

	PxVec3 ml_closestB;
	PxVec3 ml_normal;
	V3StoreU(closestB, ml_closestB);
	V3StoreU(normalV, ml_normal);

	// The caller moved the triangle close to the box; shift the hit point back by the full motion
	hit = ml_closestB + localMotion;

//	normal = -ml_normal;
	// Make sure the returned normal opposes the sweep motion
	if((ml_normal.dot(localMotion))>0.0f)
		ml_normal = -ml_normal;
	normal = ml_normal;
	return true;
}
void Gu::computeBoxTriImpactData(PxVec3& hit, PxVec3& normal, const PxVec3& boxExtents, const PxVec3& localDir, const PxTriangle& triInBoxSpace, PxReal impactDist)
{
	// PT: the triangle is in "box space", i.e. the box can be seen as an AABB centered around the origin.

	// PT: compute impact point/normal in a second pass. Here we simply re-sweep the box against the best triangle,
	// using the feature-based code (which computes impact point and normal). This is not great because:
	// - we know there's an impact so why do all tests again?
	// - the SAT test & the feature-based tests could return different results because of FPU accuracy.
	// The backup procedure makes sure we compute a proper answer even when the SAT and feature-based versions differ.
	const PxBounds3 aabb(-boxExtents, boxExtents);

	// Precomputed reciprocal direction for the feature-based sweep (0 for zero components)
	const PxVec3 oneOverDir(
		localDir.x!=0.0f ? 1.0f/localDir.x : 0.0f,
		localDir.y!=0.0f ? 1.0f/localDir.y : 0.0f,
		localDir.z!=0.0f ? 1.0f/localDir.z : 0.0f);

	// PT: TODO: this is the only place left using sweepBoxTriangle()

	// Backface culling could be removed here since we know we want a hit no matter what. Plus, it's sometimes
	// incorrectly culled and we hit the backup procedure for no reason. On Win32Modern for unknown reasons
	// returned normal is sometimes (0,0,0). In these cases we also switch to the backup procedure.
	float t = PX_MAX_F32;	// PT: no need to initialize with best dist here since we want a hit no matter what
	if(!sweepBoxTriangle(triInBoxSpace, aabb, localDir, oneOverDir, hit, normal, t) || normal.isZero())
	{
		// Feature-based sweep failed or returned a degenerate normal: fall back to GJK.
		// PT: move triangle close to box
		const PxVec3 localMotion = localDir*impactDist;
		const PxVec3 delta = localMotion - localDir*GU_SAFE_DISTANCE_FOR_NORMAL_COMPUTATION;
		const PxTriangle movedTriangle(
			triInBoxSpace.verts[0] - delta,
			triInBoxSpace.verts[1] - delta,
			triInBoxSpace.verts[2] - delta);

		if(!runBackupProcedure(hit, normal, localMotion, boxExtents, movedTriangle))
		{
			// PT: if the backup procedure fails, we give up
			hit = PxVec3(0.0f);
			normal = -localDir;
		}
	}
}
// PT: copy where we know that input vectors are not zero
// Closest points between two segments; callers guarantee both direction vectors are non-zero.
static PX_FORCE_INLINE void edgeEdgeDistNoZeroVector(	PxVec3& x, PxVec3& y,				// closest points
														const PxVec3& p, const PxVec3& a,	// seg 1 origin, vector
														const PxVec3& q, const PxVec3& b)	// seg 2 origin, vector
{
	// Vector between the two segment origins
	const PxVec3 diff = q - p;

	const PxReal aa = a.dot(a);
	const PxReal bb = b.dot(b);
	PX_ASSERT(aa!=0.0f);
	PX_ASSERT(bb!=0.0f);
	const PxReal ab = a.dot(b);
	const PxReal aDiff = a.dot(diff);
	const PxReal bDiff = b.dot(diff);

	// s parameterizes segment (p, a), u parameterizes segment (q, b).
	// Unconstrained closest parameter on (p, a), clamped to the segment.
	// A zero determinant means the segments are parallel: pick s=0.
	const PxReal det = aa*bb - ab*ab;
	PxReal s = det!=0.0f ? PxClamp((aDiff*bb - bDiff*ab) / det, 0.0f, 1.0f) : 0.0f;

	// Closest parameter on (q, b) to the point at s; if it falls outside the
	// segment, clamp it and recompute (and clamp) s.
	PxReal u = (s*ab - bDiff) / bb;
	if(u<0.0f)
	{
		u = 0.0f;
		s = PxClamp(aDiff / aa, 0.0f, 1.0f);
	}
	else if(u>1.0f)
	{
		u = 1.0f;
		s = PxClamp((ab + aDiff) / aa, 0.0f, 1.0f);
	}

	x = p + a*s;
	y = q + b*u;
}
// PT: cross-product doesn't produce nice normals so we use an edge-edge distance function itself
void Gu::computeEdgeEdgeNormal(PxVec3& normal, const PxVec3& p1, const PxVec3& p2_p1, const PxVec3& p3, const PxVec3& p4_p3, const PxVec3& dir, float d)
{
	// Pull the first edge back by GU_SAFE_DISTANCE_FOR_NORMAL_COMPUTATION from the
	// impact configuration. If the edges are too far apart the computed normal tends
	// to align with the sweep direction; if they touch, the closest points coincide
	// and no proper normal can be derived.
	const PxVec3 adjustedOrigin = p1 + dir*(d - GU_SAFE_DISTANCE_FOR_NORMAL_COMPUTATION);

	PxVec3 closest0, closest1;
	edgeEdgeDistNoZeroVector(closest0, closest1, adjustedOrigin, p2_p1, p3, p4_p3);

	// The (unnormalized) normal is the vector between the two closest points
	normal = closest0 - closest1;
}
| 8,561 | C++ | 39.966507 | 163 | 0.722696 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/sweep/GuSweepCapsuleCapsule.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "geometry/PxTriangle.h"
#include "PxQueryReport.h"
#include "GuSweepCapsuleCapsule.h"
#include "GuCapsule.h"
#include "GuDistancePointSegment.h"
#include "GuDistanceSegmentSegment.h"
#include "GuIntersectionRayCapsule.h"
using namespace physx;
using namespace Gu;
#define LOCAL_EPSILON 0.00001f // PT: this value makes the 'basicAngleTest' pass. Fails because of a ray almost parallel to a triangle
// Closest points between two segments; handles degenerate (zero-length) segments.
void edgeEdgeDist(	PxVec3& x, PxVec3& y,				// closest points
					const PxVec3& p, const PxVec3& a,	// seg 1 origin, vector
					const PxVec3& q, const PxVec3& b)	// seg 2 origin, vector
{
	// Vector between the two segment origins
	const PxVec3 T = q - p;

	const PxReal aa = a.dot(a);
	const PxReal bb = b.dot(b);
	const PxReal ab = a.dot(b);
	const PxReal aT = a.dot(T);
	const PxReal bT = b.dot(T);

	// t parameterizes segment (p, a), u parameterizes segment (q, b).
	// Unconstrained closest parameter on (p, a), clamped to the segment.
	// A zero determinant (parallel or degenerate segments) picks t=0.
	const PxReal det = aa*bb - ab*ab;
	PxReal t = det!=0.0f ? PxClamp((aT*bb - bT*ab) / det, 0.0f, 1.0f) : 0.0f;

	PxReal u = 0.0f;
	if(bb!=0.0f)
	{
		// Closest parameter on (q, b) to the point at t; if it falls outside the
		// segment, clamp it and recompute (and clamp) t.
		u = (t*ab - bT) / bb;
		if(u<0.0f)
		{
			u = 0.0f;
			t = aa!=0.0f ? PxClamp(aT / aa, 0.0f, 1.0f) : 0.0f;
		}
		else if(u>1.0f)
		{
			u = 1.0f;
			t = aa!=0.0f ? PxClamp((ab + aT) / aa, 0.0f, 1.0f) : 0.0f;
		}
	}
	else
	{
		// Second segment is a point: closest point on (p, a) to that point
		t = aa!=0.0f ? PxClamp(aT / aa, 0.0f, 1.0f) : 0.0f;
	}

	x = p + a*t;
	y = q + b*u;
}
static bool rayQuad(const PxVec3& orig, const PxVec3& dir, const PxVec3& vert0, const PxVec3& vert1, const PxVec3& vert2, PxReal& t, PxReal& u, PxReal& v, bool cull)
{
// Find vectors for two edges sharing vert0
const PxVec3 edge1 = vert1 - vert0;
const PxVec3 edge2 = vert2 - vert0;
// Begin calculating determinant - also used to calculate U parameter
const PxVec3 pvec = dir.cross(edge2);
// If determinant is near zero, ray lies in plane of triangle
const PxReal det = edge1.dot(pvec);
if(cull)
{
if(det<LOCAL_EPSILON)
return false;
// Calculate distance from vert0 to ray origin
const PxVec3 tvec = orig - vert0;
// Calculate U parameter and test bounds
u = tvec.dot(pvec);
if(u<0.0f || u>det)
return false;
// Prepare to test V parameter
const PxVec3 qvec = tvec.cross(edge1);
// Calculate V parameter and test bounds
v = dir.dot(qvec);
if(v<0.0f || v>det)
return false;
// Calculate t, scale parameters, ray intersects triangle
t = edge2.dot(qvec);
const PxReal oneOverDet = 1.0f / det;
t *= oneOverDet;
u *= oneOverDet;
v *= oneOverDet;
}
else
{
// the non-culling branch
if(det>-LOCAL_EPSILON && det<LOCAL_EPSILON)
return false;
const PxReal oneOverDet = 1.0f / det;
// Calculate distance from vert0 to ray origin
const PxVec3 tvec = orig - vert0;
// Calculate U parameter and test bounds
u = (tvec.dot(pvec)) * oneOverDet;
if(u<0.0f || u>1.0f)
return false;
// prepare to test V parameter
const PxVec3 qvec = tvec.cross(edge1);
// Calculate V parameter and test bounds
v = (dir.dot(qvec)) * oneOverDet;
if(v<0.0f || v>1.0f)
return false;
// Calculate t, ray intersects triangle
t = (edge2.dot(qvec)) * oneOverDet;
}
return true;
}
bool Gu::sweepCapsuleCapsule(const Capsule& capsule0, const Capsule& capsule1, const PxVec3& dir, PxReal length, PxReal& min_dist, PxVec3& ip, PxVec3& normal, PxU32 inHitFlags, PxU16& outHitFlags)
{
	// Sweeps capsule1 along 'dir' over 'length' against capsule0.
	// On impact returns true and fills min_dist, plus ip/normal depending on inHitFlags.
	const PxReal radiusSum = capsule0.radius + capsule1.radius;

	if(!(inHitFlags & PxHitFlag::eASSUME_NO_INITIAL_OVERLAP))
	{
		// PT: test if shapes initially overlap

		// PT: It would be better not to use the same code path for spheres and capsules. The segment-segment distance
		// function doesn't work for degenerate capsules so we need to test all combinations here anyway.
		bool initialOverlapStatus;
		if(capsule0.p0==capsule0.p1)
			initialOverlapStatus = distancePointSegmentSquared(capsule1, capsule0.p0)<radiusSum*radiusSum;
		else if(capsule1.p0==capsule1.p1)
			initialOverlapStatus = distancePointSegmentSquared(capsule0, capsule1.p0)<radiusSum*radiusSum;
		else
			initialOverlapStatus = distanceSegmentSegmentSquared(capsule0, capsule1)<radiusSum*radiusSum;

		if(initialOverlapStatus)
		{
			// Touching hit: zero distance, normal opposing the sweep direction
			min_dist	= 0.0f;
			normal		= -dir;
			outHitFlags	= PxHitFlag::eNORMAL;
			return true;
		}
	}

	// 1. Extrude capsule0 by capsule1's length
	// 2. Inflate extruded shape by capsule1's radius
	// 3. Raycast against resulting shape
	const PxVec3 capsuleExtent1 = capsule1.p1 - capsule1.p0;

	// Extrusion dir = capsule segment
	const PxVec3 D = capsuleExtent1*0.5f;

	// Four corners of the extruded quad (capsule0's segment extruded by capsule1's half-segment)
	const PxVec3 p0 = capsule0.p0 - D;
	const PxVec3 p1 = capsule0.p1 - D;
	const PxVec3 p0b = capsule0.p0 + D;
	const PxVec3 p1b = capsule0.p1 + D;

	PxTriangle T(p0b, p1b, p1);
	PxVec3 Normal;
	T.normal(Normal);

	PxReal MinDist = length;
	bool Status = false;

	// Offset the quad by radiusSum towards the side facing the incoming sweep
	PxVec3 pa,pb,pc;
	if((Normal.dot(dir)) >= 0)  // Same direction
	{
		Normal *= radiusSum;
		pc = p0 - Normal;
		pa = p1 - Normal;
		pb = p1b - Normal;
	}
	else
	{
		Normal *= radiusSum;
		pb = p0 + Normal;
		pa = p1 + Normal;
		pc = p1b + Normal;
	}

	// Raycast capsule1's center against the offset quad
	PxReal t, u, v;
	const PxVec3 center = capsule1.computeCenter();
	if(rayQuad(center, dir, pa, pb, pc, t, u, v, true) && t>=0.0f && t<MinDist)
	{
		MinDist = t;
		Status = true;
	}

	// PT: optimization: if we hit one of the quad we can't possibly get a better hit, so let's skip all
	// the remaining tests!
	if(!Status)
	{
		// Raycast against the four inflated capsules along the extruded quad's border
		Capsule Caps[4];
		Caps[0] = Capsule(p0, p1, radiusSum);
		Caps[1] = Capsule(p1, p1b, radiusSum);
		Caps[2] = Capsule(p1b, p0b, radiusSum);
		Caps[3] = Capsule(p0, p0b, radiusSum);

		// ### a lot of ray-sphere tests could be factored out of the ray-capsule tests...
		for(PxU32 i=0;i<4;i++)
		{
			PxReal w;
			if(intersectRayCapsule(center, dir, Caps[i], w))
			{
				if(w>=0.0f && w<= MinDist)
				{
					MinDist = w;
					Status = true;
				}
			}
		}
	}

	if(Status)
	{
		outHitFlags = PxHitFlags(0);
		if(inHitFlags & PxU32(PxHitFlag::ePOSITION|PxHitFlag::eNORMAL))
		{
			// Move capsule0 back by the impact distance (relative config is the same
			// as advancing capsule1 by MinDist), then derive the contact from the
			// closest points between the two segments.
			const PxVec3 p00 = capsule0.p0 - MinDist * dir;
			const PxVec3 p01 = capsule0.p1 - MinDist * dir;
		//	const PxVec3 p10 = capsule1.p0;// - MinDist * dir;
		//	const PxVec3 p11 = capsule1.p1;// - MinDist * dir;

			const PxVec3 edge0 = p01 - p00;
			const PxVec3 edge1 = capsuleExtent1;

			PxVec3 x, y;
			edgeEdgeDist(x, y, p00, edge0, capsule1.p0, edge1);

			if(inHitFlags & PxHitFlag::eNORMAL)
			{
				normal = (x - y);
				const float epsilon = 0.001f;
				if(normal.normalize()<epsilon)
				{
					// PT: happens when radiuses are zero
					normal = edge1.cross(edge0);
					if(normal.normalize()<epsilon)
					{
						// PT: happens when edges are parallel
						const PxVec3 capsuleExtent0 = capsule0.p1 - capsule0.p0;
						edgeEdgeDist(x, y, capsule0.p0, capsuleExtent0, capsule1.p0, edge1);
						normal = (x - y);
						normal.normalize();
					}
				}
				outHitFlags |= PxHitFlag::eNORMAL;
			}

			if(inHitFlags & PxHitFlag::ePOSITION)
			{
				// Impact point: weighted average of the closest points by the two radii
				ip = (capsule1.radius*x + capsule0.radius*y)/(capsule0.radius+capsule1.radius);
				outHitFlags |= PxHitFlag::ePOSITION;
			}
		}
		min_dist = MinDist;
	}
	return Status;
}
| 9,147 | C++ | 28.04127 | 196 | 0.677818 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/sweep/GuSweepBoxTriangle_SAT.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_SWEEP_BOX_TRIANGLE_SAT_H
#define GU_SWEEP_BOX_TRIANGLE_SAT_H
#include "geometry/PxTriangle.h"
#include "GuSweepSharedTests.h"
#include "GuInternal.h"
#define RetType int
#define MTDType bool
namespace physx
{
namespace Gu
{
// We have separation if one of those conditions is true:
// -BoxExt > TriMax (box strictly to the right of the triangle)
// BoxExt < TriMin (box strictly to the left of the triangle
// <=> d0 = -BoxExt - TriMax > 0
// d1 = BoxExt - TriMin < 0
// Hence we have overlap if d0 <= 0 and d1 >= 0
// overlap = (d0<=0.0f && d1>=0.0f)
#define TEST_OVERLAP \
const float d0 = -BoxExt - TriMax; \
const float d1 = BoxExt - TriMin; \
const bool bIntersect = (d0<=0.0f && d1>=0.0f); \
bValidMTD &= bIntersect;
// PT: inlining this one is important. Returning floats looks bad but is faster on Xbox.
static PX_FORCE_INLINE RetType testAxis(const PxTriangle& tri, const PxVec3& extents, const PxVec3& dir, const PxVec3& axis, MTDType& bValidMTD, float& tfirst, float& tlast)
{
	// Tests one candidate separating axis for the swept box-vs-triangle SAT.
	// Projects the triangle and the origin-centered box onto 'axis' and clips the
	// accumulated [tfirst, tlast] overlap interval for motion along 'dir'.
	// Returns 0 if the interval becomes empty (separating axis found), non-zero otherwise.

	// Project the three triangle vertices and keep their min/max
	const float d0t = tri.verts[0].dot(axis);
	const float d1t = tri.verts[1].dot(axis);
	const float d2t = tri.verts[2].dot(axis);

	float TriMin = PxMin(d0t, d1t);
	float TriMax = PxMax(d0t, d1t);
	TriMin = PxMin(TriMin, d2t);
	TriMax = PxMax(TriMax, d2t);

	////////

	// Projected "radius" of the box on the axis
	const float BoxExt = PxAbs(axis.x)*extents.x + PxAbs(axis.y)*extents.y + PxAbs(axis.z)*extents.z;

	// TEST_OVERLAP defines d0/d1/bIntersect from BoxExt/TriMin/TriMax and
	// accumulates the static-overlap status into bValidMTD.
	TEST_OVERLAP

	const float v = dir.dot(axis);
	if(PxAbs(v) < 1.0E-6f)
		return bIntersect;	// motion (almost) orthogonal to the axis: overlap cannot change over time
	const float oneOverV = -1.0f / v;

//	float t0 = d0 * oneOverV;
//	float t1 = d1 * oneOverV;
//	if(t0 > t1)	TSwap(t0, t1);
	// Entry/exit times of the projection overlap on this axis
	const float t0_ = d0 * oneOverV;
	const float t1_ = d1 * oneOverV;
	float t0 = PxMin(t0_, t1_);
	float t1 = PxMax(t0_, t1_);

	// Clip against the accumulated interval; empty interval => no hit possible
	if(t0 > tlast)	return false;
	if(t1 < tfirst)	return false;
//	if(t1 < tlast)	tlast = t1;
	tlast = PxMin(t1, tlast);
//	if(t0 > tfirst)	tfirst = t0;
	tfirst = PxMax(t0, tfirst);
	return true;
}
template<const int XYZ>
static PX_FORCE_INLINE RetType testAxisXYZ(const PxTriangle& tri, const PxVec3& extents, const PxVec3& dir, float oneOverDir, MTDType& bValidMTD, float& tfirst, float& tlast)
{
	// Specialization of testAxis() for one of the box's own axes (XYZ = 0/1/2):
	// projecting onto a coordinate axis reduces dot products to component reads,
	// and the reciprocal of dir[XYZ] is supplied precomputed by the caller.

	// Project the three triangle vertices and keep their min/max
	const float d0t = tri.verts[0][XYZ];
	const float d1t = tri.verts[1][XYZ];
	const float d2t = tri.verts[2][XYZ];

	float TriMin = PxMin(d0t, d1t);
	float TriMax = PxMax(d0t, d1t);
	TriMin = PxMin(TriMin, d2t);
	TriMax = PxMax(TriMax, d2t);

	////////

	// Projected "radius" of the box on its own axis is just the matching extent
	const float BoxExt = extents[XYZ];

	// TEST_OVERLAP defines d0/d1/bIntersect from BoxExt/TriMin/TriMax and
	// accumulates the static-overlap status into bValidMTD.
	TEST_OVERLAP

	const float v = dir[XYZ];
	if(PxAbs(v) < 1.0E-6f)
		return bIntersect;	// motion (almost) orthogonal to the axis: overlap cannot change over time
	const float oneOverV = -oneOverDir;

//	float t0 = d0 * oneOverV;
//	float t1 = d1 * oneOverV;
//	if(t0 > t1)	TSwap(t0, t1);
	// Entry/exit times of the projection overlap on this axis
	const float t0_ = d0 * oneOverV;
	const float t1_ = d1 * oneOverV;
	float t0 = PxMin(t0_, t1_);
	float t1 = PxMax(t0_, t1_);

	// Clip against the accumulated interval; empty interval => no hit possible
	if(t0 > tlast)	return false;
	if(t1 < tfirst)	return false;
//	if(t1 < tlast)	tlast = t1;
	tlast = PxMin(t1, tlast);
//	if(t0 > tfirst)	tfirst = t0;
	tfirst = PxMax(t0, tfirst);
	return true;
}
// Full SAT sweep test: 13 candidate axes (triangle normal, 3 box axes,
// 9 edge cross-products). Returns 1 and writes 'tcoll' on impact, 0 otherwise.
PX_FORCE_INLINE int testSeparationAxes(	const PxTriangle& tri, const PxVec3& extents,
										const PxVec3& normal, const PxVec3& dir, const PxVec3& oneOverDir, float tmax, float& tcoll)
{
	// True while the shapes statically overlap on every axis tested so far
	bool overlapOnAllAxes = true;

	// Time interval during which the projections overlap on all axes tested so far
	float tEnter = -FLT_MAX;
	float tExit = FLT_MAX;

	// Axis 1: triangle normal
	if(!testAxis(tri, extents, dir, normal, overlapOnAllAxes, tEnter, tExit))
		return 0;

	// Axes 2-4: box face normals (axis-aligned in box space)
	if(!testAxisXYZ<0>(tri, extents, dir, oneOverDir.x, overlapOnAllAxes, tEnter, tExit))
		return 0;
	if(!testAxisXYZ<1>(tri, extents, dir, oneOverDir.y, overlapOnAllAxes, tEnter, tExit))
		return 0;
	if(!testAxisXYZ<2>(tri, extents, dir, oneOverDir.z, overlapOnAllAxes, tEnter, tExit))
		return 0;

	// Axes 5-13: cross products of each triangle edge with each box axis.
	// Near-zero axes (edge parallel to the box axis) are skipped.
	for(PxU32 i=0; i<3; i++)
	{
		const PxU32 next = (i==2) ? 0 : i+1;
		const PxVec3 triEdge = tri.verts[next] - tri.verts[i];

		const PxVec3 sepX = cross100(triEdge);
		if((sepX.dot(sepX))>=1.0E-6f && !testAxis(tri, extents, dir, sepX, overlapOnAllAxes, tEnter, tExit))
			return 0;

		const PxVec3 sepY = cross010(triEdge);
		if((sepY.dot(sepY))>=1.0E-6f && !testAxis(tri, extents, dir, sepY, overlapOnAllAxes, tEnter, tExit))
			return 0;

		const PxVec3 sepZ = cross001(triEdge);
		if((sepZ.dot(sepZ))>=1.0E-6f && !testAxis(tri, extents, dir, sepZ, overlapOnAllAxes, tEnter, tExit))
			return 0;
	}

	// Overlap interval must intersect the sweep interval [0, tmax]
	if(tEnter > tmax || tExit < 0.0f)
		return 0;

	if(tEnter <= 0.0f)
	{
		// Initial overlap: valid only if every axis reported static overlap
		if(!overlapOnAllAxes)
			return 0;
		tcoll = 0.0f;
	}
	else
		tcoll = tEnter;

	return 1;
}
//! Inlined version of triBoxSweepTestBoxSpace. See that other function for comments.
//! Inlined version of triBoxSweepTestBoxSpace. See that other function for comments.
PX_FORCE_INLINE int triBoxSweepTestBoxSpace_inlined(const PxTriangle& tri, const PxVec3& extents, const PxVec3& dir, const PxVec3& oneOverDir, float tmax, float& toi, PxU32 doBackfaceCulling)
{
	// Non-unit triangle normal, used both for culling and as the first SAT axis
	PxVec3 unnormalizedNormal;
	tri.denormalizedNormal(unnormalizedNormal);

	// Backface culling. ">=" is important: it also rejects triangles whose normal
	// is exactly orthogonal to the motion.
	if(doBackfaceCulling && (unnormalizedNormal.dot(dir)) >= 0.0f)
		return 0;

	// The SAT test will properly detect initial overlaps, no need for extra tests
	return testSeparationAxes(tri, extents, unnormalizedNormal, dir, oneOverDir, tmax, toi);
}
/**
Sweeps a box against a triangle, using a 'SAT' approach (Separating Axis Theorem).
The test is performed in box-space, i.e. the box is axis-aligned and its center is (0,0,0). In other words it is
defined by its extents alone. The triangle must have been transformed to this "box-space" before calling the function.
\param tri [in] triangle in box-space
\param extents [in] box extents
\param dir [in] sweep direction. Does not need to be normalized.
\param oneOverDir [in] precomputed inverse of sweep direction
\param tmax [in] sweep length
\param toi [out] time of impact/impact distance. Does not need to be initialized before calling the function.
\param doBackfaceCulling [in] true to enable backface culling, false for double-sided triangles
\return non-zero value if an impact has been found (in which case returned 'toi' value is valid)
*/
int triBoxSweepTestBoxSpace(const PxTriangle& tri, const PxVec3& extents, const PxVec3& dir, const PxVec3& oneOverDir, float tmax, float& toi, bool doBackfaceCulling);
} // namespace Gu
}
#endif
| 7,962 | C | 32.599156 | 192 | 0.686385 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/sweep/GuSweepCapsuleTriangle.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_SWEEP_CAPSULE_TRIANGLE_H
#define GU_SWEEP_CAPSULE_TRIANGLE_H
#include "foundation/PxVec3.h"
#include "PxQueryReport.h"
namespace physx
{
class PxTriangle;
namespace Gu
{
class BoxPadded;
class Capsule;
/**
Sweeps a capsule against a set of triangles.
\param nbTris [in] number of triangles in input array
\param triangles [in] array of input triangles
\param capsule [in] the capsule
\param unitDir [in] sweep's unit direcion
\param distance [in] sweep's length
\param cachedIndex [in] cached triangle index, or NULL. Cached triangle will be tested first.
\param hit [out] results
\param triNormalOut [out] triangle normal
\param hitFlags [in] query modifiers
\param isDoubleSided [in] true if input triangles are double-sided
\param cullBox [in] additional/optional culling box. Triangles not intersecting the box are quickly discarded.
\warning if using a cullbox, make sure all triangles can be safely V4Loaded (i.e. allocate 4 more bytes after last triangle)
\return true if an impact has been found
*/
bool sweepCapsuleTriangles_Precise( PxU32 nbTris, const PxTriangle* PX_RESTRICT triangles, // Triangle data
const Capsule& capsule, // Capsule data
const PxVec3& unitDir, const PxReal distance, // Ray data
const PxU32* PX_RESTRICT cachedIndex, // Cache data
PxGeomSweepHit& hit, PxVec3& triNormalOut, // Results
PxHitFlags hitFlags, bool isDoubleSided, // Query modifiers
const BoxPadded* cullBox=NULL); // Cull data
} // namespace Gu
}
#endif
| 3,292 | C | 43.499999 | 125 | 0.741495 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/sweep/GuSweepSphereCapsule.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuSweepSphereCapsule.h"
#include "GuSphere.h"
#include "GuCapsule.h"
#include "GuDistancePointSegment.h"
#include "GuSweepSphereSphere.h"
#include "GuIntersectionRayCapsule.h"
using namespace physx;
using namespace Gu;
bool Gu::sweepSphereCapsule(const Sphere& sphere, const Capsule& lss, const PxVec3& dir, PxReal length, PxReal& d, PxVec3& ip, PxVec3& nrm, PxHitFlags hitFlags)
{
const PxReal radiusSum = lss.radius + sphere.radius;
if(!(hitFlags & PxHitFlag::eASSUME_NO_INITIAL_OVERLAP))
{
// PT: test if shapes initially overlap
if(distancePointSegmentSquared(lss.p0, lss.p1, sphere.center)<radiusSum*radiusSum)
{
d = 0.0f;
nrm = -dir;
return true;
}
}
if(lss.p0 == lss.p1)
{
// Sphere vs. sphere
if(sweepSphereSphere(sphere.center, sphere.radius, lss.p0, lss.radius, -dir*length, d, nrm))
{
d*=length;
// if(hitFlags & PxHitFlag::ePOSITION) // PT: TODO
ip = sphere.center + nrm * sphere.radius;
return true;
}
return false;
}
// Create inflated capsule
Capsule Inflated(lss.p0, lss.p1, radiusSum);
// Raycast against it
PxReal t = 0.0f;
if(intersectRayCapsule(sphere.center, dir, Inflated, t))
{
if(t>=0.0f && t<=length)
{
d = t;
// PT: TODO:
// const PxIntBool needsImpactPoint = hitFlags & PxHitFlag::ePOSITION;
// if(needsImpactPoint || hitFlags & PxHitFlag::eNORMAL)
{
// Move capsule against sphere
const PxVec3 tdir = t*dir;
Inflated.p0 -= tdir;
Inflated.p1 -= tdir;
// Compute closest point between moved capsule & sphere
distancePointSegmentSquared(Inflated, sphere.center, &t);
Inflated.computePoint(ip, t);
// Normal
nrm = (ip - sphere.center);
nrm.normalize();
// if(needsImpactPoint) // PT: TODO
ip -= nrm * lss.radius;
}
return true;
}
}
return false;
}
| 3,516 | C++ | 33.145631 | 160 | 0.717861 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/sweep/GuSweepBoxBox.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuSweepBoxBox.h"
#include "GuBox.h"
#include "GuIntersectionBoxBox.h"
#include "GuIntersectionRayBox.h"
#include "GuIntersectionEdgeEdge.h"
#include "GuSweepSharedTests.h"
#include "foundation/PxMat34.h"
#include "GuSweepTriangleUtils.h"
#include "GuInternal.h"
using namespace physx;
using namespace Gu;
namespace
{
	// PT: TODO: get rid of this copy
	// Small inflation coefficient used to fatten box edges before edge-edge tests.
	static const PxReal gFatBoxEdgeCoeff = 0.01f;

	// PT: TODO: get rid of this copy
	// Outward face normals of a box in local space, indexed by the plane index
	// returned by intersectRayAABB().
	static const PxVec3 gNearPlaneNormal[] =
	{
		PxVec3(1.0f, 0.0f, 0.0f),
		PxVec3(0.0f, 1.0f, 0.0f),
		PxVec3(0.0f, 0.0f, 1.0f),
		PxVec3(-1.0f, 0.0f, 0.0f),
		PxVec3(0.0f, -1.0f, 0.0f),
		PxVec3(0.0f, 0.0f, -1.0f)
	};

#define INVSQRT2 0.707106781188f //!< 1 / sqrt(2)

	// Per-edge normals of a box in local space, one entry per edge (edge order
	// matches the vertex pairs noted in the trailing comments / getBoxEdges()).
	// Fixed: declared const — the table is read-only and is exposed through a
	// const pointer by getBoxLocalEdgeNormals(), so a mutable global was unnecessary.
	static const PxVec3 EdgeNormals[] =
	{
		PxVec3(0, -INVSQRT2, -INVSQRT2), // 0-1
		PxVec3(INVSQRT2, 0, -INVSQRT2), // 1-2
		PxVec3(0, INVSQRT2, -INVSQRT2), // 2-3
		PxVec3(-INVSQRT2, 0, -INVSQRT2), // 3-0
		PxVec3(0, INVSQRT2, INVSQRT2), // 7-6
		PxVec3(INVSQRT2, 0, INVSQRT2), // 6-5
		PxVec3(0, -INVSQRT2, INVSQRT2), // 5-4
		PxVec3(-INVSQRT2, 0, INVSQRT2), // 4-7
		PxVec3(INVSQRT2, -INVSQRT2, 0), // 1-5
		PxVec3(INVSQRT2, INVSQRT2, 0), // 6-2
		PxVec3(-INVSQRT2, INVSQRT2, 0), // 3-7
		PxVec3(-INVSQRT2, -INVSQRT2, 0) // 4-0
	};

	// PT: TODO: get rid of this copy
	// Accessor for the local-space edge-normal table above.
	static const PxVec3* getBoxLocalEdgeNormals()
	{
		return EdgeNormals;
	}

	/**
	Returns world edge normal
	\param edgeIndex [in] 0 <= edge index < 12
	\param worldNormal [out] edge normal in world space
	*/
	static void computeBoxWorldEdgeNormal(const Box& box, PxU32 edgeIndex, PxVec3& worldNormal)
	{
		PX_ASSERT(edgeIndex<12);
		// Rotate the precomputed local-space edge normal into world space.
		worldNormal = box.rotate(getBoxLocalEdgeNormals()[edgeIndex]);
	}
}
// ### optimize! and refactor. And optimize for aabbs
// Sweeps box0 along 'dir' over distance 'length' against static box1 and reports the earliest
// hit in 'sweepHit'. The first contact between two convex boxes can only come from one of three
// feature pairs, each tested below:
//   col == 0 : a vertex of box1 hits a face of box0 (ray cast backwards against box0)
//   col == 1 : a vertex of box0 hits a face of box1
//   col == 2 : an edge of box0 hits an edge of box1
// Returns true if a hit was found within 'length' (or on initial overlap, unless
// eASSUME_NO_INITIAL_OVERLAP is set).
bool Gu::sweepBoxBox(const Box& box0, const Box& box1, const PxVec3& dir, PxReal length, PxHitFlags hitFlags, PxGeomSweepHit& sweepHit)
{
	if(!(hitFlags & PxHitFlag::eASSUME_NO_INITIAL_OVERLAP))
	{
		// PT: test if shapes initially overlap
		if(intersectOBBOBB(box0.extents, box0.center, box0.rot, box1.extents, box1.center, box1.rot, true))
		{
			// Initial overlap: zero-distance hit, normal opposed to the sweep direction.
			// Only eNORMAL is reported (no valid impact position in this case).
			sweepHit.flags = PxHitFlag::eNORMAL;
			sweepHit.distance = 0.0f;
			sweepHit.normal = -dir;
			return true;
		}
	}

	// World-space corner vertices of both boxes.
	PxVec3 boxVertices0[8]; box0.computeBoxPoints(boxVertices0);
	PxVec3 boxVertices1[8]; box1.computeBoxPoints(boxVertices1);

	// float MinDist = PX_MAX_F32;
	// Best (smallest) impact distance so far; initialized to the max sweep length so
	// hits beyond 'length' are rejected implicitly.
	PxReal MinDist = length;
	int col = -1;	// which feature-pair case produced the best hit (-1 = none)

	// In following VF tests:
	// - the direction is FW/BK since we project one box onto the other *and vice versa*
	// - the normal reaction is FW/BK for the same reason

	// Vertices1 against Box0
	{
		// We need:
		// - Box0 in local space
		const PxVec3 Min0 = -box0.extents;
		const PxVec3 Max0 = box0.extents;

		// - Vertices1 in Box0 space
		PxMat34 worldToBox0;
		computeWorldToBoxMatrix(worldToBox0, box0);

		// - the dir in Box0 space
		const PxVec3 localDir0 = worldToBox0.rotate(dir);

		const PxVec3* boxNormals0 = gNearPlaneNormal;

		for(PxU32 i=0; i<8; i++)
		{
			PxReal tnear, tfar;
			// Cast each vertex of box1 backwards (-localDir0) against box0's local AABB:
			// equivalent to sweeping box0 forward against that vertex.
			const int plane = intersectRayAABB(Min0, Max0, worldToBox0.transform(boxVertices1[i]), -localDir0, tnear, tfar);

			// Skip misses and hits behind the ray origin.
			if(plane==-1 || tnear<0.0f)
				continue;

			if(tnear <= MinDist)
			{
				MinDist = tnear;
				// The hit face normal, rotated back to world space. The impact point is the
				// (static) box1 vertex itself.
				sweepHit.normal = box0.rotate(boxNormals0[plane]);
				sweepHit.position = boxVertices1[i];
				col = 0;
			}
		}
	}

	// Vertices0 against Box1
	{
		// We need:
		// - Box1 in local space
		const PxVec3 Min1 = -box1.extents;
		const PxVec3 Max1 = box1.extents;

		// - Vertices0 in Box1 space
		PxMat34 worldToBox1;
		computeWorldToBoxMatrix(worldToBox1, box1);

		// - the dir in Box1 space
		const PxVec3 localDir1 = worldToBox1.rotate(dir);

		const PxVec3* boxNormals1 = gNearPlaneNormal;

		for(PxU32 i=0; i<8; i++)
		{
			PxReal tnear, tfar;
			// Here the moving vertices of box0 are cast forward against box1's local AABB.
			const int plane = intersectRayAABB(Min1, Max1, worldToBox1.transform(boxVertices0[i]), localDir1, tnear, tfar);

			if(plane==-1 || tnear<0.0f)
				continue;

			if(tnear <= MinDist)
			{
				MinDist = tnear;
				// Negated: the reported normal must oppose the motion of box0.
				sweepHit.normal = box1.rotate(-boxNormals1[plane]);
				// Moving vertex advanced to the time of impact.
				sweepHit.position = boxVertices0[i] + tnear * dir;
				col = 1;
			}
		}
	}

	// Edge-edge tests. p1s..p4s capture the winning (fattened) edge pair so the
	// normal can be recomputed after the loops.
	PxVec3 p1s, p2s, p3s, p4s;

	{
		const PxU8* PX_RESTRICT edges0 = getBoxEdges();
		const PxU8* PX_RESTRICT edges1 = getBoxEdges();

		PxVec3 edgeNormals0[12];
		PxVec3 edgeNormals1[12];
		for(PxU32 i=0; i<12; i++)
			computeBoxWorldEdgeNormal(box0, i, edgeNormals0[i]);
		for(PxU32 i=0; i<12; i++)
			computeBoxWorldEdgeNormal(box1, i, edgeNormals1[i]);

		// Loop through box edges
		for(PxU32 i=0; i<12; i++) // 12 edges
		{
			// Only edges of the moving box facing along the sweep direction can hit.
			if(!(edgeNormals0[i].dot(dir) >= 0.0f))
				continue;

			// Catch current box edge // ### one vertex already known using line-strips
			// Make it fat ###
			PxVec3 p1 = boxVertices0[edges0[i*2+0]];
			PxVec3 p2 = boxVertices0[edges0[i*2+1]];

			makeFatEdge(p1, p2, gFatBoxEdgeCoeff);

			// Loop through box edges
			for(PxU32 j=0;j<12;j++)
			{
				// Only edges of the static box facing against the sweep direction can be hit.
				if(edgeNormals1[j].dot(dir) >= 0.0f)
					continue;

				// Orientation culling
				// PT: this was commented for some reason, but it fixes the "stuck" bug reported by Ubi.
				// So I put it back. We'll have to see whether it produces Bad Things in particular cases.
				if(edgeNormals0[i].dot(edgeNormals1[j]) >= 0.0f)
					continue;

				// Catch current box edge
				// Make it fat ###
				PxVec3 p3 = boxVertices1[edges1[j*2+0]];
				PxVec3 p4 = boxVertices1[edges1[j*2+1]];

				makeFatEdge(p3, p4, gFatBoxEdgeCoeff);

				PxReal Dist;
				PxVec3 ip;
				if(intersectEdgeEdge(p1, p2, dir, p3, p4, Dist, ip))
				{
					if(Dist<=MinDist)
					{
						p1s = p1;
						p2s = p2;
						p3s = p3;
						p4s = p4;

						// Intersection point advanced to the time of impact.
						sweepHit.position = ip + Dist * dir;
						col = 2;
						MinDist = Dist;
					}
				}
			}
		}
	}

	if(col==-1)
		return false;

	if(col==2)
	{
		// Edge-edge case: derive a proper contact normal from the winning edge pair.
		computeEdgeEdgeNormal(sweepHit.normal, p1s, p2s-p1s, p3s, p4s-p3s, dir, MinDist);
		sweepHit.normal.normalize();
	}

	sweepHit.flags = PxHitFlag::eNORMAL|PxHitFlag::ePOSITION;
	sweepHit.distance = MinDist;

	return true;
}
| 7,686 | C++ | 27.365314 | 135 | 0.68319 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/sweep/GuSweepTriangleUtils.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_SWEEP_TRIANGLE_UTILS_H
#define GU_SWEEP_TRIANGLE_UTILS_H
#include "geometry/PxTriangle.h"
#include "PxQueryReport.h"
#include "GuSweepSharedTests.h"
#include "GuInternal.h"
namespace physx
{
namespace Gu
{
// PT: computes proper impact data for sphere-sweep-vs-tri, after the closest tri has been found.
void computeSphereTriImpactData(PxVec3& hit, PxVec3& normal, const PxVec3& center, const PxVec3& dir, float t, const PxTriangle& tri);
// PT: computes proper impact data for box-sweep-vs-tri, after the closest tri has been found.
void computeBoxTriImpactData(PxVec3& hit, PxVec3& normal, const PxVec3& boxExtents, const PxVec3& localDir, const PxTriangle& triInBoxSpace, PxReal impactDist);
// PT: computes impact normal between two edges. Produces better normals than just the EE cross product.
// This version properly computes the closest points between two colliding edges and makes a normal from these.
void computeEdgeEdgeNormal(PxVec3& normal, const PxVec3& p1, const PxVec3& p2_p1, const PxVec3& p3, const PxVec3& p4_p3, const PxVec3& dir, float d);
// PT: small function just to avoid duplicating the code.
// Returns index of first triangle we should process (when processing arrays of input triangles)
PX_FORCE_INLINE PxU32 getInitIndex(const PxU32* PX_RESTRICT cachedIndex, PxU32 nbTris)
{
PxU32 initIndex = 0; // PT: by default the first triangle to process is just the first one in the array
if(cachedIndex) // PT: but if we cached the last closest triangle from a previous call...
{
PX_ASSERT(*cachedIndex < nbTris);
PX_UNUSED(nbTris);
initIndex = *cachedIndex; // PT: ...then we should start with that one, to potentially shrink the ray as early as possible
}
return initIndex;
}
// PT: quick triangle rejection for sphere-based sweeps.
// Please refer to %SDKRoot%\InternalDocumentation\GU\cullTriangle.png for details & diagram.
PX_FORCE_INLINE bool cullTriangle(const PxVec3* PX_RESTRICT triVerts, const PxVec3& dir, PxReal radius, PxReal t, const PxReal dpc0)
{
// PT: project triangle on axis
const PxReal dp0 = triVerts[0].dot(dir);
const PxReal dp1 = triVerts[1].dot(dir);
const PxReal dp2 = triVerts[2].dot(dir);
// PT: keep min value = earliest possible impact distance
PxReal dp = dp0;
dp = physx::intrinsics::selectMin(dp, dp1);
dp = physx::intrinsics::selectMin(dp, dp2);
// PT: make sure we keep triangles that are about as close as best current distance
radius += 0.001f + GU_EPSILON_SAME_DISTANCE;
// PT: if earliest possible impact distance for this triangle is already larger than
// sphere's current best known impact distance, we can skip the triangle
if(dp>dpc0 + t + radius)
{
//PX_ASSERT(resx == 0.0f);
return false;
}
// PT: if triangle is fully located before the sphere's initial position, skip it too
const PxReal dpc1 = dpc0 - radius;
if(dp0<dpc1 && dp1<dpc1 && dp2<dpc1)
{
//PX_ASSERT(resx == 0.0f);
return false;
}
//PX_ASSERT(resx != 0.0f);
return true;
}
// PT: quick quad rejection for sphere-based sweeps. Same as for triangle, adapted for one more vertex.
PX_FORCE_INLINE bool cullQuad(const PxVec3* PX_RESTRICT quadVerts, const PxVec3& dir, PxReal radius, PxReal t, const PxReal dpc0)
{
// PT: project quad on axis
const PxReal dp0 = quadVerts[0].dot(dir);
const PxReal dp1 = quadVerts[1].dot(dir);
const PxReal dp2 = quadVerts[2].dot(dir);
const PxReal dp3 = quadVerts[3].dot(dir);
// PT: keep min value = earliest possible impact distance
PxReal dp = dp0;
dp = physx::intrinsics::selectMin(dp, dp1);
dp = physx::intrinsics::selectMin(dp, dp2);
dp = physx::intrinsics::selectMin(dp, dp3);
// PT: make sure we keep quads that are about as close as best current distance
radius += 0.001f + GU_EPSILON_SAME_DISTANCE;
// PT: if earliest possible impact distance for this quad is already larger than
// sphere's current best known impact distance, we can skip the quad
if(dp>dpc0 + t + radius)
return false;
// PT: if quad is fully located before the sphere's initial position, skip it too
const float dpc1 = dpc0 - radius;
if(dp0<dpc1 && dp1<dpc1 && dp2<dpc1 && dp3<dpc1)
return false;
return true;
}
// PT: computes distance between a point 'point' and a segment. The segment is defined as a starting point 'p0'
// and a direction vector 'dir' plus a length 't'. Segment's endpoint is p0 + dir * t.
//
// point
// o
// __/|
// __/ / |
// __/ / |(B)
// __/ (A)/ |
// __/ / | dir
// p0 o/---------o---------------o-- -->
// t (t<=fT) t (t>fT)
// return (A)^2 return (B)^2
//
// |<-------------->|
// fT
//
PX_FORCE_INLINE PxReal squareDistance(const PxVec3& p0, const PxVec3& dir, PxReal t, const PxVec3& point)
{
PxVec3 diff = point - p0;
PxReal fT = diff.dot(dir);
fT = physx::intrinsics::selectMax(fT, 0.0f);
fT = physx::intrinsics::selectMin(fT, t);
diff -= fT*dir;
return diff.magnitudeSquared();
}
// PT: quick triangle culling for sphere-based sweeps
// Please refer to %SDKRoot%\InternalDocumentation\GU\coarseCulling.png for details & diagram.
PX_FORCE_INLINE bool coarseCullingTri(const PxVec3& center, const PxVec3& dir, PxReal t, PxReal radius, const PxVec3* PX_RESTRICT triVerts)
{
// PT: compute center of triangle ### could be precomputed?
const PxVec3 triCenter = (triVerts[0] + triVerts[1] + triVerts[2]) * (1.0f/3.0f);
// PT: distance between the triangle center and the swept path (an LSS)
// Same as: distancePointSegmentSquared(center, center+dir*t, TriCenter);
PxReal d = PxSqrt(squareDistance(center, dir, t, triCenter)) - radius - 0.0001f;
if (d < 0.0f) // The triangle center lies inside the swept sphere
return true;
d*=d;
// PT: coarse capsule-vs-triangle overlap test ### distances could be precomputed?
if(1)
{
if(d <= (triCenter-triVerts[0]).magnitudeSquared())
return true;
if(d <= (triCenter-triVerts[1]).magnitudeSquared())
return true;
if(d <= (triCenter-triVerts[2]).magnitudeSquared())
return true;
}
else
{
const float d0 = (triCenter-triVerts[0]).magnitudeSquared();
const float d1 = (triCenter-triVerts[1]).magnitudeSquared();
const float d2 = (triCenter-triVerts[2]).magnitudeSquared();
float triRadius = physx::intrinsics::selectMax(d0, d1);
triRadius = physx::intrinsics::selectMax(triRadius, d2);
if(d <= triRadius)
return true;
}
return false;
}
// PT: quick quad culling for sphere-based sweeps. Same as for triangle, adapted for one more vertex.
PX_FORCE_INLINE bool coarseCullingQuad(const PxVec3& center, const PxVec3& dir, PxReal t, PxReal radius, const PxVec3* PX_RESTRICT quadVerts)
{
// PT: compute center of quad ### could be precomputed?
const PxVec3 quadCenter = (quadVerts[0] + quadVerts[1] + quadVerts[2] + quadVerts[3]) * (1.0f/4.0f);
// PT: distance between the quad center and the swept path (an LSS)
PxReal d = PxSqrt(squareDistance(center, dir, t, quadCenter)) - radius - 0.0001f;
if (d < 0.0f) // The quad center lies inside the swept sphere
return true;
d*=d;
// PT: coarse capsule-vs-quad overlap test ### distances could be precomputed?
if(1)
{
if(d <= (quadCenter-quadVerts[0]).magnitudeSquared())
return true;
if(d <= (quadCenter-quadVerts[1]).magnitudeSquared())
return true;
if(d <= (quadCenter-quadVerts[2]).magnitudeSquared())
return true;
if(d <= (quadCenter-quadVerts[3]).magnitudeSquared())
return true;
}
return false;
}
// PT: combined triangle culling for sphere-based sweeps
PX_FORCE_INLINE bool rejectTriangle(const PxVec3& center, const PxVec3& unitDir, PxReal curT, PxReal radius, const PxVec3* PX_RESTRICT triVerts, const PxReal dpc0)
{
if(!coarseCullingTri(center, unitDir, curT, radius, triVerts))
return true;
if(!cullTriangle(triVerts, unitDir, radius, curT, dpc0))
return true;
return false;
}
// PT: combined quad culling for sphere-based sweeps
PX_FORCE_INLINE bool rejectQuad(const PxVec3& center, const PxVec3& unitDir, PxReal curT, PxReal radius, const PxVec3* PX_RESTRICT quadVerts, const PxReal dpc0)
{
if(!coarseCullingQuad(center, unitDir, curT, radius, quadVerts))
return true;
if(!cullQuad(quadVerts, unitDir, radius, curT, dpc0))
return true;
return false;
}
PX_FORCE_INLINE bool shouldFlipNormal(const PxVec3& normal, bool meshBothSides, bool isDoubleSided, const PxVec3& triangleNormal, const PxVec3& dir)
{
// PT: this function assumes that input normal is opposed to the ray/sweep direction. This is always
// what we want except when we hit a single-sided back face with 'meshBothSides' enabled.
if(!meshBothSides || isDoubleSided)
return false;
PX_ASSERT(normal.dot(dir) <= 0.0f); // PT: if this fails, the logic below cannot be applied
PX_UNUSED(normal);
return triangleNormal.dot(dir) > 0.0f; // PT: true for back-facing hits
}
PX_FORCE_INLINE bool shouldFlipNormal(const PxVec3& normal, bool meshBothSides, bool isDoubleSided, const PxTriangle& triangle, const PxVec3& dir, const PxTransform* pose)
{
// PT: this function assumes that input normal is opposed to the ray/sweep direction. This is always
// what we want except when we hit a single-sided back face with 'meshBothSides' enabled.
if(!meshBothSides || isDoubleSided)
return false;
PX_ASSERT(normal.dot(dir) <= 0.0f); // PT: if this fails, the logic below cannot be applied
PX_UNUSED(normal);
PxVec3 triangleNormal;
triangle.denormalizedNormal(triangleNormal);
if(pose)
triangleNormal = pose->rotate(triangleNormal);
return triangleNormal.dot(dir) > 0.0f; // PT: true for back-facing hits
}
// PT: implements the spec for IO sweeps in a single place (to ensure consistency)
	// PT: implements the spec for IO sweeps in a single place (to ensure consistency)
	// IO = initial overlap: the swept shape already touches the geometry at distance 0.
	// Fills the hit with the conventional initial-overlap results: the given face index,
	// a normal opposed to the sweep direction, and zero distance.
	PX_FORCE_INLINE bool setInitialOverlapResults(PxGeomSweepHit& hit, const PxVec3& unitDir, PxU32 faceIndex)
	{
		// PT: please write these fields in the order they are listed in the struct.
		hit.faceIndex = faceIndex;
		hit.flags = PxHitFlag::eNORMAL|PxHitFlag::eFACE_INDEX;
		hit.normal = -unitDir;
		hit.distance = 0.0f;
		return true; // PT: true indicates a hit, saves some lines in calling code
	}
	// Computes the world-space impact position and normal for a box sweep against a
	// triangle given in box space, but only for the data actually requested via
	// 'inFlags' (ePOSITION / eNORMAL). The corresponding bits are OR'ed into
	// 'outFlags' for each field that was written.
	PX_FORCE_INLINE void computeBoxLocalImpact(	PxVec3& pos, PxVec3& normal, PxHitFlags& outFlags,
												const Box& box, const PxVec3& localDir, const PxTriangle& triInBoxSpace,
												const PxHitFlags inFlags, bool isDoubleSided, bool meshBothSides, PxReal impactDist)
	{
		if(inFlags & (PxHitFlag::eNORMAL|PxHitFlag::ePOSITION))
		{
			// Both quantities are computed in box space first, then transformed out.
			PxVec3 localPos, localNormal;
			computeBoxTriImpactData(localPos, localNormal, box.extents, localDir, triInBoxSpace, impactDist);

			if(inFlags & PxHitFlag::eNORMAL)
			{
				localNormal.normalize();

				// PT: doing this after the 'rotate' minimizes errors when normal and dir are close to perpendicular
				// ....but we must do it before the rotate now, because triangleNormal is in box space (and thus we
				// need the normal with the proper orientation, in box space. We can't fix it after it's been rotated
				// to box space.
				// Technically this one is only here because of the EE cross product in the feature-based sweep.
				// PT: TODO: revisit corresponding code in computeImpactData, get rid of ambiguity
				// PT: TODO: this may not be needed anymore
				// Ensure the normal opposes the sweep direction (still in box space here).
				if((localNormal.dot(localDir))>0.0f)
					localNormal = -localNormal;

				// PT: this one is to ensure the normal respects the mesh-both-sides/double-sided convention
				if(shouldFlipNormal(localNormal, meshBothSides, isDoubleSided, triInBoxSpace, localDir, NULL))
					localNormal = -localNormal;

				// Rotate (no translation) the direction vector into world space.
				normal = box.rotate(localNormal);
				outFlags |= PxHitFlag::eNORMAL;
			}

			if(inFlags & PxHitFlag::ePOSITION)
			{
				// Full transform (rotation + translation) for the impact point.
				pos = box.transform(localPos);
				outFlags |= PxHitFlag::ePOSITION;
			}
		}
	}
} // namespace Gu
}
#endif
| 13,699 | C | 39.532544 | 172 | 0.703555 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/cooking/GuCookingConvexMesh.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuCooking.h"
#include "GuCookingConvexMeshBuilder.h"
#include "GuCookingQuickHullConvexHullLib.h"
#include "GuConvexMesh.h"
#include "foundation/PxAlloca.h"
#include "foundation/PxFPU.h"
#include "common/PxInsertionCallback.h"
using namespace physx;
using namespace Gu;
///////////////////////////////////////////////////////////////////////////////
PX_IMPLEMENT_OUTPUT_ERROR
///////////////////////////////////////////////////////////////////////////////
// cook convex mesh from given desc, internal function to be shared between create/cook convex mesh
// cook convex mesh from given desc, internal function to be shared between create/cook convex mesh
// Validates the descriptor and cooking params, optionally computes the hull from the input
// point cloud (eCOMPUTE_CONVEX, via 'hullLib'), enforces the CPU/GPU vertex and face limits,
// and builds the mesh into 'meshBuilder'. On success '*condition' (if provided) receives the
// detailed result code; on failure it is left at eFAILURE (or eZERO_AREA_TEST_FAILED).
static bool cookConvexMeshInternal(const PxCookingParams& params, const PxConvexMeshDesc& desc_, ConvexMeshBuilder& meshBuilder, ConvexHullLib* hullLib, PxConvexMeshCookingResult::Enum* condition)
{
	// Pessimistic default: any early return below reports failure.
	if(condition)
		*condition = PxConvexMeshCookingResult::eFAILURE;

	if(!desc_.isValid())
		return outputError<PxErrorCode::eINVALID_PARAMETER>(__LINE__, "Cooking::cookConvexMesh: user-provided convex mesh descriptor is invalid!");

	if(params.areaTestEpsilon <= 0.0f)
		return outputError<PxErrorCode::eINVALID_PARAMETER>(__LINE__, "Cooking::cookConvexMesh: provided cooking parameter areaTestEpsilon is invalid!");

	if(params.planeTolerance < 0.0f)
		return outputError<PxErrorCode::eINVALID_PARAMETER>(__LINE__, "Cooking::cookConvexMesh: provided cooking parameter planeTolerance is invalid!");

	// Local mutable copy: the compute-convex path rewrites the index/polygon fields below.
	PxConvexMeshDesc desc = desc_;

	bool polygonsLimitReached = false;
	// the convex will be cooked from provided points
	if(desc_.flags & PxConvexFlag::eCOMPUTE_CONVEX)
	{
		PX_ASSERT(hullLib);

		// clean up the indices information, it could have been set by accident
		desc.flags &= ~PxConvexFlag::e16_BIT_INDICES;
		desc.indices.count = 0;
		desc.indices.data = NULL;
		desc.indices.stride = 0;
		desc.polygons.count = 0;
		desc.polygons.data = NULL;
		desc.polygons.stride = 0;

		PxConvexMeshCookingResult::Enum res = hullLib->createConvexHull();
		if(res == PxConvexMeshCookingResult::eSUCCESS || res == PxConvexMeshCookingResult::ePOLYGONS_LIMIT_REACHED)
		{
			// Remember the limit-reached condition so it can be reported after the build.
			if(res == PxConvexMeshCookingResult::ePOLYGONS_LIMIT_REACHED)
				polygonsLimitReached = true;

			// Fill the desc with the computed hull so the builder below can consume it.
			hullLib->fillConvexMeshDesc(desc);
		}
		else
		{
			if((res == PxConvexMeshCookingResult::eZERO_AREA_TEST_FAILED) && condition)
			{
				*condition = PxConvexMeshCookingResult::eZERO_AREA_TEST_FAILED;
			}

			return false;
		}
	}

	// Hard CPU-side limits: indices are stored in 8 bits.
	if(desc.points.count >= 256)
		return outputError<PxErrorCode::eINTERNAL_ERROR>(__LINE__, "Cooking::cookConvexMesh: user-provided hull must have less than 256 vertices!");

	if(desc.polygons.count >= 256)
		return outputError<PxErrorCode::eINTERNAL_ERROR>(__LINE__, "Cooking::cookConvexMesh: user-provided hull must have less than 256 faces!");

	// Stricter GPU limits: at most 64 vertices and 64 faces.
	if ((desc.flags & PxConvexFlag::eGPU_COMPATIBLE) || params.buildGPUData)
	{
		if (desc.points.count > 64)
			return outputError<PxErrorCode::eINTERNAL_ERROR>(__LINE__, "Cooking::cookConvexMesh: GPU-compatible user-provided hull must have less than 65 vertices!");

		if (desc.polygons.count > 64)
			return outputError<PxErrorCode::eINTERNAL_ERROR>(__LINE__, "Cooking::cookConvexMesh: GPU-compatible user-provided hull must have less than 65 faces!");
	}

	if(!meshBuilder.build(desc, params.gaussMapLimit, false, hullLib))
		return false;

	PxConvexMeshCookingResult::Enum result = PxConvexMeshCookingResult::eSUCCESS;
	if (polygonsLimitReached)
		result = PxConvexMeshCookingResult::ePOLYGONS_LIMIT_REACHED;

	// AD: we check this outside of the actual convex cooking because we can still cook a valid convex hull
	// but we won't be able to use it on GPU.
	if (((desc.flags & PxConvexFlag::eGPU_COMPATIBLE) || params.buildGPUData) && !meshBuilder.checkExtentRadiusRatio())
	{
		result = PxConvexMeshCookingResult::eNON_GPU_COMPATIBLE;
		outputError<PxErrorCode::eDEBUG_WARNING>(__LINE__, "Cooking::cookConvexMesh: GPU-compatible convex hull could not be built because of oblong shape. Will fall back to CPU collision, particles and deformables will not collide with this mesh!");
	}

	if(condition)
		*condition = result;
	return true;
}
// Returns a new hull-computation library when the descriptor requests eCOMPUTE_CONVEX,
// clamping the vertex/polygon limits to the GPU maximum (64) when GPU data is requested.
// Returns NULL when the hull is user-provided. Caller owns the returned object (PX_DELETE).
// May mutate 'desc' (the limit fields).
static ConvexHullLib* createHullLib(PxConvexMeshDesc& desc, const PxCookingParams& params)
{
	if(!(desc.flags & PxConvexFlag::eCOMPUTE_CONVEX))
		return NULL;

	// GRB supports 64 verts max
	if((desc.flags & PxConvexFlag::eGPU_COMPATIBLE) || params.buildGPUData)
	{
		const PxU16 gpuMaxVertsLimit = 64;
		const PxU16 gpuMaxFacesLimit = 64;
		desc.vertexLimit = PxMin(desc.vertexLimit, gpuMaxVertsLimit);
		desc.polygonLimit = PxMin(desc.polygonLimit, gpuMaxFacesLimit);
	}

	return PX_NEW(QuickHullConvexHullLib) (desc, params);
}
// Cooks a convex mesh from 'desc_' and serializes the result into 'stream'.
// On failure '*condition' (if provided) reports the failure reason.
// Single-exit variant: the hull library is released on every path.
bool immediateCooking::cookConvexMesh(const PxCookingParams& params, const PxConvexMeshDesc& desc_, PxOutputStream& stream, PxConvexMeshCookingResult::Enum* condition)
{
	PX_FPU_GUARD;

	// choose cooking library if needed
	PxConvexMeshDesc desc = desc_;
	ConvexHullLib* hullLib = createHullLib(desc, params);

	ConvexMeshBuilder meshBuilder(params.buildGPUData);

	bool ok = cookConvexMeshInternal(params, desc, meshBuilder, hullLib, condition);
	if(ok)
	{
		// save the cooked results into stream
		ok = meshBuilder.save(stream, platformMismatch());
		if(!ok && condition)
			*condition = PxConvexMeshCookingResult::eFAILURE;
	}

	PX_DELETE(hullLib);
	return ok;
}
// Cooks a convex mesh from 'desc_' and inserts it directly through 'insertionCallback'
// (no serialization round-trip). Returns the created mesh, or NULL on failure with
// '*condition' (if provided) reporting the reason.
// Single-exit variant: the hull library is released on every path.
PxConvexMesh* immediateCooking::createConvexMesh(const PxCookingParams& params, const PxConvexMeshDesc& desc_, PxInsertionCallback& insertionCallback, PxConvexMeshCookingResult::Enum* condition)
{
	PX_FPU_GUARD;

	// choose cooking library if needed
	PxConvexMeshDesc desc = desc_;
	ConvexHullLib* hullLib = createHullLib(desc, params);

	// cook the mesh
	ConvexMeshBuilder meshBuilder(params.buildGPUData);

	PxConvexMesh* convexMesh = NULL;
	if(cookConvexMeshInternal(params, desc, meshBuilder, hullLib, condition))
	{
		// copy the constructed data into the new mesh
		ConvexHullInitData meshData;
		meshBuilder.copy(meshData);

		// insert into physics
		convexMesh = static_cast<PxConvexMesh*>(insertionCallback.buildObjectFromData(PxConcreteType::eCONVEX_MESH, &meshData));
		if(!convexMesh && condition)
			*condition = PxConvexMeshCookingResult::eFAILURE;
	}

	PX_DELETE(hullLib);
	return convexMesh;
}
bool immediateCooking::validateConvexMesh(const PxCookingParams& params, const PxConvexMeshDesc& desc)
{
ConvexMeshBuilder mesh(params.buildGPUData);
return mesh.build(desc, params.gaussMapLimit, true);
}
bool immediateCooking::computeHullPolygons(const PxCookingParams& params, const PxSimpleTriangleMesh& mesh, PxAllocatorCallback& inCallback, PxU32& nbVerts, PxVec3*& vertices,
PxU32& nbIndices, PxU32*& indices, PxU32& nbPolygons, PxHullPolygon*& hullPolygons)
{
PxVec3* geometry = reinterpret_cast<PxVec3*>(PxAlloca(sizeof(PxVec3)*mesh.points.count));
immediateCooking::gatherStrided(mesh.points.data, geometry, mesh.points.count, sizeof(PxVec3), mesh.points.stride);
PxU32* topology = reinterpret_cast<PxU32*>(PxAlloca(sizeof(PxU32)*3*mesh.triangles.count));
if(mesh.flags & PxMeshFlag::e16_BIT_INDICES)
{
// conversion; 16 bit index -> 32 bit index & stride
PxU32* dest = topology;
const PxU32* pastLastDest = topology + 3*mesh.triangles.count;
const PxU8* source = reinterpret_cast<const PxU8*>(mesh.triangles.data);
while (dest < pastLastDest)
{
const PxU16 * trig16 = reinterpret_cast<const PxU16*>(source);
*dest++ = trig16[0];
*dest++ = trig16[1];
*dest++ = trig16[2];
source += mesh.triangles.stride;
}
}
else
{
immediateCooking::gatherStrided(mesh.triangles.data, topology, mesh.triangles.count, sizeof(PxU32) * 3, mesh.triangles.stride);
}
ConvexMeshBuilder meshBuilder(params.buildGPUData);
if(!meshBuilder.computeHullPolygons(mesh.points.count, geometry, mesh.triangles.count, topology, inCallback, nbVerts, vertices, nbIndices, indices, nbPolygons, hullPolygons))
return false;
return true;
}
| 9,695 | C++ | 37.023529 | 244 | 0.744714 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/cooking/GuCookingGrbTriangleMesh.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_COOKING_GRB_TRIANGLE_MESH_H
#define GU_COOKING_GRB_TRIANGLE_MESH_H
#include "foundation/PxPlane.h"
#include "foundation/PxSort.h"
#include "GuMeshData.h"
#include "GuTriangle.h"
#include "GuEdgeList.h"
#include "cooking/PxCooking.h"
#include "CmRadixSort.h"
//#define CHECK_OLD_CODE_VS_NEW_CODE
namespace physx
{
namespace Gu
{
	// 16-byte-aligned vector of four 32-bit unsigned ints.
	// NOTE(review): presumably mirrors the CUDA uint4 layout used by the GPU (GRB)
	// triangle-mesh code this header serves — confirm before changing alignment/layout.
	PX_ALIGN_PREFIX(16)
	struct uint4
	{
		unsigned int x, y, z, w;
	}
	PX_ALIGN_SUFFIX(16);
	// TODO avoroshilov: remove duplicate definitions
	// Sentinel values used in the per-edge adjacency results produced by findAdjacent():
	static const PxU32 BOUNDARY = 0xffffffff;		// no triangle shares the edge (mesh boundary)
	static const PxU32 NONCONVEX_FLAG = 0x80000000;	// high bit OR'ed into the triangle id when the shared edge is non-convex
#ifdef CHECK_OLD_CODE_VS_NEW_CODE
struct EdgeTriLookup
{
PxU32 edgeId0, edgeId1;
PxU32 triId;
bool operator < (const EdgeTriLookup& edge1) const
{
return edgeId0 < edge1.edgeId0 || (edgeId0 == edge1.edgeId0 && edgeId1 < edge1.edgeId1);
}
bool operator <=(const EdgeTriLookup& edge1) const
{
return edgeId0 < edge1.edgeId0 || (edgeId0 == edge1.edgeId0 && edgeId1 <= edge1.edgeId1);
}
};
static PxU32 binarySearch(const EdgeTriLookup* __restrict data, const PxU32 numElements, const EdgeTriLookup& value)
{
PxU32 left = 0;
PxU32 right = numElements;
while ((right - left) > 1)
{
const PxU32 pos = (left + right) / 2;
const EdgeTriLookup& element = data[pos];
if (element <= value)
{
left = pos;
}
else
{
right = pos;
}
}
return left;
}
// slightly different behavior from collide2: boundary edges are filtered out
	// slightly different behavior from collide2: boundary edges are filtered out
	// Finds the triangle adjacent to edge (i0, i1) of triangle 'triangleIndex' using the
	// sorted EdgeTriLookup table. Returns:
	// - BOUNDARY when no other triangle shares the edge,
	// - NONCONVEX_FLAG | triId when a sharing triangle's opposite vertex lies on/above
	//   'plane' and its normal agrees with the plane normal (non-convex edge),
	// - otherwise the sharing triangle whose normal is most aligned with 'plane'.
	static PxU32 findAdjacent(const PxVec3* triVertices, const PxVec3* triNormals, const IndexedTriangle32* triIndices,
		PxU32 nbTris, PxU32 i0, PxU32 i1, const PxPlane& plane,
		EdgeTriLookup* triLookups, PxU32 triangleIndex)
	{
		PxU32 result = BOUNDARY;
		PxReal bestCos = -FLT_MAX;

		// Edge keys are stored with sorted vertex ids so both windings map to the same key.
		EdgeTriLookup lookup;
		lookup.edgeId0 = PxMin(i0, i1);
		lookup.edgeId1 = PxMax(i0, i1);

		// Binary search returns *an* entry with this key; rewind to the first one.
		PxU32 startIndex = binarySearch(triLookups, nbTris * 3, lookup);

		for (PxU32 a = startIndex; a > 0; --a)
		{
			if (triLookups[a - 1].edgeId0 == lookup.edgeId0 && triLookups[a - 1].edgeId1 == lookup.edgeId1)
				startIndex = a - 1;
			else
				break;
		}

		// Scan the run of entries sharing this edge key.
		for (PxU32 a = startIndex; a < nbTris * 3; ++a)
		{
			const EdgeTriLookup& edgeTri = triLookups[a];

			// Past the end of the run: no more triangles share this edge.
			if (edgeTri.edgeId0 != lookup.edgeId0 || edgeTri.edgeId1 != lookup.edgeId1)
				break;

			// Skip the query triangle itself.
			if (edgeTri.triId == triangleIndex)
				continue;

			const IndexedTriangle32& triIdx = triIndices[edgeTri.triId];
			const PxU32 vIdx0 = triIdx.mRef[0];
			const PxU32 vIdx1 = triIdx.mRef[1];
			const PxU32 vIdx2 = triIdx.mRef[2];

			// Vertex of the sharing triangle that is not on the shared edge.
			const PxU32 other = vIdx0 + vIdx1 + vIdx2 - (i0 + i1);

			// Cosine of the angle between the query plane normal and the candidate's normal.
			const PxReal c = plane.n.dot(triNormals[edgeTri.triId]);

			// Opposite vertex on/above the plane with agreeing normals: non-convex edge.
			if (plane.distance(triVertices[other]) >= 0 && c > 0.f)
				return NONCONVEX_FLAG | edgeTri.triId;

			// Otherwise keep the most-aligned (most convex) neighbor found so far.
			if (c>bestCos)
			{
				bestCos = c;
				result = edgeTri.triId;
			}
		}

		return result;
	}
#endif
// Scans the candidate triangles sharing one edge (provided via faceByEdge, nbTris entries)
// and returns the neighbor of 'triangleIndex' across edge (i0,i1):
//  - NONCONVEX_FLAG | index as soon as a non-convex configuration is found (the candidate's
//    opposite vertex is on/above 'plane' and its normal agrees with the plane normal),
//  - otherwise the candidate whose normal is best aligned with the plane normal,
//  - BOUNDARY if no candidate other than the query triangle exists.
static PxU32 findAdjacent(const PxVec3* triVertices, const PxVec3* triNormals, const IndexedTriangle32* triIndices, const PxU32* faceByEdge, PxU32 nbTris, PxU32 i0, PxU32 i1, const PxPlane& plane, PxU32 triangleIndex)
{
	PxU32 best = BOUNDARY;
	PxReal maxCos = -FLT_MAX;

	for(PxU32 k=0; k<nbTris; k++)
	{
		const PxU32 candidate = faceByEdge[k];
		if(candidate == triangleIndex)
			continue;	// never report the triangle we are finding neighbors for

		const IndexedTriangle32& tri = triIndices[candidate];

		// Index-sum trick: the three indices minus the shared edge's indices yield
		// the vertex opposite to the edge.
		const PxU32 opposite = tri.mRef[0] + tri.mRef[1] + tri.mRef[2] - (i0 + i1);

		const PxReal cosAngle = plane.n.dot(triNormals[candidate]);

		if(plane.distance(triVertices[opposite]) >= 0 && cosAngle > 0.f)
			return NONCONVEX_FLAG | candidate;

		if(cosAngle > maxCos)
		{
			maxCos = cosAngle;
			best = candidate;
		}
	}
	return best;
}
// Builds, for each triangle, the indices of its three edge-adjacent triangles (x/y/z of the
// output uint4, in edge order v0-v1, v1-v2, v2-v0; w is unused and zeroed). Each entry is a
// triangle index, optionally or'ed with NONCONVEX_FLAG, or BOUNDARY when the edge has no
// neighbor. tempNormalsPerTri_prealloc is caller-provided scratch of nbTris entries, filled
// with per-triangle unit normals as a side effect.
static void buildAdjacencies(uint4* triAdjacencies, PxVec3* tempNormalsPerTri_prealloc, const PxVec3* triVertices, const IndexedTriangle32* triIndices, PxU32 nbTris)
{
#ifdef CHECK_OLD_CODE_VS_NEW_CODE
	// Reference path: brute-force sorted edge-lookup table, kept only to cross-check the
	// EdgeList-based path below (results compared via the asserts at the bottom).
	{
		EdgeTriLookup* edgeLookups = PX_ALLOCATE(EdgeTriLookup, (nbTris * 3), "edgeLookups");

		// Compute per-triangle normals and register each triangle's three (sorted) edges.
		for (PxU32 i = 0; i < nbTris; i++)
		{
			const IndexedTriangle32& triIdx = triIndices[i];

			const PxU32 vIdx0 = triIdx.mRef[0];
			const PxU32 vIdx1 = triIdx.mRef[1];
			const PxU32 vIdx2 = triIdx.mRef[2];

			tempNormalsPerTri_prealloc[i] = (triVertices[vIdx1] - triVertices[vIdx0]).cross(triVertices[vIdx2] - triVertices[vIdx0]).getNormalized();

			edgeLookups[i * 3].edgeId0 = PxMin(vIdx0, vIdx1);
			edgeLookups[i * 3].edgeId1 = PxMax(vIdx0, vIdx1);
			edgeLookups[i * 3].triId = i;

			edgeLookups[i * 3 + 1].edgeId0 = PxMin(vIdx1, vIdx2);
			edgeLookups[i * 3 + 1].edgeId1 = PxMax(vIdx1, vIdx2);
			edgeLookups[i * 3 + 1].triId = i;

			edgeLookups[i * 3 + 2].edgeId0 = PxMin(vIdx0, vIdx2);
			edgeLookups[i * 3 + 2].edgeId1 = PxMax(vIdx0, vIdx2);
			edgeLookups[i * 3 + 2].triId = i;
		}

		// Sort so that all triangles sharing an edge become contiguous (enables binarySearch).
		PxSort<EdgeTriLookup>(edgeLookups, PxU32(nbTris * 3));

		// Query the neighbor across each of the three edges of every triangle.
		for (PxU32 i = 0; i < nbTris; i++)
		{
			const IndexedTriangle32& triIdx = triIndices[i];

			const PxU32 vIdx0 = triIdx.mRef[0];
			const PxU32 vIdx1 = triIdx.mRef[1];
			const PxU32 vIdx2 = triIdx.mRef[2];

			const PxPlane triPlane(triVertices[vIdx0], tempNormalsPerTri_prealloc[i]);

			uint4 triAdjIdx;
			triAdjIdx.x = findAdjacent(triVertices, tempNormalsPerTri_prealloc, triIndices, nbTris, vIdx0, vIdx1, triPlane, edgeLookups, i);
			triAdjIdx.y = findAdjacent(triVertices, tempNormalsPerTri_prealloc, triIndices, nbTris, vIdx1, vIdx2, triPlane, edgeLookups, i);
			triAdjIdx.z = findAdjacent(triVertices, tempNormalsPerTri_prealloc, triIndices, nbTris, vIdx2, vIdx0, triPlane, edgeLookups, i);
			triAdjIdx.w = 0;
			triAdjacencies[i] = triAdjIdx;
		}

		PX_FREE(edgeLookups);
	}
#endif

	if(1)
	{
		// Main path: build an edge list (edge <-> triangle mapping) and query each edge's
		// triangle range directly instead of sorting/searching a lookup table.
		EDGELISTCREATE create;
		create.NbFaces = nbTris;
		create.DFaces = triIndices->mRef;
		create.WFaces = NULL;
		create.FacesToEdges = true;
		create.EdgesToFaces = true;
		// PT: important: do NOT set the vertices, it triggers computation of edge flags that we don't need
		//create.Verts = triVertices;

		EdgeList edgeList;
		if(edgeList.init(create))
		{
			// Precompute one unit normal per triangle into the caller-provided scratch buffer.
			for(PxU32 i=0; i<nbTris; i++)
			{
				const IndexedTriangle32& triIdx = triIndices[i];

				const PxU32 vIdx0 = triIdx.mRef[0];
				const PxU32 vIdx1 = triIdx.mRef[1];
				const PxU32 vIdx2 = triIdx.mRef[2];

				tempNormalsPerTri_prealloc[i] = (triVertices[vIdx1] - triVertices[vIdx0]).cross(triVertices[vIdx2] - triVertices[vIdx0]).getNormalized();
			}

			const EdgeTriangleData* edgeTriangleData = edgeList.getEdgeTriangles();
			const EdgeDescData* edgeToTriangle = edgeList.getEdgeToTriangles();
			const PxU32* faceByEdge = edgeList.getFacesByEdges();
			PX_ASSERT(edgeList.getNbFaces()==nbTris);

			for(PxU32 i=0; i<nbTris; i++)
			{
				const IndexedTriangle32& triIdx = triIndices[i];

				const PxU32 vIdx0 = triIdx.mRef[0];
				const PxU32 vIdx1 = triIdx.mRef[1];
				const PxU32 vIdx2 = triIdx.mRef[2];

				const PxPlane triPlane(triVertices[vIdx0], tempNormalsPerTri_prealloc[i]);

				// Per-edge descriptors give Offset/Count into faceByEdge, i.e. the range of
				// triangles sharing each of this triangle's three edges.
				const EdgeTriangleData& edgeTri = edgeTriangleData[i];
				const EdgeDescData& edgeData0 = edgeToTriangle[edgeTri.mLink[0] & MSH_EDGE_LINK_MASK];
				const EdgeDescData& edgeData1 = edgeToTriangle[edgeTri.mLink[1] & MSH_EDGE_LINK_MASK];
				const EdgeDescData& edgeData2 = edgeToTriangle[edgeTri.mLink[2] & MSH_EDGE_LINK_MASK];

				uint4 triAdjIdx;
				triAdjIdx.x = findAdjacent(triVertices, tempNormalsPerTri_prealloc, triIndices, faceByEdge + edgeData0.Offset, edgeData0.Count, vIdx0, vIdx1, triPlane, i);
				triAdjIdx.y = findAdjacent(triVertices, tempNormalsPerTri_prealloc, triIndices, faceByEdge + edgeData1.Offset, edgeData1.Count, vIdx1, vIdx2, triPlane, i);
				triAdjIdx.z = findAdjacent(triVertices, tempNormalsPerTri_prealloc, triIndices, faceByEdge + edgeData2.Offset, edgeData2.Count, vIdx2, vIdx0, triPlane, i);
				triAdjIdx.w = 0;

#ifdef CHECK_OLD_CODE_VS_NEW_CODE
				// Both paths must agree edge-for-edge.
				PX_ASSERT(triAdjacencies[i].x == triAdjIdx.x);
				PX_ASSERT(triAdjacencies[i].y == triAdjIdx.y);
				PX_ASSERT(triAdjacencies[i].z == triAdjIdx.z);
#endif
				triAdjacencies[i] = triAdjIdx;
			}
		}
		// NOTE(review): if edgeList.init() fails, triAdjacencies is left unwritten — confirm callers handle that.
	}
}
}
}
#endif
| 9,740 | C | 31.254967 | 217 | 0.715708 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/cooking/GuCookingHF.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuCooking.h"
#include "GuHeightField.h"
#include "foundation/PxFPU.h"
#include "common/PxInsertionCallback.h"
#include "CmUtils.h"
using namespace physx;
using namespace Gu;
// Cooks a heightfield from 'desc' and serializes it into 'stream'.
// Returns false (after reporting an error) when the descriptor is invalid or cooking fails.
bool immediateCooking::cookHeightField(const PxHeightFieldDesc& desc, PxOutputStream& stream)
{
	if(!desc.isValid())
		return PxGetFoundation().error(PxErrorCode::eINVALID_PARAMETER, PX_FL, "Cooking::cookHeightField: user-provided heightfield descriptor is invalid!");

	PX_FPU_GUARD;

	HeightField hf(NULL);

	// Load then save; short-circuit skips the save when loading fails.
	// Single cleanup path: memory is released in all cases.
	const bool success = hf.loadFromDesc(desc) && hf.save(stream, platformMismatch());

	hf.releaseMemory();
	return success;
}
// Cooks a heightfield from 'desc' and immediately inserts it through 'insertionCallback',
// returning the runtime object (or NULL on failure).
PxHeightField* immediateCooking::createHeightField(const PxHeightFieldDesc& desc, PxInsertionCallback& insertionCallback)
{
	if(!desc.isValid())
	{
		PxGetFoundation().error(PxErrorCode::eINVALID_PARAMETER, PX_FL, "Cooking::createHeightField: user-provided heightfield descriptor is invalid!");
		return NULL;
	}

	PX_FPU_GUARD;

	// Cook into a temporary heightfield first.
	HeightField* cooked;
	PX_NEW_SERIALIZED(cooked, HeightField)(NULL);
	if(!cooked->loadFromDesc(desc))
	{
		PX_DELETE(cooked);
		return NULL;
	}

	// Let the insertion callback build the runtime heightfield around the cooked data.
	HeightField* result = static_cast<HeightField*>(insertionCallback.buildObjectFromData(PxConcreteType::eHEIGHTFIELD, &cooked->mData));
	if(!result)
	{
		PX_DELETE(cooked);
		return NULL;
	}

	// Mirror the bookkeeping members that are not part of mData.
	result->mSampleStride	= cooked->mSampleStride;
	result->mNbSamples		= cooked->mNbSamples;
	result->mMinHeight		= cooked->mMinHeight;
	result->mMaxHeight		= cooked->mMaxHeight;
	result->mModifyCount	= cooked->mModifyCount;

	PX_DELETE(cooked);
	return result;
}
| 3,394 | C++ | 32.613861 | 151 | 0.755745 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/cooking/GuCookingBVH.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuCooking.h"
#include "GuBVH.h"
#include "foundation/PxFPU.h"
#include "cooking/PxBVHDesc.h"
#include "common/PxInsertionCallback.h"
using namespace physx;
using namespace Gu;
static bool buildBVH(const PxBVHDesc& desc, BVHData& data, const char* errorMessage)
{
if(!desc.isValid())
return PxGetFoundation().error(PxErrorCode::eINVALID_PARAMETER, PX_FL, errorMessage);
BVHBuildStrategy bs;
if(desc.buildStrategy==PxBVHBuildStrategy::eFAST)
bs = BVH_SPLATTER_POINTS;
else if(desc.buildStrategy==PxBVHBuildStrategy::eDEFAULT)
bs = BVH_SPLATTER_POINTS_SPLIT_GEOM_CENTER;
else //if(desc.buildStrategy==PxBVHBuildStrategy::eSAH)
bs = BVH_SAH;
return data.build(desc.bounds.count, desc.bounds.data, desc.bounds.stride, desc.enlargement, desc.numPrimsPerLeaf, bs);
}
bool immediateCooking::cookBVH(const PxBVHDesc& desc, PxOutputStream& stream)
{
PX_FPU_GUARD;
BVHData bvhData;
if(!buildBVH(desc, bvhData, "Cooking::cookBVH: user-provided BVH descriptor is invalid!"))
return false;
return bvhData.save(stream, platformMismatch());
}
// Cooks a BVH from 'desc' and immediately inserts it through 'insertionCallback',
// returning the runtime object (or NULL on failure).
PxBVH* immediateCooking::createBVH(const PxBVHDesc& desc, PxInsertionCallback& insertionCallback)
{
	PX_FPU_GUARD;

	BVHData data;
	if(!buildBVH(desc, data, "Cooking::createBVH: user-provided BVH descriptor is invalid!"))
		return NULL;

	return static_cast<PxBVH*>(insertionCallback.buildObjectFromData(PxConcreteType::eBVH, &data));
}
| 3,110 | C++ | 39.93421 | 120 | 0.767524 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/cooking/GuCookingBigConvexDataBuilder.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_COOKING_BIG_CONVEX_DATA_BUILDER_H
#define GU_COOKING_BIG_CONVEX_DATA_BUILDER_H
#include "foundation/PxMemory.h"
#include "foundation/PxVecMath.h"
namespace physx
{
class BigConvexData;
class ConvexHullBuilder;
/**
\brief Cooking-time helper that fills a BigConvexData instance with acceleration data
for large convex hulls: a precomputed support-vertex map and per-vertex valencies.
Holds only references/pointers to the hull data; ownership stays with the caller.
*/
class BigConvexDataBuilder : public PxUserAllocated
{
	public:
				BigConvexDataBuilder(const Gu::ConvexHullData* hull, BigConvexData* gm, const PxVec3* hullVerts);
				~BigConvexDataBuilder();
	// Support vertex map
		// Precomputes the support-vertex map at the given subdivision level.
		bool	precompute(PxU32 subdiv);
		bool	initialize();
		bool	save(PxOutputStream& stream, bool platformMismatch)	const;
		bool	computeValencies(const ConvexHullBuilder& meshBuilder);
	//~Support vertex map

	// Valencies
		bool	saveValencies(PxOutputStream& stream, bool platformMismatch)	const;
	//~Valencies

	protected:
	// Samples one direction into the support map (see implementation for details).
	PX_FORCE_INLINE	void	precomputeSample(const PxVec3& dir, PxU8& startIndex, float negativeDir);

	private:
		const Gu::ConvexHullData*	mHull;		// hull being processed (not owned)
		BigConvexData*				mSVM;		// destination for the cooked data (not owned)
		const PxVec3*				mHullVerts;	// hull vertex array (not owned)
};
}
#endif // BIG_CONVEX_DATA_BUILDER_H
| 2,761 | C | 37.901408 | 106 | 0.750453 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/cooking/GuCookingVolumeIntegration.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_COOKING_VOLUME_INTEGRATION_H
#define GU_COOKING_VOLUME_INTEGRATION_H
/** \addtogroup foundation
@{
*/
#include "foundation/Px.h"
#include "foundation/PxVec3.h"
#include "foundation/PxMat33.h"
namespace physx
{
class PxSimpleTriangleMesh;
class PxConvexMeshDesc;
/**
\brief Data structure used to store mass properties.
*/
struct PxIntegrals
{
PxVec3 COM; //!< Center of mass
PxF64 mass; //!< Total mass
PxF64 inertiaTensor[3][3]; //!< Inertia tensor (mass matrix) relative to the origin
PxF64 COMInertiaTensor[3][3]; //!< Inertia tensor (mass matrix) relative to the COM
/**
\brief Retrieve the inertia tensor relative to the center of mass.
\param inertia Inertia tensor.
*/
void getInertia(PxMat33& inertia)
{
for(PxU32 j=0;j<3;j++)
{
for(PxU32 i=0;i<3;i++)
{
inertia(i,j) = PxF32(COMInertiaTensor[i][j]);
}
}
}
/**
\brief Retrieve the inertia tensor relative to the origin.
\param inertia Inertia tensor.
*/
void getOriginInertia(PxMat33& inertia)
{
for(PxU32 j=0;j<3;j++)
{
for(PxU32 i=0;i<3;i++)
{
inertia(i,j) = PxF32(inertiaTensor[i][j]);
}
}
}
};
bool computeVolumeIntegrals(const PxSimpleTriangleMesh& mesh, PxReal density, PxIntegrals& integrals);
// specialized method taking polygons directly, so we don't need to compute and store triangles for each polygon
bool computeVolumeIntegralsEberly(const PxConvexMeshDesc& mesh, PxReal density, PxIntegrals& integrals, const PxVec3& origin, bool useSimd); // Eberly simplified method
}
/** @} */
#endif
| 3,254 | C | 33.263158 | 171 | 0.734481 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/cooking/GuCookingVolumeIntegration.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
/*
* This code computes volume integrals needed to compute mass properties of polyhedral bodies.
* Based on public domain code by Brian Mirtich.
*/
#include "foundation/PxMemory.h"
#include "geometry/PxSimpleTriangleMesh.h"
#include "cooking/PxConvexMeshDesc.h"
#include "GuCookingVolumeIntegration.h"
#include "GuConvexMeshData.h"
#include "foundation/PxUtilities.h"
#include "foundation/PxVecMath.h"
// PT: code archeology: this initially came from ICE (IceVolumeIntegration.h/cpp). Consider revisiting.
namespace physx
{
using namespace aos;
namespace
{
/**
 * Computes the volume integrals (volume, first/second moments, products of inertia) of a
 * triangle mesh and converts them into mass properties — based on Brian Mirtich's
 * public-domain polyhedral mass-properties code (see comment above).
 */
class VolumeIntegrator
{
	PX_NOCOPY(VolumeIntegrator)
	public:
				// 'density' scales the geometric integrals into mass units; 'mesh' is referenced, not copied.
				VolumeIntegrator(const PxSimpleTriangleMesh& mesh, PxF64 density)	: mMass(0.0), mDensity(density), mMesh(mesh)	{}
				~VolumeIntegrator()	{}

		bool	computeVolumeIntegrals(PxIntegrals& ir);
	private:
		// NOTE(review): 'Normal' is not referenced in the visible code — possibly legacy; confirm before removing.
		struct Normal
		{
			PxVec3	normal;
			PxF32	w;
		};

		// Per-triangle working data: plane equation (Norm, w) and vertex indices.
		struct Face
		{
			PxF64	Norm[3];
			PxF64	w;
			PxU32	Verts[3];
		};

		// Data structures
		PxF64						mMass;		//!< Mass
		PxF64						mDensity;	//!< Density
		const PxSimpleTriangleMesh&	mMesh;

		// Axis permutation for the current face: projection onto the (alpha, beta) plane.
		PxU32						mA;			//!< Alpha
		PxU32						mB;			//!< Beta
		PxU32						mC;			//!< Gamma

		// Projection integrals (accumulated per face by computeProjectionIntegrals)
		PxF64						mP1;
		PxF64						mPa;		//!< Pi Alpha
		PxF64						mPb;		//!< Pi Beta
		PxF64						mPaa;		//!< Pi Alpha^2
		PxF64						mPab;		//!< Pi AlphaBeta
		PxF64						mPbb;		//!< Pi Beta^2
		PxF64						mPaaa;		//!< Pi Alpha^3
		PxF64						mPaab;		//!< Pi Alpha^2Beta
		PxF64						mPabb;		//!< Pi AlphaBeta^2
		PxF64						mPbbb;		//!< Pi Beta^3

		// Face integrals (derived from the projection integrals by computeFaceIntegrals)
		PxF64						mFa;		//!< FAlpha
		PxF64						mFb;		//!< FBeta
		PxF64						mFc;		//!< FGamma
		PxF64						mFaa;		//!< FAlpha^2
		PxF64						mFbb;		//!< FBeta^2
		PxF64						mFcc;		//!< FGamma^2
		PxF64						mFaaa;		//!< FAlpha^3
		PxF64						mFbbb;		//!< FBeta^3
		PxF64						mFccc;		//!< FGamma^3
		PxF64						mFaab;		//!< FAlpha^2Beta
		PxF64						mFbbc;		//!< FBeta^2Gamma
		PxF64						mFcca;		//!< FGamma^2Alpha

		// The 10 volume integrals
		PxF64						mT0;		//!< ~Total mass
		PxF64						mT1[3];		//!< Location of the center of mass
		PxF64						mT2[3];		//!< Moments of inertia
		PxF64						mTP[3];		//!< Products of inertia

		// Internal methods
//				bool				Init();
				PxVec3				computeCenterOfMass();
				void				computeInertiaTensor(PxF64* J);
				void				computeCOMInertiaTensor(PxF64* J);
				void				computeFaceNormal(Face & f, PxU32 * indices);
				void				computeProjectionIntegrals(const Face& f);
				void				computeFaceIntegrals(const Face& f);
};
#define X 0u
#define Y 1u
#define Z 2u
// Fills f.Norm (unit normal from the cross product of two consecutive edges, safely
// normalized) and f.w (plane offset so that n.p + w = 0 on the face plane) for the
// triangle whose vertex indices are given by 'indices'.
void VolumeIntegrator::computeFaceNormal(Face & f, PxU32 * indices)
{
	const PxU8* base = reinterpret_cast<const PxU8*>(mMesh.points.data);
	const PxU32 stride = mMesh.points.stride;

	// Strided vertex fetch (the mesh descriptor stores vertices with an arbitrary stride).
	const PxVec3& v0 = *reinterpret_cast<const PxVec3*>(base + stride * indices[0]);
	const PxVec3& v1 = *reinterpret_cast<const PxVec3*>(base + stride * indices[1]);
	const PxVec3& v2 = *reinterpret_cast<const PxVec3*>(base + stride * indices[2]);

	PxVec3 n = (v1 - v0).cross(v2 - v1);
	n.normalizeSafe();

	f.w = -PxF64(n.dot(v0));

	f.Norm[0] = PxF64(n.x);
	f.Norm[1] = PxF64(n.y);
	f.Norm[2] = PxF64(n.z);
}
/**
 * Computes volume integrals for a polyhedron by summing surface integrals over its faces.
 * Accumulates T0 (volume), T1 (first moments), T2 (second moments) and TP (products of
 * inertia), then converts them into mass, center of mass and both inertia tensors.
 * \param ir [out] a result structure.
 * \return true if success
 */
bool VolumeIntegrator::computeVolumeIntegrals(PxIntegrals& ir)
{
	// Clear all integrals
	mT0 = mT1[X] = mT1[Y] = mT1[Z] = mT2[X] = mT2[Y] = mT2[Z] = mTP[X] = mTP[Y] = mTP[Z] = 0;

	Face f;
	const PxU8* trigPointer = reinterpret_cast<const PxU8*>(mMesh.triangles.data);
	const PxU32 nbTris = mMesh.triangles.count;
	const PxU32 stride = mMesh.triangles.stride;

	for(PxU32 i=0;i<nbTris;i++, trigPointer += stride)
	{
		// Fetch vertex indices, handling both 16-bit and 32-bit index buffers.
		if (mMesh.flags & PxMeshFlag::e16_BIT_INDICES)
		{
			f.Verts[0] = (reinterpret_cast<const PxU16 *>(trigPointer))[0];
			f.Verts[1] = (reinterpret_cast<const PxU16 *>(trigPointer))[1];
			f.Verts[2] = (reinterpret_cast<const PxU16 *>(trigPointer))[2];
		}
		else
		{
			f.Verts[0] = (reinterpret_cast<const PxU32 *>(trigPointer)[0]);
			f.Verts[1] = (reinterpret_cast<const PxU32 *>(trigPointer)[1]);
			f.Verts[2] = (reinterpret_cast<const PxU32 *>(trigPointer)[2]);
		}

		if (mMesh.flags & PxMeshFlag::eFLIPNORMALS)
		{
			// Swap winding so the computed face normal is flipped.
			PxU32 t = f.Verts[1];
			f.Verts[1] = f.Verts[2];
			f.Verts[2] = t;
		}

		//compute face normal:
		computeFaceNormal(f,f.Verts);

		// Skip degenerate faces (normalizeSafe left a near-zero normal).
		if(f.Norm[X] * f.Norm[X] + f.Norm[Y] * f.Norm[Y] + f.Norm[Z] * f.Norm[Z] < 1e-20)
			continue;

		// Compute alpha/beta/gamma as the right-handed permutation of (x,y,z) that maximizes |n|
		const PxF64 nx = fabs(f.Norm[X]);
		const PxF64 ny = fabs(f.Norm[Y]);
		const PxF64 nz = fabs(f.Norm[Z]);
		if (nx > ny && nx > nz)	mC = X;
		else					mC = (ny > nz) ? Y : Z;
		mA = (mC + 1) % 3;
		mB = (mA + 1) % 3;

		// Compute face contribution
		computeFaceIntegrals(f);

		// Update integrals
		mT0 += f.Norm[X] * ((mA == X) ? mFa : ((mB == X) ? mFb : mFc));

		mT1[mA] += f.Norm[mA] * mFaa;
		mT1[mB] += f.Norm[mB] * mFbb;
		mT1[mC] += f.Norm[mC] * mFcc;

		mT2[mA] += f.Norm[mA] * mFaaa;
		mT2[mB] += f.Norm[mB] * mFbbb;
		mT2[mC] += f.Norm[mC] * mFccc;

		mTP[mA] += f.Norm[mA] * mFaab;
		mTP[mB] += f.Norm[mB] * mFbbc;
		mTP[mC] += f.Norm[mC] * mFcca;
	}

	// Normalization constants from the analytic integration.
	mT1[X] /= 2; mT1[Y] /= 2; mT1[Z] /= 2;
	mT2[X] /= 3; mT2[Y] /= 3; mT2[Z] /= 3;
	mTP[X] /= 2; mTP[Y] /= 2; mTP[Z] /= 2;

	// Fill result structure
	ir.COM = computeCenterOfMass();

	computeInertiaTensor(reinterpret_cast<PxF64*>(ir.inertiaTensor));
	computeCOMInertiaTensor(reinterpret_cast<PxF64*>(ir.COMInertiaTensor));

	ir.mass = mMass;	// mMass is computed inside computeCOMInertiaTensor()

	return true;
}
/**
 * Computes the center of mass from the accumulated integrals.
 * \return The center of mass (zero vector for a degenerate, zero-volume mesh).
 */
PxVec3 VolumeIntegrator::computeCenterOfMass()
{
	// mT0 is the volume integral; zero means there is no enclosed volume to average over.
	if(mT0 == 0.0)
		return PxVec3(0.0f);

	return PxVec3(	float(mT1[X] / mT0),
					float(mT1[Y] / mT0),
					float(mT1[Z] / mT0));
}
/**
 * Setups the inertia tensor relative to the origin.
 * \param it [out] destination for the 3x3 tensor (9 contiguous PxF64 values).
 */
void VolumeIntegrator::computeInertiaTensor(PxF64* it)
{
	PxF64 tensor[3][3];

	// Diagonal terms: moments of inertia about the coordinate axes.
	tensor[X][X] = mDensity * (mT2[Y] + mT2[Z]);
	tensor[Y][Y] = mDensity * (mT2[Z] + mT2[X]);
	tensor[Z][Z] = mDensity * (mT2[X] + mT2[Y]);

	// Off-diagonal terms: negated products of inertia (symmetric tensor).
	tensor[X][Y] = tensor[Y][X] = - mDensity * mTP[X];
	tensor[Y][Z] = tensor[Z][Y] = - mDensity * mTP[Y];
	tensor[Z][X] = tensor[X][Z] = - mDensity * mTP[Z];

	PxMemCopy(it, tensor, 9*sizeof(PxF64));
}
/**
 * Setups the inertia tensor relative to the COM.
 * Side effect: also computes mMass (= density * volume integral T0).
 * \param it [out] the returned inertia tensor.
 */
void VolumeIntegrator::computeCOMInertiaTensor(PxF64* it)
{
	PxF64 J[3][3];

	mMass = mDensity * mT0;

	const PxVec3 COM = computeCenterOfMass();
	const PxVec3 MassCOM(PxF32(mMass) * COM);
	// Componentwise m * COM_i * COM_i terms used by the parallel-axis translation below.
	const PxVec3 MassCOM2(MassCOM.x * COM.x, MassCOM.y * COM.y, MassCOM.z * COM.z);

	// Compute initial inertia tensor
	computeInertiaTensor(reinterpret_cast<PxF64*>(J));

	// Translate inertia tensor to center of mass
	// Huyghens' theorem:
	// Jx'x' = Jxx - m*(YG^2+ZG^2)
	// Jy'y' = Jyy - m*(ZG^2+XG^2)
	// Jz'z' = Jzz - m*(XG^2+YG^2)
	// XG, YG, ZG = new origin
	// YG^2+ZG^2 = dx^2
	J[X][X] -= PxF64(MassCOM2.y + MassCOM2.z);
	J[Y][Y] -= PxF64(MassCOM2.z + MassCOM2.x);
	J[Z][Z] -= PxF64(MassCOM2.x + MassCOM2.y);

	// Huyghens' theorem:
	// Jx'y' = Jxy - m*XG*YG
	// Jy'z' = Jyz - m*YG*ZG
	// Jz'x' = Jzx - m*ZG*XG
	// ### IS THE SIGN CORRECT ?
	// NOTE(review): the off-diagonal terms are *added* here while Huygens' formula above
	// subtracts — presumably this compensates the negation in computeInertiaTensor(); confirm.
	J[X][Y] = J[Y][X] += PxF64(MassCOM.x * COM.y);
	J[Y][Z] = J[Z][Y] += PxF64(MassCOM.y * COM.z);
	J[Z][X] = J[X][Z] += PxF64(MassCOM.z * COM.x);

	PxMemCopy(it, J, 9*sizeof(PxF64));
}
/**
 * Computes integrals over a face projection from the coordinates of the projections vertices.
 * The face is projected onto the (alpha, beta) plane selected by computeVolumeIntegrals
 * (the mA/mB/mC permutation); the mP* members accumulate line integrals over the projected
 * triangle's three edges, then get the closed-form normalization constants applied.
 * \param f [in] a face structure.
 */
void VolumeIntegrator::computeProjectionIntegrals(const Face& f)
{
	mP1 = mPa = mPb = mPaa = mPab = mPbb = mPaaa = mPaab = mPabb = mPbbb = 0.0;

	const PxU8* vertPointer = reinterpret_cast<const PxU8*>(mMesh.points.data);
	const PxU32 stride = mMesh.points.stride;

	// Walk the triangle's three edges (vertex i to vertex (i+1) mod 3).
	for(PxU32 i=0;i<3;i++)
	{
		const PxVec3& p0 = *reinterpret_cast<const PxVec3 *>(vertPointer + stride * (f.Verts[i]) );
		const PxVec3& p1 = *reinterpret_cast<const PxVec3 *>(vertPointer + stride * (f.Verts[(i+1) % 3]) );

		// Projected edge endpoints in (alpha, beta) coordinates.
		const PxF64 a0 = PxF64(p0[mA]);
		const PxF64 b0 = PxF64(p0[mB]);
		const PxF64 a1 = PxF64(p1[mA]);
		const PxF64 b1 = PxF64(p1[mB]);

		const PxF64 da = a1 - a0;			// DeltaA
		const PxF64 db = b1 - b0;			// DeltaB

		// Powers of the endpoint coordinates.
		const PxF64 a0_2 = a0 * a0;		// Alpha0^2
		const PxF64 a0_3 = a0_2 * a0;	// ...
		const PxF64 a0_4 = a0_3 * a0;
		const PxF64 b0_2 = b0 * b0;
		const PxF64 b0_3 = b0_2 * b0;
		const PxF64 b0_4 = b0_3 * b0;
		const PxF64 a1_2 = a1 * a1;
		const PxF64 a1_3 = a1_2 * a1;
		const PxF64 b1_2 = b1 * b1;
		const PxF64 b1_3 = b1_2 * b1;

		// Mirtich's C/K polynomial subexpressions for this edge.
		const PxF64 C1 = a1 + a0;
		const PxF64 Ca = a1*C1 + a0_2;
		const PxF64 Caa = a1*Ca + a0_3;
		const PxF64 Caaa = a1*Caa + a0_4;
		const PxF64 Cb = b1*(b1 + b0) + b0_2;
		const PxF64 Cbb = b1*Cb + b0_3;
		const PxF64 Cbbb = b1*Cbb + b0_4;
		const PxF64 Cab = 3*a1_2 + 2*a1*a0 + a0_2;
		const PxF64 Kab = a1_2 + 2*a1*a0 + 3*a0_2;
		const PxF64 Caab = a0*Cab + 4*a1_3;
		const PxF64 Kaab = a1*Kab + 4*a0_3;
		const PxF64 Cabb = 4*b1_3 + 3*b1_2*b0 + 2*b1*b0_2 + b0_3;
		const PxF64 Kabb = b1_3 + 2*b1_2*b0 + 3*b1*b0_2 + 4*b0_3;

		// Accumulate this edge's contribution.
		mP1 += db*C1;
		mPa += db*Ca;
		mPaa += db*Caa;
		mPaaa += db*Caaa;
		mPb += da*Cb;
		mPbb += da*Cbb;
		mPbbb += da*Cbbb;
		mPab += db*(b1*Cab + b0*Kab);
		mPaab += db*(b1*Caab + b0*Kaab);
		mPabb += da*(a1*Cabb + a0*Kabb);
	}

	// Closed-form normalization constants from the analytic integration.
	mP1 /= 2.0;
	mPa /= 6.0;
	mPaa /= 12.0;
	mPaaa /= 20.0;
	mPb /= -6.0;
	mPbb /= -12.0;
	mPbbb /= -20.0;
	mPab /= 24.0;
	mPaab /= 60.0;
	mPabb /= -60.0;
}
#define SQR(x) ((x)*(x)) //!< Returns x square
#define CUBE(x) ((x)*(x)*(x)) //!< Returns x cube
/**
 * Computes surface integrals over a polyhedral face from the integrals over its projection.
 * Lifts the projection integrals (mP*) back onto the face plane via the plane equation
 * n.p + w = 0, producing the mF* members consumed by computeVolumeIntegrals().
 * \param f [in] a face structure.
 */
void VolumeIntegrator::computeFaceIntegrals(const Face& f)
{
	computeProjectionIntegrals(f);

	const PxF64 w = f.w;
	const PxF64* n = f.Norm;

	// Powers of 1/n[gamma]; gamma (mC) was chosen so |n[gamma]| is maximal, keeping k1 finite.
	const PxF64 k1 = 1 / n[mC];
	const PxF64 k2 = k1 * k1;
	const PxF64 k3 = k2 * k1;
	const PxF64 k4 = k3 * k1;

	mFa = k1 * mPa;
	mFb = k1 * mPb;
	mFc = -k2 * (n[mA]*mPa + n[mB]*mPb + w*mP1);

	mFaa = k1 * mPaa;
	mFbb = k1 * mPbb;
	mFcc = k3 * (SQR(n[mA])*mPaa + 2*n[mA]*n[mB]*mPab + SQR(n[mB])*mPbb + w*(2*(n[mA]*mPa + n[mB]*mPb) + w*mP1));

	mFaaa = k1 * mPaaa;
	mFbbb = k1 * mPbbb;
	mFccc = -k4 * (CUBE(n[mA])*mPaaa + 3*SQR(n[mA])*n[mB]*mPaab
		+ 3*n[mA]*SQR(n[mB])*mPabb + CUBE(n[mB])*mPbbb
		+ 3*w*(SQR(n[mA])*mPaa + 2*n[mA]*n[mB]*mPab + SQR(n[mB])*mPbb)
		+ w*w*(3*(n[mA]*mPa + n[mB]*mPb) + w*mP1));

	mFaab = k1 * mPaab;
	mFbbc = -k2 * (n[mA]*mPabb + n[mB]*mPbbb + w*mPbb);
	mFcca = k3 * (SQR(n[mA])*mPaaa + 2*n[mA]*n[mB]*mPaab + SQR(n[mB])*mPabb + w*(2*(n[mA]*mPaa + n[mB]*mPab) + w*mPa));
}
/*
 * This code computes volume integrals needed to compute mass properties of polyhedral bodies.
 * Based on public domain code by David Eberly.
 * Operates directly on a convex mesh descriptor (polygons), avoiding per-polygon
 * triangle generation; offers a scalar and a SIMD entry point.
 */
class VolumeIntegratorEberly
{
	PX_NOCOPY(VolumeIntegratorEberly)
	public:
						// 'density' scales the integrals into mass units; 'desc' is referenced, not copied.
						VolumeIntegratorEberly(const PxConvexMeshDesc& desc, PxF64 density) : mDesc(desc), mMass(0), mMassR(0), mDensity(density)	{}
						~VolumeIntegratorEberly()	{}

			bool		computeVolumeIntegralsSIMD(PxIntegrals& ir, const PxVec3& origin);
			bool		computeVolumeIntegrals(PxIntegrals& ir, const PxVec3& origin);
	private:
			const PxConvexMeshDesc&	mDesc;		// convex mesh descriptor (points/polygons/indices)
			PxF64					mMass;		// accumulated mass, double precision
			PxReal					mMassR;		// presumably the single-precision mass used by the SIMD path — confirm
			PxF64					mDensity;	// density used to scale the integrals
};
// Computes the shared polynomial subexpressions f1..f3 and g0..g2 from the three inputs
// w0..w2 (scalar counterpart of subexpressionsSIMD below), as used by the Eberly
// volume-integration routines.
PX_FORCE_INLINE void subexpressions(PxF64 w0, PxF64 w1, PxF64 w2, PxF64& f1, PxF64& f2, PxF64& f3, PxF64& g0, PxF64& g1, PxF64& g2)
{
	const PxF64 t0 = w0 + w1;
	f1 = t0 + w2;

	const PxF64 t1 = w0*w0;
	const PxF64 t2 = t1 + w1*t0;
	f2 = t2 + w2*f1;
	f3 = w0*t1 + w1*t2 + w2*f2;

	g0 = f2 + w0*(f1 + w0);
	g1 = f2 + w1*(f1 + w1);
	g2 = f2 + w2*(f1 + w2);
}
// SIMD counterpart of subexpressions(): the same polynomial recurrences evaluated on
// 4-wide vectors — one call processes the three triangle vertices componentwise
// (see the caller in computeVolumeIntegralsSIMD, which passes p0/p1/p2).
PX_FORCE_INLINE void subexpressionsSIMD(	const Vec4V& w0, const Vec4V& w1, const Vec4V& w2,
											Vec4V& f1, Vec4V& f2, Vec4V& f3, Vec4V& g0, Vec4V& g1, Vec4V& g2)
{
	const Vec4V temp0 = V4Add(w0, w1);
	f1 = V4Add(temp0, w2);

	const Vec4V temp1 = V4Mul(w0,w0);
	const Vec4V temp2 = V4MulAdd(w1, temp0, temp1);
	f2 = V4MulAdd(w2, f1, temp2);

	// f3 = w0.multiply(temp1) + w1.multiply(temp2) + w2.multiply(f2);
	const Vec4V ad0 = V4Mul(w0, temp1);
	const Vec4V ad1 = V4MulAdd(w1, temp2, ad0);
	f3 = V4MulAdd(w2, f2, ad1);

	g0 = V4MulAdd(w0, V4Add(f1, w0), f2);	// f2 + w0.multiply(f1 + w0);
	g1 = V4MulAdd(w1, V4Add(f1, w1), f2);	// f2 + w1.multiply(f1 + w1);
	g2 = V4MulAdd(w2, V4Add(f1, w2), f2);	// f2 + w2.multiply(f1 + w2);
}
/**
* Computes volume integrals for a polyhedron by summing surface integrals over its faces. SIMD version
* \param ir [out] a result structure.
* \param origin [in] the origin of the mesh vertices. All vertices will be shifted accordingly prior to computing the volume integrals.
Can improve accuracy, for example, if the centroid is used in the case of a convex mesh. Note: the returned inertia will not be relative to this origin but relative to (0,0,0).
* \return true if success
*/
// SIMD implementation of Eberly's mass-properties integration: fans every hull
// polygon into triangles and accumulates ten surface integrals over them.
bool VolumeIntegratorEberly::computeVolumeIntegralsSIMD(PxIntegrals& ir, const PxVec3& origin)
{
// Eberly's constant weights applied to the accumulated integrals at the end.
FloatV mult = FLoad(1.0f/6.0f);
const Vec4V multV = V4Load(1.0f/24.0f);
const Vec4V multV2 = V4Load(1.0f/60.0f);
const Vec4V multVV = V4Load(1.0f/120.0f);
// order: 1, x, y, z, x^2, y^2, z^2, xy, yz, zx
FloatV intg = FLoad(0.0f);
Vec4V intgV = V4Load(0.0f);
Vec4V intgV2 = V4Load(0.0f);
Vec4V intgVV = V4Load(0.0f);
const Vec4V originV = Vec4V_From_PxVec3_WUndefined(origin);
const FloatV zeroV = FLoad(0.0f);
const PxVec3* hullVerts = static_cast<const PxVec3*> (mDesc.points.data);
const Gu::HullPolygonData* hullPolygons = static_cast<const Gu::HullPolygonData*> (mDesc.polygons.data);
for (PxU32 i = 0; i < mDesc.polygons.count; i++)
{
const Gu::HullPolygonData& polygon = hullPolygons[i];
const PxU8* data = static_cast<const PxU8*>(mDesc.indices.data) + polygon.mVRef8;
const PxU32 nbVerts = polygon.mNbVerts;
PX_ASSERT(nbVerts > 2);
const Vec4V normalV = V4LoadU(&polygon.mPlane.n.x);
// Triangulate the polygon as a fan around its first vertex.
for (PxU32 j = 0; j < nbVerts - 2; j++)
{
// Should be safe to V4Load, we allocate one more vertex each time
const Vec4V vertex0 = V4LoadU(&hullVerts[data[0]].x);
const Vec4V vertex1 = V4LoadU(&hullVerts[data[j + 1]].x);
const Vec4V vertex2 = V4LoadU(&hullVerts[data[j + 2]].x);
// Shift vertices so integration runs relative to 'origin' (for accuracy).
const Vec4V p0 = V4Sub(vertex0, originV);
Vec4V p1 = V4Sub(vertex1, originV);
Vec4V p2 = V4Sub(vertex2, originV);
const Vec4V p0YZX = V4PermYZXW(p0);
const Vec4V p1YZX = V4PermYZXW(p1);
const Vec4V p2YZX = V4PermYZXW(p2);
// get edges and cross product of edges
Vec4V d = V4Cross(V4Sub(p1, p0), V4Sub(p2, p0)); // (p1 - p0).cross(p2 - p0);
const FloatV dist = V4Dot3(d, normalV);
// Flip triangles whose winding disagrees with the polygon plane normal.
//if(cp.dot(normalV) < 0)
if(FAllGrtr(zeroV, dist))
{
d = V4Neg(d);
Vec4V temp = p1;
p1 = p2;
p2 = temp;
}
// compute integral terms
Vec4V f1; Vec4V f2; Vec4V f3; Vec4V g0; Vec4V g1; Vec4V g2;
subexpressionsSIMD(p0, p1, p2, f1, f2, f3, g0, g1, g2);
// update integrals
intg = FScaleAdd(V4GetX(d), V4GetX(f1), intg); //intg += d.x*f1.x;
intgV = V4MulAdd(d, f2, intgV); // intgV +=d.multiply(f2);
intgV2 = V4MulAdd(d, f3, intgV2); // intgV2 += d.multiply(f3);
const Vec4V ad0 = V4Mul(p0YZX, g0);
const Vec4V ad1 = V4MulAdd(p1YZX, g1, ad0);
const Vec4V ad2 = V4MulAdd(p2YZX, g2, ad1);
intgVV = V4MulAdd(d, ad2, intgVV); //intgVV += d.multiply(p0YZX.multiply(g0) + p1YZX.multiply(g1) + p2YZX.multiply(g2));
}
}
// Apply Eberly's constant weights.
intg = FMul(intg, mult); // intg *= mult;
intgV = V4Mul(intgV, multV);
intgV2 = V4Mul(intgV2, multV2);
intgVV = V4Mul(intgVV, multVV);
// center of mass ir.COM = intgV/mMassR;
const Vec4V comV = V4ScaleInv(intgV, intg);
// NOTE: V4StoreU writes 4 floats starting at &ir.COM.x, i.e. one float past
// the PxVec3; the clobbered neighbor is rewritten immediately below.
// we rewrite the mass, but then we set it back
V4StoreU(comV, &ir.COM.x);
FStore(intg, &mMassR);
ir.mass = PxF64(mMassR); // = intg;
PxVec3 intg2;
V3StoreU(Vec3V_From_Vec4V(intgV2), intg2);
PxVec3 intVV;
V3StoreU(Vec3V_From_Vec4V(intgVV), intVV);
// inertia tensor relative to the provided origin parameter
ir.inertiaTensor[0][0] = PxF64(intg2.y + intg2.z);
ir.inertiaTensor[1][1] = PxF64(intg2.x + intg2.z);
ir.inertiaTensor[2][2] = PxF64(intg2.x + intg2.y);
ir.inertiaTensor[0][1] = ir.inertiaTensor[1][0] = PxF64(-intVV.x);
ir.inertiaTensor[1][2] = ir.inertiaTensor[2][1] = PxF64(-intVV.y);
ir.inertiaTensor[0][2] = ir.inertiaTensor[2][0] = PxF64(-intVV.z);
// inertia tensor relative to center of mass
ir.COMInertiaTensor[0][0] = ir.inertiaTensor[0][0] -PxF64(mMassR*(ir.COM.y*ir.COM.y+ir.COM.z*ir.COM.z));
ir.COMInertiaTensor[1][1] = ir.inertiaTensor[1][1] -PxF64(mMassR*(ir.COM.z*ir.COM.z+ir.COM.x*ir.COM.x));
ir.COMInertiaTensor[2][2] = ir.inertiaTensor[2][2] -PxF64(mMassR*(ir.COM.x*ir.COM.x+ir.COM.y*ir.COM.y));
ir.COMInertiaTensor[0][1] = ir.COMInertiaTensor[1][0] = (ir.inertiaTensor[0][1] +PxF64(mMassR*ir.COM.x*ir.COM.y));
ir.COMInertiaTensor[1][2] = ir.COMInertiaTensor[2][1] = (ir.inertiaTensor[1][2] +PxF64(mMassR*ir.COM.y*ir.COM.z));
ir.COMInertiaTensor[0][2] = ir.COMInertiaTensor[2][0] = (ir.inertiaTensor[0][2] +PxF64(mMassR*ir.COM.z*ir.COM.x));
// inertia tensor relative to (0,0,0)
// When a non-zero origin was used for accuracy, shift the tensor back so the
// returned inertia is relative to (0,0,0), as promised in the function doc.
if (!origin.isZero())
{
PxVec3 sum = ir.COM + origin;
ir.inertiaTensor[0][0] -= PxF64(mMassR*((ir.COM.y*ir.COM.y+ir.COM.z*ir.COM.z) - (sum.y*sum.y+sum.z*sum.z)));
ir.inertiaTensor[1][1] -= PxF64(mMassR*((ir.COM.z*ir.COM.z+ir.COM.x*ir.COM.x) - (sum.z*sum.z+sum.x*sum.x)));
ir.inertiaTensor[2][2] -= PxF64(mMassR*((ir.COM.x*ir.COM.x+ir.COM.y*ir.COM.y) - (sum.x*sum.x+sum.y*sum.y)));
ir.inertiaTensor[0][1] = ir.inertiaTensor[1][0] = ir.inertiaTensor[0][1] + PxF64(mMassR*((ir.COM.x*ir.COM.y) - (sum.x*sum.y)));
ir.inertiaTensor[1][2] = ir.inertiaTensor[2][1] = ir.inertiaTensor[1][2] + PxF64(mMassR*((ir.COM.y*ir.COM.z) - (sum.y*sum.z)));
ir.inertiaTensor[0][2] = ir.inertiaTensor[2][0] = ir.inertiaTensor[0][2] + PxF64(mMassR*((ir.COM.z*ir.COM.x) - (sum.z*sum.x)));
ir.COM = sum;
}
return true;
}
/**
* Computes volume integrals for a polyhedron by summing surface integrals over its faces.
* \param ir [out] a result structure.
* \param origin [in] the origin of the mesh vertices. All vertices will be shifted accordingly prior to computing the volume integrals.
Can improve accuracy, for example, if the centroid is used in the case of a convex mesh. Note: the returned inertia will not be relative to this origin but relative to (0,0,0).
* \return true if success
*/
// Scalar (PxF64) implementation of Eberly's mass-properties integration;
// mirrors computeVolumeIntegralsSIMD above in double precision.
bool VolumeIntegratorEberly::computeVolumeIntegrals(PxIntegrals& ir, const PxVec3& origin)
{
// Eberly's constant weights, one per accumulated integral (same order as intg).
const PxF64 mult[10] = {1.0/6.0,1.0/24.0,1.0/24.0,1.0/24.0,1.0/60.0,1.0/60.0,1.0/60.0,1.0/120.0,1.0/120.0,1.0/120.0};
PxF64 intg[10] = {0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0}; // order: 1, x, y, z, x^2, y^2, z^2, xy, yz, zx
const PxVec3* hullVerts = static_cast<const PxVec3*> (mDesc.points.data);
for (PxU32 i = 0; i < mDesc.polygons.count; i++)
{
const Gu::HullPolygonData& polygon = (static_cast<const Gu::HullPolygonData*> (mDesc.polygons.data))[i];
const PxU8* Data = static_cast<const PxU8*>(mDesc.indices.data) + polygon.mVRef8;
const PxU32 NbVerts = polygon.mNbVerts;
// Fan-triangulate the polygon around its first vertex, shifting vertices
// by 'origin' for accuracy. (The % NbVerts is defensive only: within this
// loop j + 2 <= NbVerts - 1, so the indices never actually wrap.)
for (PxU32 j = 0; j < NbVerts - 2; j++)
{
const PxVec3 p0 = hullVerts[Data[0]] - origin;
PxVec3 p1 = hullVerts[Data[(j + 1) % NbVerts]] - origin;
PxVec3 p2 = hullVerts[Data[(j + 2) % NbVerts]] - origin;
PxVec3 cp = (p1 - p0).cross(p2 - p0);
// Flip triangles whose winding disagrees with the polygon plane normal.
if(cp.dot(polygon.mPlane.n) < 0)
{
cp = -cp;
PxSwap(p1,p2);
}
// Promote to double precision for the accumulation below.
PxF64 x0 = PxF64(p0.x); PxF64 y0 = PxF64(p0.y); PxF64 z0 = PxF64(p0.z);
PxF64 x1 = PxF64(p1.x); PxF64 y1 = PxF64(p1.y); PxF64 z1 = PxF64(p1.z);
PxF64 x2 = PxF64(p2.x); PxF64 y2 = PxF64(p2.y); PxF64 z2 = PxF64(p2.z);
// get edges and cross product of edges
PxF64 d0 = PxF64(cp.x); PxF64 d1 = PxF64(cp.y); PxF64 d2 = PxF64(cp.z);
// compute integral terms
PxF64 f1x; PxF64 f2x; PxF64 f3x; PxF64 g0x; PxF64 g1x; PxF64 g2x;
PxF64 f1y; PxF64 f2y; PxF64 f3y; PxF64 g0y; PxF64 g1y; PxF64 g2y;
PxF64 f1z; PxF64 f2z; PxF64 f3z; PxF64 g0z; PxF64 g1z; PxF64 g2z;
subexpressions(x0, x1, x2, f1x, f2x, f3x, g0x, g1x, g2x);
subexpressions(y0, y1, y2, f1y, f2y, f3y, g0y, g1y, g2y);
subexpressions(z0, z1, z2, f1z, f2z, f3z, g0z, g1z, g2z);
// update integrals
intg[0] += d0*f1x;
intg[1] += d0*f2x; intg[2] += d1*f2y; intg[3] += d2*f2z;
intg[4] += d0*f3x; intg[5] += d1*f3y; intg[6] += d2*f3z;
intg[7] += d0*(y0*g0x + y1*g1x + y2*g2x);
intg[8] += d1*(z0*g0y + z1*g1y + z2*g2y);
intg[9] += d2*(x0*g0z + x1*g1z + x2*g2z);
}
}
// Apply Eberly's constant weights.
for (PxU32 i = 0; i < 10; i++)
{
intg[i] *= mult[i];
}
ir.mass = mMass = intg[0];
// center of mass
ir.COM.x = PxReal(intg[1]/mMass);
ir.COM.y = PxReal(intg[2]/mMass);
ir.COM.z = PxReal(intg[3]/mMass);
// inertia tensor relative to the provided origin parameter
ir.inertiaTensor[0][0] = intg[5]+intg[6];
ir.inertiaTensor[1][1] = intg[4]+intg[6];
ir.inertiaTensor[2][2] = intg[4]+intg[5];
ir.inertiaTensor[0][1] = ir.inertiaTensor[1][0] = -intg[7];
ir.inertiaTensor[1][2] = ir.inertiaTensor[2][1] = -intg[8];
ir.inertiaTensor[0][2] = ir.inertiaTensor[2][0] = -intg[9];
// inertia tensor relative to center of mass
ir.COMInertiaTensor[0][0] = ir.inertiaTensor[0][0] -mMass*PxF64((ir.COM.y*ir.COM.y+ir.COM.z*ir.COM.z));
ir.COMInertiaTensor[1][1] = ir.inertiaTensor[1][1] -mMass*PxF64((ir.COM.z*ir.COM.z+ir.COM.x*ir.COM.x));
ir.COMInertiaTensor[2][2] = ir.inertiaTensor[2][2] -mMass*PxF64((ir.COM.x*ir.COM.x+ir.COM.y*ir.COM.y));
ir.COMInertiaTensor[0][1] = ir.COMInertiaTensor[1][0] = (ir.inertiaTensor[0][1] +mMass*PxF64(ir.COM.x*ir.COM.y));
ir.COMInertiaTensor[1][2] = ir.COMInertiaTensor[2][1] = (ir.inertiaTensor[1][2] +mMass*PxF64(ir.COM.y*ir.COM.z));
ir.COMInertiaTensor[0][2] = ir.COMInertiaTensor[2][0] = (ir.inertiaTensor[0][2] +mMass*PxF64(ir.COM.z*ir.COM.x));
// inertia tensor relative to (0,0,0)
// When a non-zero origin was used for accuracy, shift the tensor back so the
// returned inertia is relative to (0,0,0), as promised in the function doc.
if (!origin.isZero())
{
PxVec3 sum = ir.COM + origin;
ir.inertiaTensor[0][0] -= mMass*PxF64((ir.COM.y*ir.COM.y+ir.COM.z*ir.COM.z) - (sum.y*sum.y+sum.z*sum.z));
ir.inertiaTensor[1][1] -= mMass*PxF64((ir.COM.z*ir.COM.z+ir.COM.x*ir.COM.x) - (sum.z*sum.z+sum.x*sum.x));
ir.inertiaTensor[2][2] -= mMass*PxF64((ir.COM.x*ir.COM.x+ir.COM.y*ir.COM.y) - (sum.x*sum.x+sum.y*sum.y));
ir.inertiaTensor[0][1] = ir.inertiaTensor[1][0] = ir.inertiaTensor[0][1] + mMass*PxF64((ir.COM.x*ir.COM.y) - (sum.x*sum.y));
ir.inertiaTensor[1][2] = ir.inertiaTensor[2][1] = ir.inertiaTensor[1][2] + mMass*PxF64((ir.COM.y*ir.COM.z) - (sum.y*sum.z));
ir.inertiaTensor[0][2] = ir.inertiaTensor[2][0] = ir.inertiaTensor[0][2] + mMass*PxF64((ir.COM.z*ir.COM.x) - (sum.z*sum.x));
ir.COM = sum;
}
return true;
}
} // namespace
// Wrapper
bool computeVolumeIntegrals(const PxSimpleTriangleMesh& mesh, PxReal density, PxIntegrals& integrals)
{
VolumeIntegrator v(mesh, PxF64(density));
return v.computeVolumeIntegrals(integrals);
}
// Wrapper
bool computeVolumeIntegralsEberly(const PxConvexMeshDesc& mesh, PxReal density, PxIntegrals& integrals, const PxVec3& origin, bool useSimd)
{
VolumeIntegratorEberly v(mesh, PxF64(density));
if(useSimd)
return v.computeVolumeIntegralsSIMD(integrals, origin);
else
return v.computeVolumeIntegrals(integrals, origin);
}
}
| 25,885 | C++ | 34.26703 | 181 | 0.627584 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/cooking/GuCookingConvexHullUtils.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_COOKING_CONVEX_HULL_UTILS_H
#define GU_COOKING_CONVEX_HULL_UTILS_H
#include "foundation/PxMemory.h"
#include "foundation/PxPlane.h"
#include "cooking/PxConvexMeshDesc.h"
#include "foundation/PxUserAllocated.h"
#include "foundation/PxArray.h"
namespace physx
{
//////////////////////////////////////////////////////////////////////////
// helper class for hull construction, holds the vertices and planes together
// while cropping the hull with planes
class ConvexHull : public PxUserAllocated
{
public:
// Helper class for halfedge representation
class HalfEdge
{
public:
PxI16 ea; // the other half of the edge (index into edges list)
PxU8 v; // the vertex at the start of this edge (index into vertices list)
PxU8 p; // the facet on which this edge lies (index into facets list)
HalfEdge(){}
HalfEdge(PxI16 _ea, PxU8 _v, PxU8 _p) :ea(_ea), v(_v), p(_p){}
};
// Declared but (apparently) not defined here: assignment is effectively
// disabled, since the reference member below cannot be reseated.
ConvexHull& operator = (const ConvexHull&);
// construct the base cube hull from given max/min AABB
ConvexHull(const PxVec3& bmin, const PxVec3& bmax, const PxArray<PxPlane>& inPlanes);
// construct the base cube hull from given OBB
ConvexHull(const PxVec3& extent, const PxTransform& transform, const PxArray<PxPlane>& inPlanes);
// copy constructor
ConvexHull(const ConvexHull& srcHull)
: mInputPlanes(srcHull.getInputPlanes())
{
copyHull(srcHull);
}
// construct plain hull
ConvexHull(const PxArray<PxPlane>& inPlanes)
: mInputPlanes(inPlanes)
{
}
// finds the candidate plane, returns -1 otherwise
PxI32 findCandidatePlane(float planetestepsilon, float epsilon) const;
// internal check of the hull integrity
bool assertIntact(float epsilon) const;
// return vertices
const PxArray<PxVec3>& getVertices() const
{
return mVertices;
}
// return edges
const PxArray<HalfEdge>& getEdges() const
{
return mEdges;
}
// return faces
const PxArray<PxPlane>& getFacets() const
{
return mFacets;
}
// return input planes
const PxArray<PxPlane>& getInputPlanes() const
{
return mInputPlanes;
}
// return vertices
PxArray<PxVec3>& getVertices()
{
return mVertices;
}
// return edges
PxArray<HalfEdge>& getEdges()
{
return mEdges;
}
// return faces
PxArray<PxPlane>& getFacets()
{
return mFacets;
}
// returns the maximum number of vertices on a face
PxU32 maxNumVertsPerFace() const;
// copy the hull from source
// Raw memcpy is fine here: PxVec3, HalfEdge and PxPlane are all plain
// trivially-copyable value types.
void copyHull(const ConvexHull& src)
{
mVertices.resize(src.getVertices().size());
mEdges.resize(src.getEdges().size());
mFacets.resize(src.getFacets().size());
PxMemCopy(mVertices.begin(), src.getVertices().begin(), src.getVertices().size()*sizeof(PxVec3));
PxMemCopy(mEdges.begin(), src.getEdges().begin(), src.getEdges().size()*sizeof(HalfEdge));
PxMemCopy(mFacets.begin(), src.getFacets().begin(), src.getFacets().size()*sizeof(PxPlane));
}
private:
PxArray<PxVec3> mVertices; // hull vertices
PxArray<HalfEdge> mEdges; // halfedge connectivity
PxArray<PxPlane> mFacets; // one plane per face
const PxArray<PxPlane>& mInputPlanes; // planes the hull was built/cropped from (not owned)
};
//////////////////////////////////////////////////////////////////////////|
// Crops the hull with a provided plane and with given epsilon
// returns new hull if succeeded
ConvexHull* convexHullCrop(const ConvexHull& convex, const PxPlane& slice, float planetestepsilon);
//////////////////////////////////////////////////////////////////////////|
// three planes intersection
PX_FORCE_INLINE PxVec3 threePlaneIntersection(const PxPlane& p0, const PxPlane& p1, const PxPlane& p2)
{
	// Rows of the system matrix are the three plane normals.
	const PxMat33 normals = PxMat33(p0.n, p1.n, p2.n).getTranspose();
	const PxVec3 dists(p0.d, p1.d, p2.d);
	// Solve normals * x = -dists for the common intersection point.
	return -(normals.getInverse().transform(dists));
}
//////////////////////////////////////////////////////////////////////////
// Compute OBB around given convex hull
bool computeOBBFromConvex(const PxConvexMeshDesc& desc, PxVec3& sides, PxTransform& matrix);
}
#endif
| 5,641 | C | 31.802325 | 103 | 0.685871 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/cooking/GuCookingConvexMeshBuilder.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuConvexMesh.h"
#include "foundation/PxMathUtils.h"
#include "foundation/PxAlloca.h"
#include "GuCooking.h"
#include "GuBigConvexData2.h"
#include "GuBounds.h"
#include "GuCookingVolumeIntegration.h"
#include "GuCookingConvexMeshBuilder.h"
#include "GuCookingBigConvexDataBuilder.h"
#include "CmUtils.h"
#include "foundation/PxVecMath.h"
#include "GuCookingSDF.h"
using namespace physx;
using namespace Gu;
using namespace aos;
///////////////////////////////////////////////////////////////////////////////
PX_IMPLEMENT_OUTPUT_ERROR
///////////////////////////////////////////////////////////////////////////////
// Initializes an empty builder with zero mass and identity inertia.
// 'buildGRBData' is forwarded to the hull builder (presumably to request
// GPU/GRB data — confirm in ConvexHullBuilder).
ConvexMeshBuilder::ConvexMeshBuilder(const bool buildGRBData) : hullBuilder(&mHullData, buildGRBData), mSdfData(NULL), mBigConvexData(NULL), mMass(0.0f), mInertia(PxIdentity)
{
}
// Releases the SDF and big-convex data still owned by the builder.
// (copy() may have transferred ownership away, leaving these NULL.)
ConvexMeshBuilder::~ConvexMeshBuilder()
{
PX_DELETE(mSdfData);
PX_DELETE(mBigConvexData);
}
// load the mesh data from given polygons
bool ConvexMeshBuilder::build(const PxConvexMeshDesc& desc, PxU32 gaussMapVertexLimit, bool validateOnly, ConvexHullLib* hullLib)
{
if(!desc.isValid())
return outputError<PxErrorCode::eINVALID_PARAMETER>(__LINE__, "Gu::ConvexMesh::loadFromDesc: desc.isValid() failed!");
if(!loadConvexHull(desc, hullLib))
return false;
// Compute local bounds (*after* hull has been created)
PxBounds3 minMaxBounds;
computeBoundsAroundVertices(minMaxBounds, mHullData.mNbHullVertices, hullBuilder.mHullDataHullVertices);
mHullData.mAABB = CenterExtents(minMaxBounds);
if(mHullData.mNbHullVertices > gaussMapVertexLimit)
{
if(!computeGaussMaps())
{
return false;
}
}
if(validateOnly)
return true;
// TEST_INTERNAL_OBJECTS
computeInternalObjects();
//~TEST_INTERNAL_OBJECTS
if (desc.sdfDesc)
{
computeSDF(desc);
}
return true;
}
PX_COMPILE_TIME_ASSERT(sizeof(PxMaterialTableIndex)==sizeof(PxU16));
// Serializes the cooked convex mesh to 'stream' in the 'CVXM' chunk format
// (PX_CONVEX_VERSION). 'platformMismatch' requests byte-swapped output for a
// target platform of different endianness. The write order here is the wire
// format — it must match the deserialization code exactly. Returns false on
// write failure.
bool ConvexMeshBuilder::save(PxOutputStream& stream, bool platformMismatch) const
{
// Export header
if(!writeHeader('C', 'V', 'X', 'M', PX_CONVEX_VERSION, platformMismatch, stream))
return false;
// Export serialization flags
PxU32 serialFlags = 0;
writeDword(serialFlags, platformMismatch, stream);
if(!hullBuilder.save(stream, platformMismatch))
return false;
// Export local bounds
// writeFloat(geomEpsilon, platformMismatch, stream);
writeFloat(0.0f, platformMismatch, stream);
writeFloat(mHullData.mAABB.getMin(0), platformMismatch, stream);
writeFloat(mHullData.mAABB.getMin(1), platformMismatch, stream);
writeFloat(mHullData.mAABB.getMin(2), platformMismatch, stream);
writeFloat(mHullData.mAABB.getMax(0), platformMismatch, stream);
writeFloat(mHullData.mAABB.getMax(1), platformMismatch, stream);
writeFloat(mHullData.mAABB.getMax(2), platformMismatch, stream);
// Export mass info
writeFloat(mMass, platformMismatch, stream);
writeFloatBuffer(reinterpret_cast<const PxF32*>(&mInertia), 9, platformMismatch, stream);
writeFloatBuffer(&mHullData.mCenterOfMass.x, 3, platformMismatch, stream);
// Export gaussmaps
// Presence of optional sections is encoded as a float flag: 1.0 = present,
// -1.0 = absent (same scheme for the SDF section below).
if(mBigConvexData)
{
writeFloat(1.0f, platformMismatch, stream); //gauss map flag true
BigConvexDataBuilder SVMB(&mHullData, mBigConvexData, hullBuilder.mHullDataHullVertices);
SVMB.save(stream, platformMismatch);
}
else
writeFloat(-1.0f, platformMismatch, stream); //gauss map flag false
if (mSdfData)
{
writeFloat(1.0f, platformMismatch, stream); //sdf flag true
// Export sdf values
writeFloat(mSdfData->mMeshLower.x, platformMismatch, stream);
writeFloat(mSdfData->mMeshLower.y, platformMismatch, stream);
writeFloat(mSdfData->mMeshLower.z, platformMismatch, stream);
writeFloat(mSdfData->mSpacing, platformMismatch, stream);
writeDword(mSdfData->mDims.x, platformMismatch, stream);
writeDword(mSdfData->mDims.y, platformMismatch, stream);
writeDword(mSdfData->mDims.z, platformMismatch, stream);
writeDword(mSdfData->mNumSdfs, platformMismatch, stream);
writeDword(mSdfData->mNumSubgridSdfs, platformMismatch, stream);
writeDword(mSdfData->mNumStartSlots, platformMismatch, stream);
writeDword(mSdfData->mSubgridSize, platformMismatch, stream);
writeDword(mSdfData->mSdfSubgrids3DTexBlockDim.x, platformMismatch, stream);
writeDword(mSdfData->mSdfSubgrids3DTexBlockDim.y, platformMismatch, stream);
writeDword(mSdfData->mSdfSubgrids3DTexBlockDim.z, platformMismatch, stream);
writeFloat(mSdfData->mSubgridsMinSdfValue, platformMismatch, stream);
writeFloat(mSdfData->mSubgridsMaxSdfValue, platformMismatch, stream);
writeDword(mSdfData->mBytesPerSparsePixel, platformMismatch, stream);
writeFloatBuffer(mSdfData->mSdf, mSdfData->mNumSdfs, platformMismatch, stream);
writeByteBuffer(mSdfData->mSubgridSdf, mSdfData->mNumSubgridSdfs, stream);
writeIntBuffer(mSdfData->mSubgridStartSlots, mSdfData->mNumStartSlots, platformMismatch, stream);
}
else
writeFloat(-1.0f, platformMismatch, stream); //sdf flag false
// TEST_INTERNAL_OBJECTS
writeFloat(mHullData.mInternal.mRadius, platformMismatch, stream);
writeFloat(mHullData.mInternal.mExtents[0], platformMismatch, stream);
writeFloat(mHullData.mInternal.mExtents[1], platformMismatch, stream);
writeFloat(mHullData.mInternal.mExtents[2], platformMismatch, stream);
//~TEST_INTERNAL_OBJECTS
return true;
}
//////////////////////////////////////////////////////////////////////////
// instead of saving the data into stream, we copy the mesh data
// into internal Gu::ConvexMesh.
// Copies the cooked mesh data directly into an internal Gu::ConvexHullInitData
// (in-memory alternative to save()). Ownership of the optional big-convex and
// SDF data is *transferred* to 'hullData'; the corresponding members of this
// builder are nulled out. Always returns true.
bool ConvexMeshBuilder::copy(Gu::ConvexHullInitData& hullData)
{
	// Hull-builder data first; it reports its size through 'nb'.
	PxU32 nb = 0;
	hullBuilder.copy(hullData.mHullData, nb);
	hullData.mNb = nb;

	// Mass properties.
	hullData.mInertia = mInertia;
	hullData.mMass = mMass;
	hullData.mHullData.mAABB = mHullData.mAABB;
	hullData.mHullData.mCenterOfMass = mHullData.mCenterOfMass;

	// Hand over the big convex (gauss map) data, if any.
	hullData.mHullData.mBigConvexRawData = mBigConvexData ? &mBigConvexData->mData : NULL;
	hullData.mBigConvexData = mBigConvexData;
	mBigConvexData = NULL;

	// Hand over the SDF data, if any.
	hullData.mHullData.mSdfData = mSdfData;
	hullData.mSdfData = mSdfData;
	mSdfData = NULL;

	// Internal-object (inscribed sphere/box) data.
	hullData.mHullData.mInternal.mRadius = mHullData.mInternal.mRadius;
	hullData.mHullData.mInternal.mExtents[0] = mHullData.mInternal.mExtents[0];
	hullData.mHullData.mInternal.mExtents[1] = mHullData.mInternal.mExtents[1];
	hullData.mHullData.mInternal.mExtents[2] = mHullData.mInternal.mExtents[2];

	return true;
}
// compute mass and inertia of the convex mesh
// Computes mass, inertia tensor and center of mass of the hull. Lazy: does
// nothing when mMass already holds a positive value. 'lowerPrecision' selects
// the single-precision SIMD integrator instead of the double-precision one.
// On failure (or non-finite results) mMass stays <= 0 and an error is logged.
void ConvexMeshBuilder::computeMassInfo(bool lowerPrecision)
{
if(mMass <= 0.0f) //not yet computed.
{
PxIntegrals integrals;
// Build a temporary descriptor pointing at the hull builder's data.
PxConvexMeshDesc meshDesc;
meshDesc.points.count = mHullData.mNbHullVertices;
meshDesc.points.data = hullBuilder.mHullDataHullVertices;
meshDesc.points.stride = sizeof(PxVec3);
meshDesc.polygons.data = hullBuilder.mHullDataPolygons;
meshDesc.polygons.stride = sizeof(Gu::HullPolygonData);
meshDesc.polygons.count = hullBuilder.mHull->mNbPolygons;
meshDesc.indices.data = hullBuilder.mHullDataVertexData8;
// using the centroid of the convex for the volume integration solved accuracy issues in cases where the inertia tensor
// ended up close to not being positive definite and after a few further transforms the diagonalized inertia tensor ended
// up with negative values.
PxVec3 mean(0.0f);
for(PxU32 i=0; i < mHullData.mNbHullVertices; i++)
mean += hullBuilder.mHullDataHullVertices[i];
mean *= (1.0f / mHullData.mNbHullVertices);
if(computeVolumeIntegralsEberly(meshDesc, 1.0f, integrals, mean, lowerPrecision))
{
integrals.getOriginInertia(mInertia);
mHullData.mCenterOfMass = integrals.COM;
//note: the mass will be negative for an inside-out mesh!
// Accept the result only if every component is finite.
if(mInertia.column0.isFinite() && mInertia.column1.isFinite() && mInertia.column2.isFinite()
&& mHullData.mCenterOfMass.isFinite() && PxIsFinite(PxReal(integrals.mass)))
{
if (integrals.mass < 0)
{
outputError<PxErrorCode::eDEBUG_WARNING>(__LINE__, "Gu::ConvexMesh: Mesh has a negative volume! Is it open or do (some) faces have reversed winding? (Taking absolute value.)");
integrals.mass = -integrals.mass;
mInertia = -mInertia;
}
mMass = PxReal(integrals.mass); //set mass to valid value.
return;
}
}
outputError<PxErrorCode::eINTERNAL_ERROR>(__LINE__, "Gu::ConvexMesh: Error computing mesh mass properties!\n");
}
}
#if PX_VC
#pragma warning(push)
#pragma warning(disable:4996) // permitting use of gatherStrided until we have a replacement.
#endif
// Gathers the (possibly strided) descriptor arrays into tightly packed stack
// buffers, initializes the hull builder from them, and computes the mass
// properties. 'hullLib' is NULL when the user supplied precooked polygons.
// Returns false if hull initialization fails.
bool ConvexMeshBuilder::loadConvexHull(const PxConvexMeshDesc& desc, ConvexHullLib* hullLib)
{
// gather points
PxVec3* geometry = reinterpret_cast<PxVec3*>(PxAlloca(sizeof(PxVec3)*desc.points.count));
immediateCooking::gatherStrided(desc.points.data, geometry, desc.points.count, sizeof(PxVec3), desc.points.stride);
PxU32* topology = NULL;
// gather indices
// store the indices into topology if we have the polygon data
if(desc.indices.data)
{
topology = reinterpret_cast<PxU32*>(PxAlloca(sizeof(PxU32)*desc.indices.count));
if (desc.flags & PxConvexFlag::e16_BIT_INDICES)
{
// conversion; 16 bit index -> 32 bit index & stride
PxU32* dest = topology;
const PxU32* pastLastDest = topology + desc.indices.count;
const PxU8* source = reinterpret_cast<const PxU8*>(desc.indices.data);
while (dest < pastLastDest)
{
const PxU16 * trig16 = reinterpret_cast<const PxU16*>(source);
*dest++ = *trig16;
source += desc.indices.stride;
}
}
else
{
immediateCooking::gatherStrided(desc.indices.data, topology, desc.indices.count, sizeof(PxU32), desc.indices.stride);
}
}
// gather polygons
PxHullPolygon* hullPolygons = NULL;
if(desc.polygons.data)
{
hullPolygons = reinterpret_cast<PxHullPolygon*>(PxAlloca(sizeof(PxHullPolygon)*desc.polygons.count));
immediateCooking::gatherStrided(desc.polygons.data,hullPolygons,desc.polygons.count,sizeof(PxHullPolygon),desc.polygons.stride);
// if user polygons, make sure the largest one is the first one
if (!hullLib)
{
PxU32 largestPolygon = 0;
for (PxU32 i = 1; i < desc.polygons.count; i++)
{
if(hullPolygons[i].mNbVerts > hullPolygons[largestPolygon].mNbVerts)
largestPolygon = i;
}
// Swap the largest polygon into slot 0.
if(largestPolygon != 0)
{
PxHullPolygon movedPolygon = hullPolygons[0];
hullPolygons[0] = hullPolygons[largestPolygon];
hullPolygons[largestPolygon] = movedPolygon;
}
}
}
const bool doValidation = desc.flags & PxConvexFlag::eDISABLE_MESH_VALIDATION ? false : true;
if(!hullBuilder.init(desc.points.count, geometry, topology, desc.indices.count, desc.polygons.count, hullPolygons, doValidation, hullLib))
return outputError<PxErrorCode::eINTERNAL_ERROR>(__LINE__, "Gu::ConvexMesh::loadConvexHull: convex hull init failed!");
// eFAST_INERTIA_COMPUTATION selects the lower-precision (SIMD) integrator.
computeMassInfo(desc.flags & PxConvexFlag::eFAST_INERTIA_COMPUTATION);
return true;
}
#if PX_VC
#pragma warning(pop)
#endif
// compute polygons from given triangles. This is support function used in extensions. We do not accept triangles as an input for convex mesh desc.
// Computes convex polygons from a user-provided triangle soup (extensions
// helper; triangles are not accepted directly by PxConvexMeshDesc). All output
// arrays are allocated through 'inAllocator' and owned by the caller. Returns
// false when the input triangles do not form a convex hull.
bool ConvexMeshBuilder::computeHullPolygons(const PxU32& nbVerts,const PxVec3* verts, const PxU32& nbTriangles, const PxU32* triangles, PxAllocatorCallback& inAllocator,
PxU32& outNbVerts, PxVec3*& outVertices , PxU32& nbIndices, PxU32*& indices, PxU32& nbPolygons, PxHullPolygon*& polygons)
{
if(!hullBuilder.computeHullPolygons(nbVerts,verts,nbTriangles,triangles))
return outputError<PxErrorCode::eINTERNAL_ERROR>(__LINE__, "ConvexMeshBuilder::computeHullPolygons: compute convex hull polygons failed. Provided triangles dont form a convex hull.");
outNbVerts = hullBuilder.mHull->mNbHullVertices;
nbPolygons = hullBuilder.mHull->mNbPolygons;
// Copy out the hull vertices.
outVertices = reinterpret_cast<PxVec3*>(inAllocator.allocate(outNbVerts*sizeof(PxVec3),"PxVec3",__FILE__,__LINE__));
PxMemCopy(outVertices,hullBuilder.mHullDataHullVertices,outNbVerts*sizeof(PxVec3));
// Total index count is the sum of the per-polygon vertex counts.
nbIndices = 0;
for (PxU32 i = 0; i < nbPolygons; i++)
{
nbIndices += hullBuilder.mHullDataPolygons[i].mNbVerts;
}
// Widen the builder's 8-bit vertex indices to 32-bit for the public API.
indices = reinterpret_cast<PxU32*>(inAllocator.allocate(nbIndices*sizeof(PxU32),"PxU32",__FILE__,__LINE__));
for (PxU32 i = 0; i < nbIndices; i++)
{
indices[i] = hullBuilder.mHullDataVertexData8[i];
}
// Convert the internal polygon records to the public PxHullPolygon layout.
polygons = reinterpret_cast<PxHullPolygon*>(inAllocator.allocate(nbPolygons*sizeof(PxHullPolygon),"PxHullPolygon",__FILE__,__LINE__));
for (PxU32 i = 0; i < nbPolygons; i++)
{
const Gu::HullPolygonData& polygonData = hullBuilder.mHullDataPolygons[i];
PxHullPolygon& outPolygon = polygons[i];
outPolygon.mPlane[0] = polygonData.mPlane.n.x;
outPolygon.mPlane[1] = polygonData.mPlane.n.y;
outPolygon.mPlane[2] = polygonData.mPlane.n.z;
outPolygon.mPlane[3] = polygonData.mPlane.d;
outPolygon.mNbVerts = polygonData.mNbVerts;
outPolygon.mIndexBase = polygonData.mVRef8;
// Debug-only sanity check that the widened indices match the source data.
for (PxU32 j = 0; j < polygonData.mNbVerts; j++)
{
PX_ASSERT(indices[outPolygon.mIndexBase + j] == hullBuilder.mHullDataVertexData8[polygonData.mVRef8+j]);
}
}
return true;
}
// compute big convex data
bool ConvexMeshBuilder::computeGaussMaps()
{
// The number of polygons is limited to 256 because the gaussmap encode 256 polys maximum
PxU32 density = 16;
// density = 64;
// density = 8;
// density = 2;
PX_DELETE(mBigConvexData);
PX_NEW_SERIALIZED(mBigConvexData,BigConvexData);
BigConvexDataBuilder SVMB(&mHullData, mBigConvexData, hullBuilder.mHullDataHullVertices);
// valencies we need to compute first, they are needed for min/max precompute
SVMB.computeValencies(hullBuilder);
SVMB.precompute(density);
return true;
}
// TEST_INTERNAL_OBJECTS
// Computes the extents of an internal (inscribed) box for the hull, written to
// data.mInternal.mExtents. The box is centered at the center of mass; it is
// first maximized along the hull's largest AABB axis, then refined along the
// two remaining axes (which end up with equal extent). Assumes
// data.mInternal.mRadius has already been computed by the caller.
static void ComputeInternalExtent(Gu::ConvexHullData& data, const Gu::HullPolygonData* hullPolys)
{
const PxVec3 e = data.mAABB.getMax() - data.mAABB.getMin();
// PT: For that formula, see %SDKRoot%\InternalDocumentation\Cooking\InternalExtents.png
// r: half-edge of the cube inscribed in the internal sphere of radius mRadius.
const float r = data.mInternal.mRadius / sqrtf(3.0f);
const float epsilon = 1E-7f;
const PxU32 largestExtent = PxLargestAxis(e);
// e0/e1: the two remaining axes, ordered so that e[e0] >= e[e1].
PxU32 e0 = PxGetNextIndex3(largestExtent);
PxU32 e1 = PxGetNextIndex3(e0);
if(e[e0] < e[e1])
PxSwap<PxU32>(e0,e1);
data.mInternal.mExtents[0] = FLT_MAX;
data.mInternal.mExtents[1] = FLT_MAX;
data.mInternal.mExtents[2] = FLT_MAX;
// PT: the following code does ray-vs-plane raycasts.
// find the largest box along the largest extent, with given internal radius
for(PxU32 i = 0; i < data.mNbPolygons; i++)
{
// concurrent with search direction
// Skip planes (near-)parallel to the search direction.
const float d = hullPolys[i].mPlane.n[largestExtent];
if((-epsilon < d && d < epsilon))
continue;
const float numBase = -hullPolys[i].mPlane.d - hullPolys[i].mPlane.n.dot(data.mCenterOfMass);
const float denBase = 1.0f/hullPolys[i].mPlane.n[largestExtent];
const float numn0 = r * hullPolys[i].mPlane.n[e0];
const float numn1 = r * hullPolys[i].mPlane.n[e1];
// Test the four corner rays (+-r along e0, +-r along e1) against the plane
// and keep the smallest resulting extent (clamped to r from below).
float num = numBase - numn0 - numn1;
float ext = PxMax(fabsf(num*denBase), r);
if(ext < data.mInternal.mExtents[largestExtent])
data.mInternal.mExtents[largestExtent] = ext;
num = numBase - numn0 + numn1;
ext = PxMax(fabsf(num *denBase), r);
if(ext < data.mInternal.mExtents[largestExtent])
data.mInternal.mExtents[largestExtent] = ext;
num = numBase + numn0 + numn1;
ext = PxMax(fabsf(num *denBase), r);
if(ext < data.mInternal.mExtents[largestExtent])
data.mInternal.mExtents[largestExtent] = ext;
num = numBase + numn0 - numn1;
ext = PxMax(fabsf(num *denBase), r);
if(ext < data.mInternal.mExtents[largestExtent])
data.mInternal.mExtents[largestExtent] = ext;
}
// Refine the box along e0,e1
for(PxU32 i = 0; i < data.mNbPolygons; i++)
{
const float denumAdd = hullPolys[i].mPlane.n[e0] + hullPolys[i].mPlane.n[e1];
const float denumSub = hullPolys[i].mPlane.n[e0] - hullPolys[i].mPlane.n[e1];
const float numBase = -hullPolys[i].mPlane.d - hullPolys[i].mPlane.n.dot(data.mCenterOfMass);
const float numn0 = data.mInternal.mExtents[largestExtent] * hullPolys[i].mPlane.n[largestExtent];
if(!(-epsilon < denumAdd && denumAdd < epsilon))
{
float num = numBase - numn0;
float ext = PxMax(fabsf(num/ denumAdd), r);
if(ext < data.mInternal.mExtents[e0])
data.mInternal.mExtents[e0] = ext;
num = numBase + numn0;
ext = PxMax(fabsf(num / denumAdd), r);
if(ext < data.mInternal.mExtents[e0])
data.mInternal.mExtents[e0] = ext;
}
if(!(-epsilon < denumSub && denumSub < epsilon))
{
float num = numBase - numn0;
float ext = PxMax(fabsf(num / denumSub), r);
if(ext < data.mInternal.mExtents[e0])
data.mInternal.mExtents[e0] = ext;
num = numBase + numn0;
ext = PxMax(fabsf(num / denumSub), r);
if(ext < data.mInternal.mExtents[e0])
data.mInternal.mExtents[e0] = ext;
}
}
// The cross-section perpendicular to the largest axis is kept square.
data.mInternal.mExtents[e1] = data.mInternal.mExtents[e0];
}
//////////////////////////////////////////////////////////////////////////
// compute internal objects, get the internal extent and radius
void ConvexMeshBuilder::computeInternalObjects()
{
const Gu::HullPolygonData* hullPolys = hullBuilder.mHullDataPolygons;
Gu::ConvexHullData& data = mHullData;
// compute the internal radius
data.mInternal.mRadius = FLT_MAX;
for(PxU32 i=0;i<data.mNbPolygons;i++)
{
const float dist = fabsf(hullPolys[i].mPlane.distance(data.mCenterOfMass));
if(dist<data.mInternal.mRadius)
data.mInternal.mRadius = dist;
}
ComputeInternalExtent(data, hullPolys);
PX_ASSERT(PxVec3(mHullData.mInternal.mExtents[0], mHullData.mInternal.mExtents[1], mHullData.mInternal.mExtents[2]).isFinite());
PX_ASSERT(mHullData.mInternal.mExtents[0] != 0.0f);
PX_ASSERT(mHullData.mInternal.mExtents[1] != 0.0f);
PX_ASSERT(mHullData.mInternal.mExtents[2] != 0.0f);
}
// Validates the ratio between the hull's internal extents and internal radius;
// simply delegates to the hull data's own check.
bool ConvexMeshBuilder::checkExtentRadiusRatio()
{
return mHullData.checkExtentRadiusRatio();
}
// Builds the signed-distance-field (SDF) representation of the convex mesh.
// The hull polygons are fan-triangulated into a temporary triangle mesh, fed to the
// generic buildSDF() path, and the resulting (dense or sparse) SDF data is copied
// into the freshly (re)allocated mSdfData.
void ConvexMeshBuilder::computeSDF(const PxConvexMeshDesc& desc)
{
// Release any previous SDF object and create a fresh serializable one.
PX_DELETE(mSdfData);
PX_NEW_SERIALIZED(mSdfData, SDF);
//create triangle mesh from polygons
const PxU32 nbPolygons = mHullData.mNbPolygons;
PxU32 nbVerts = mHullData.mNbHullVertices;
const Gu::HullPolygonData* hullPolys = hullBuilder.mHullDataPolygons;
const PxU8* polygons = hullBuilder.mHullDataVertexData8;
const PxVec3* verts = hullBuilder.mHullDataHullVertices;
//compute total number of triangles
// A convex polygon with N vertices fan-triangulates into N-2 triangles.
PxU32 numTotalTriangles = 0;
for (PxU32 i = 0; i < nbPolygons; ++i)
{
const Gu::HullPolygonData& polyData = hullPolys[i];
const PxU32 nbTriangles = polyData.mNbVerts - 2;
numTotalTriangles += nbTriangles;
}
// Fan-triangulate every polygon around its first vertex.
PxArray<PxU32> triangleIndice(numTotalTriangles * 3);
PxU32 startIndex = 0;
for (PxU32 i = 0; i < nbPolygons; ++i)
{
const Gu::HullPolygonData& polyData = hullPolys[i];
const PxU32 nbTriangles = polyData.mNbVerts - 2;
const PxU8 vref0 = polygons[polyData.mVRef8]; // fan apex: first vertex of the polygon
for (PxU32 j = 0; j < nbTriangles; ++j)
{
const PxU32 index = startIndex + j * 3;
const PxU32 vref1 = polygons[polyData.mVRef8 + 0 + j + 1];
const PxU32 vref2 = polygons[polyData.mVRef8 + 0 + j + 2];
triangleIndice[index + 0] = vref0;
triangleIndice[index + 1] = vref1;
triangleIndice[index + 2] = vref2;
}
startIndex += nbTriangles * 3;
}
PxArray<PxReal> sdfData;
PxArray<PxU8> sdfDataSubgrids;
PxArray<PxU32> sdfSubgridsStartSlots;
// Describe the temporary triangle mesh (32-bit indices) for the SDF builder.
PxTriangleMeshDesc triDesc;
triDesc.points.count = nbVerts;
triDesc.points.stride = sizeof(PxVec3);
triDesc.points.data = verts;
triDesc.triangles.count = numTotalTriangles;
triDesc.triangles.stride = sizeof(PxU32) * 3;
triDesc.triangles.data = triangleIndice.begin();
triDesc.flags &= (~PxMeshFlag::e16_BIT_INDICES);
triDesc.sdfDesc = desc.sdfDesc;
buildSDF(triDesc, sdfData, sdfDataSubgrids, sdfSubgridsStartSlots);
PxSDFDesc& sdfDesc = *desc.sdfDesc;
// Allocate the final SDF storage sized from the descriptor filled by buildSDF().
PxReal* sdf = mSdfData->allocateSdfs(sdfDesc.meshLower, sdfDesc.spacing, sdfDesc.dims.x, sdfDesc.dims.y, sdfDesc.dims.z,
sdfDesc.subgridSize, sdfDesc.sdfSubgrids3DTexBlockDim.x, sdfDesc.sdfSubgrids3DTexBlockDim.y, sdfDesc.sdfSubgrids3DTexBlockDim.z,
sdfDesc.subgridsMinSdfValue, sdfDesc.subgridsMaxSdfValue, sdfDesc.bitsPerSubgridPixel);
if (sdfDesc.subgridSize > 0)
{
//Sparse sdf
// Copy coarse grid, subgrid texels and subgrid start slots, removing strides.
immediateCooking::gatherStrided(sdfDesc.sdf.data, sdf, sdfDesc.sdf.count, sizeof(PxReal), sdfDesc.sdf.stride);
immediateCooking::gatherStrided(sdfDesc.sdfSubgrids.data, mSdfData->mSubgridSdf,
sdfDesc.sdfSubgrids.count,
sizeof(PxU8), sdfDesc.sdfSubgrids.stride);
immediateCooking::gatherStrided(sdfDesc.sdfStartSlots.data, mSdfData->mSubgridStartSlots, sdfDesc.sdfStartSlots.count, sizeof(PxU32), sdfDesc.sdfStartSlots.stride);
}
else
{
//copy, and compact to get rid of strides:
immediateCooking::gatherStrided(sdfDesc.sdf.data, sdf, sdfDesc.dims.x * sdfDesc.dims.y * sdfDesc.dims.z, sizeof(PxReal), sdfDesc.sdf.stride);
}
}
//~TEST_INTERNAL_OBJECTS
| 22,642 | C++ | 34.94127 | 185 | 0.736596 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/cooking/GuCookingConvexPolygonsBuilder.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxMemory.h"
#include "foundation/PxBasicTemplates.h"
#include "foundation/PxAlloca.h"
#include "foundation/PxUserAllocated.h"
#include "GuAdjacencies.h"
#include "GuMeshCleaner.h"
#include "GuVertexReducer.h"
#include "foundation/PxArray.h"
#include "GuCookingConvexPolygonsBuilder.h"
using namespace physx;
using namespace Gu;
#define USE_PRECOMPUTED_HULL_PROJECTION
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
* Computes the center of the hull. It should be inside it !
* \param center [out] hull center
* \return true if success
*/
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/**
 * Computes the center of the hull. It should be inside it !
 * \param center [out] hull center
 * \param numFaces [in] number of hull faces
 * \param faces [in] hull triangles
 * \param hullVerts [in] hull vertices
 * \param nbHullVertices [in] number of hull vertices
 * \return true if success
 */
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
static bool computeGeomCenter(PxVec3& center, PxU32 numFaces, const IndexedTriangle32* PX_RESTRICT faces, const PxVec3* PX_RESTRICT hullVerts, PxU32 nbHullVertices)
{
	if (!nbHullVertices || !hullVerts)
		return false;

	// Use the topological method: area-weighted average of the triangle centers.
	float totalArea = 0.0f;
	center = PxVec3(0);
	for (PxU32 i = 0; i < numFaces; i++)
	{
		IndexedTriangle32 curTri(faces[i].mRef[0], faces[i].mRef[1], faces[i].mRef[2]);
		const float area = curTri.area(hullVerts);
		PxVec3 curCenter; curTri.center(hullVerts, curCenter);
		center += area * curCenter;
		totalArea += area;
	}

	// Guard against a degenerate face set (no faces, or all faces with zero area):
	// dividing by a zero total area would produce NaNs/infinities. Fall back to the
	// plain average of the hull vertices, which is still a sensible interior point.
	if (totalArea > 0.0f)
	{
		center /= totalArea;
	}
	else
	{
		center = PxVec3(0);
		for (PxU32 i = 0; i < nbHullVertices; i++)
			center += hullVerts[i];
		center /= float(nbHullVertices);
	}
	return true;
}
//////////////////////////////////////////////////////////////////////////
//! A generic couple structure
//! A simple pair of 32-bit indices (used below to store hull edge endpoints).
//! Layout is exactly two PxU32 (see the sizeof==8 compile-time assert below).
class Pair : public PxUserAllocated
{
public:
	PX_FORCE_INLINE	Pair()								{}
	PX_FORCE_INLINE	Pair(PxU32 i0, PxU32 i1) : id0(i0), id1(i1)	{}
	PX_FORCE_INLINE	~Pair()								{}

	//! Two pairs are equal when both indices match (order-sensitive).
	PX_FORCE_INLINE	bool operator==(const Pair& p) const	{ return id0==p.id0 && id1==p.id1;	}
	//! Inequality is the negation of equality.
	PX_FORCE_INLINE	bool operator!=(const Pair& p) const	{ return !(*this==p);				}

	PxU32 id0; //!< First index of the pair
	PxU32 id1; //!< Second index of the pair
};
PX_COMPILE_TIME_ASSERT(sizeof(Pair)==8);
//////////////////////////////////////////////////////////////////////////
// construct a plane
// Builds the plane passing through the three vertices of an indexed triangle.
// T must expose an mRef[3] index array (e.g. IndexedTriangle32).
template <class T>
PX_INLINE PxPlane PlaneEquation(const T& t, const PxVec3* verts)
{
	return PxPlane(verts[t.mRef[0]], verts[t.mRef[1]], verts[t.mRef[2]]);
}
//////////////////////////////////////////////////////////////////////////
// negate plane
// Flips a polygon's plane in place: the negated normal and distance describe the
// same geometric plane with opposite orientation.
static PX_FORCE_INLINE void negatePlane(HullPolygonData& data)
{
	PxPlane& plane = data.mPlane;
	plane.d = -plane.d;
	plane.n = -plane.n;
}
//////////////////////////////////////////////////////////////////////////
// Inverse a buffer in-place
// Reverses a byte buffer in place. Returns false on empty/null input.
static bool inverseBuffer(PxU32 nbEntries, PxU8* entries)
{
	if(!nbEntries || !entries)
		return false;

	// Classic two-index swap, walking inwards from both ends.
	PxU32 lo = 0;
	PxU32 hi = nbEntries - 1;
	while(lo < hi)
		PxSwap(entries[lo++], entries[hi--]);
	return true;
}
//////////////////////////////////////////////////////////////////////////
// Extracts a line-strip from a list of non-sorted line-segments (slow)
// Extracts a closed line-strip from a list of non-sorted line-segments (slow).
// First removes duplicated segments pairwise (interior edges appear twice), then
// chains the survivors by matching shared vertex references.
// Returns true when all segments were consumed into one strip, false otherwise.
static bool findLineStrip(PxArray<PxU32>& lineStrip, const PxArray<Pair>& lineSegments)
{
// Ex:
//
// 4-2
// 0-1
// 2-3
// 4-0
// 7-3
// 7-1
//
// => 0-1-7-3-2-4-0
// 0-0-1-1-2-2-3-3-4-4-7-7
// 0-1
// 0-4
// 1-7
// 2-3
// 2-4
// 3-7
// Naive implementation below
// Work on a copy so the input stays untouched.
PxArray<Pair> Copy(lineSegments);
RunAgain:
{
// O(n^2) scan: whenever a segment appears twice (in either orientation), both
// occurrences are removed and the scan restarts from scratch.
PxU32 nbSegments = Copy.size();
for(PxU32 j=0;j<nbSegments;j++)
{
PxU32 ID0 = Copy[j].id0;
PxU32 ID1 = Copy[j].id1;
for(PxU32 i=j+1;i<nbSegments;i++)
{
if(
(Copy[i].id0==ID0 && Copy[i].id1==ID1)
|| (Copy[i].id1==ID0 && Copy[i].id0==ID1)
)
{
// Duplicate segment found => remove both
PX_ASSERT(Copy.size()>=2);
Copy.remove(i);
Copy.remove(j);
goto RunAgain;
}
}
}
// Goes through when everything's fine
}
// Seed the strip with the first remaining segment.
PxU32 ref0 = 0xffffffff;
PxU32 ref1 = 0xffffffff;
if(Copy.size()>=1)
{
Pair* Segments = Copy.begin();
if(Segments)
{
ref0 = Segments->id0;
ref1 = Segments->id1;
lineStrip.pushBack(ref0);
lineStrip.pushBack(ref1);
PX_ASSERT(Copy.size()>=1);
Copy.remove(0);
}
}
Wrap:
// Look for same vertex ref in remaining segments
PxU32 nb = Copy.size();
if(!nb)
{
// ### check the line is actually closed?
return true;
}
// Find a segment sharing the strip's current tail vertex (ref1) and append its
// other endpoint; repeat until no segments remain (success) or none match (failure).
for(PxU32 i=0;i<nb;i++)
{
PxU32 newRef0 = Copy[i].id0;
PxU32 newRef1 = Copy[i].id1;
// We look for Ref1 only
if(newRef0==ref1)
{
// r0 - r1
// r1 - x
lineStrip.pushBack(newRef1); // Output the other reference
ref0 = newRef0;
ref1 = newRef1;
Copy.remove(i);
goto Wrap;
}
else if(newRef1==ref1)
{
// r0 - r1
// x - r1 => r1 - x
lineStrip.pushBack(newRef0); // Output the other reference
ref0 = newRef1;
ref1 = newRef0;
Copy.remove(i);
goto Wrap;
}
}
// Leftover segments could not be chained: extraction failed.
return false;
}
//////////////////////////////////////////////////////////////////////////
// Test for duplicate triangles
PX_COMPILE_TIME_ASSERT(sizeof(IndexedTriangle32)==sizeof(PxVec3)); // ...
// Tests for (and optionally removes) duplicate triangles by treating each triple of
// 32-bit indices as a PxVec3 "point" and running the vertex-cloud reducer over them
// (hence the sizeof compile-time assert above). Returns true when no duplicates exist.
// NOTE(review): this detects byte-identical index triples only; rotated/flipped
// duplicates with different index order are presumably handled elsewhere - confirm.
static bool TestDuplicateTriangles(PxU32& nbFaces, IndexedTriangle32* faces, bool repair)
{
if(!nbFaces || !faces)
return true;
// Stack copy of the index triples, reinterpreted as points for the reducer.
IndexedTriangle32* indices32 = reinterpret_cast<IndexedTriangle32*>(PxAlloca(nbFaces*sizeof(IndexedTriangle32)));
for(PxU32 i=0;i<nbFaces;i++)
{
indices32[i].mRef[0] = faces[i].mRef[0];
indices32[i].mRef[1] = faces[i].mRef[1];
indices32[i].mRef[2] = faces[i].mRef[2];
}
// Radix-sort power...
ReducedVertexCloud reducer(reinterpret_cast<PxVec3*>(indices32), nbFaces);
REDUCEDCLOUD rc;
reducer.reduce(&rc);
// Fewer reduced "vertices" than faces means duplicates were found.
if(rc.NbRVerts<nbFaces)
{
if(repair)
{
// Overwrite the face list with the de-duplicated set and shrink the count.
nbFaces = rc.NbRVerts;
for(PxU32 i=0;i<nbFaces;i++)
{
const IndexedTriangle32* curTri = reinterpret_cast<const IndexedTriangle32*>(&rc.RVerts[i]);
faces[i].mRef[0] = curTri->mRef[0];
faces[i].mRef[1] = curTri->mRef[1];
faces[i].mRef[2] = curTri->mRef[2];
}
}
return false; // Test failed
}
return true; // Test succeeded
}
//////////////////////////////////////////////////////////////////////////
// plane culling test
// Returns true when the given point lies on the positive side of the triangle's
// plane, i.e. the triangle "sees" the point.
static PX_FORCE_INLINE bool testCulling(const IndexedTriangle32& triangle, const PxVec3* verts, const PxVec3& center)
{
	const PxVec3& p0 = verts[triangle.mRef[0]];
	const PxVec3& p1 = verts[triangle.mRef[1]];
	const PxVec3& p2 = verts[triangle.mRef[2]];
	return PxPlane(p0, p1, p2).distance(center) > 0.0f;
}
//////////////////////////////////////////////////////////////////////////
// face normals test
// Checks (and optionally repairs) the winding of all hull faces so their normals
// point outwards. Returns false when at least one face was badly oriented.
static bool TestUnifiedNormals(PxU32 nbVerts, const PxVec3* verts, PxU32 nbFaces, IndexedTriangle32* faces, bool repair)
{
	if(!nbVerts || !verts || !nbFaces || !faces)
		return false;

	// Compute the geometric center of the vertex cloud - a point that must lie
	// inside the hull, assuming the hull is convex.
	const float invNbVerts = 1.0f / float(nbVerts);
	PxVec3 interiorPoint(0.0f, 0.0f, 0.0f);
	for(PxU32 i=0; i<nbVerts; i++)
	{
		interiorPoint.x += verts[i].x * invNbVerts;
		interiorPoint.y += verts[i].y * invNbVerts;
		interiorPoint.z += verts[i].z * invNbVerts;
	}

	// For a convex hull with strict CCW winding the interior point is invisible
	// from every face. Any face that sees it has reversed winding; flip it when
	// repair is requested.
	bool allWellOriented = true;
	for(PxU32 i=0; i<nbFaces; i++)
	{
		if(testCulling(faces[i], verts, interiorPoint))
		{
			allWellOriented = false;
			if(repair)
				faces[i].flip();
		}
	}
	return allWellOriented;
}
//////////////////////////////////////////////////////////////////////////
// clean the mesh
// Cleans the hull mesh in place: welds/removes degenerate geometry via MeshCleaner,
// then removes duplicate triangles and unifies face winding. Both counts and both
// buffers are updated in place. Returns false when cleaning leaves no triangles.
static bool CleanFaces(PxU32& nbFaces, IndexedTriangle32* faces, PxU32& nbVerts, PxVec3* verts)
{
// Brute force mesh cleaning.
// PT: I added this back on Feb-18-05 because it fixes bugs with hulls from QHull.
MeshCleaner cleaner(nbVerts, verts, nbFaces, faces->mRef, 0.0f, 0.0f);
if (!cleaner.mNbTris)
return false;
// Copy the cleaned vertices/triangles back over the caller's buffers.
nbVerts = cleaner.mNbVerts;
nbFaces = cleaner.mNbTris;
PxMemCopy(verts, cleaner.mVerts, cleaner.mNbVerts*sizeof(PxVec3));
for (PxU32 i = 0; i < cleaner.mNbTris; i++)
{
faces[i].mRef[0] = cleaner.mIndices[i * 3 + 0];
faces[i].mRef[1] = cleaner.mIndices[i * 3 + 1];
faces[i].mRef[2] = cleaner.mIndices[i * 3 + 2];
}
// Get rid of duplicates
TestDuplicateTriangles(nbFaces, faces, true);
// Unify normals
TestUnifiedNormals(nbVerts, verts, nbFaces, faces, true);
// Remove zero-area triangles
// TestZeroAreaTriangles(nbFaces, faces, verts, true);
// Unify normals again
TestUnifiedNormals(nbVerts, verts, nbFaces, faces, true);
// Get rid of duplicates again
TestDuplicateTriangles(nbFaces, faces, true);
return true;
}
//////////////////////////////////////////////////////////////////////////
// check the newly constructed faces
// Runs validation-only passes over the freshly cleaned faces: no duplicate
// triangles and consistently unified normals. The input data is not modified.
static bool CheckFaces(PxU32 nbFaces, const IndexedTriangle32* faces, PxU32 nbVerts, const PxVec3* verts)
{
	// The test functions can also repair, hence the non-const signature; with
	// repair==false they only read, so dropping const here is safe.
	IndexedTriangle32* mutableFaces = const_cast<IndexedTriangle32*>(faces);

	// Short-circuit: normals are only checked when the duplicate test passes,
	// matching the original early-out behavior.
	return TestDuplicateTriangles(nbFaces, mutableFaces, false)
		&& TestUnifiedNormals(nbVerts, verts, nbFaces, mutableFaces, false);
}
//////////////////////////////////////////////////////////////////////////
// compute the newell plane from the face verts
// Computes a best-fit plane for a (possibly non-planar) polygon using Newell's
// method: the normal accumulates signed projected areas over consecutive edge
// pairs, and the plane offset is taken at the vertex centroid.
// Returns false on empty/null input.
static bool computeNewellPlane(PxPlane& plane, PxU32 nbVerts, const PxU8* indices, const PxVec3* verts)
{
if(!nbVerts || !indices || !verts)
return false;
PxVec3 centroid(0,0,0), normal(0,0,0);
// i trails j by one vertex, wrapping around: each iteration handles edge (i -> j).
for(PxU32 i=nbVerts-1, j=0; j<nbVerts; i=j, j++)
{
normal.x += (verts[indices[i]].y - verts[indices[j]].y) * (verts[indices[i]].z + verts[indices[j]].z);
normal.y += (verts[indices[i]].z - verts[indices[j]].z) * (verts[indices[i]].x + verts[indices[j]].x);
normal.z += (verts[indices[i]].x - verts[indices[j]].x) * (verts[indices[i]].y + verts[indices[j]].y);
centroid += verts[indices[j]];
}
plane.n = normal;
plane.n.normalize();
// d = -(centroid/nbVerts).dot(n), folded into a single division.
plane.d = -(centroid.dot(plane.n))/float(nbVerts);
return true;
}
/**
* Analyses a redundant vertices and splits the polygons if necessary.
* \relates ConvexHull
* \fn extractHullPolygons(Container& polygon_data, const ConvexHull& hull)
* \param nb_polygons [out] number of extracted polygons
* \param polygon_data [out] polygon data: (Nb indices, index 0, index 1... index N)(Nb indices, index 0, index 1... index N)(...)
* \param hull [in] convex hull
* \param redundantVertices [out] redundant vertices found inside the polygons - we want to remove them because of PCM
*/
// For each redundant vertex, finds the polygons referencing it. If any of those
// polygons is already a triangle the vertex cannot simply be dropped; instead the
// larger polygons referencing it are split back into their source triangles, and the
// vertex is removed from the redundant list (it is "solved" by splitting).
// polygon_data/triangle_data/nb_polygons are rewritten in place when splits occur.
static void checkRedundantVertices(PxU32& nb_polygons, PxArray<PxU32>& polygon_data, const ConvexPolygonsBuilder& hull, PxArray<PxU32>& triangle_data, PxArray<PxU32>& redundantVertices)
{
const PxU32* dFaces = reinterpret_cast<const PxU32*>(hull.getFaces());
bool needToSplitPolygons = false;
// polygonMarkers[t] - polygon t must be split back into triangles.
bool* polygonMarkers = reinterpret_cast<bool*>(PxAlloca(nb_polygons*sizeof(bool)));
PxMemZero(polygonMarkers, nb_polygons*sizeof(bool));
// redundancyMarkers[i] - redundant vertex i is handled by splitting, drop it from the list.
bool* redundancyMarkers = reinterpret_cast<bool*>(PxAlloca(redundantVertices.size()*sizeof(bool)));
PxMemZero(redundancyMarkers, redundantVertices.size()*sizeof(bool));
// parse through the redundant vertices and if we cannot remove them split just the actual polygon if possible
PxArray<PxU32> polygonsContainer;
PxU32 numEntries = 0;
for (PxU32 i = redundantVertices.size(); i--;)
{
numEntries = 0;
polygonsContainer.clear();
// go through polygons, if polygons does have only 3 verts we cannot remove any vertex from it, try to decompose the second one
// Collect (polygonIndex, vertexCount) pairs for every polygon referencing this vertex.
PxU32* Data = polygon_data.begin();
for(PxU32 t=0;t<nb_polygons;t++)
{
PxU32 nbVerts = *Data++;
PX_ASSERT(nbVerts>=3); // Else something very wrong happened...
for(PxU32 j=0;j<nbVerts;j++)
{
if(redundantVertices[i] == Data[j])
{
polygonsContainer.pushBack(t);
polygonsContainer.pushBack(nbVerts);
numEntries++;
break;
}
}
Data += nbVerts;
}
// A referencing triangle means the vertex cannot be removed by shrinking polygons.
bool needToSplit = false;
for (PxU32 j = 0; j < numEntries; j++)
{
PxU32 numInternalVertices = polygonsContainer[j*2 + 1];
if(numInternalVertices == 3)
{
needToSplit = true;
}
}
// now lets mark the polygons for split
if(needToSplit)
{
// mark the redundant vertex, it is solved by spliting, dont report it
needToSplitPolygons = true;
redundancyMarkers[i] = true;
// Mark every non-triangle polygon referencing this vertex for splitting.
for (PxU32 j = 0; j < numEntries; j++)
{
PxU32 polygonNumber = polygonsContainer[j*2];
PxU32 numInternalPolygons = polygonsContainer[j*2 + 1];
if(numInternalPolygons != 3)
{
polygonMarkers[polygonNumber] = true;
}
}
}
}
if(needToSplitPolygons)
{
// parse from the end so we can remove it and not change the order
for (PxU32 i = redundantVertices.size(); i--;)
{
// remove it
if(redundancyMarkers[i])
{
redundantVertices.remove(i);
}
}
// Rebuild polygon/triangle streams: marked polygons become one entry per source
// triangle, unmarked polygons are copied through unchanged.
PxArray<PxU32> newPolygon_data;
PxArray<PxU32> newTriangle_data;
PxU32 newNb_polygons = 0;
PxU32* data = polygon_data.begin();
PxU32* triData = triangle_data.begin();
for(PxU32 i=0;i<nb_polygons;i++)
{
PxU32 nbVerts = *data++;
PxU32 nbTris = *triData++;
if(polygonMarkers[i])
{
// split the polygon into triangles
for(PxU32 k=0;k< nbTris; k++)
{
newNb_polygons++;
const PxU32 faceIndex = triData[k];
newPolygon_data.pushBack(PxU32(3));
newPolygon_data.pushBack(dFaces[3*faceIndex]);
newPolygon_data.pushBack(dFaces[3*faceIndex + 1]);
newPolygon_data.pushBack(dFaces[3*faceIndex + 2]);
newTriangle_data.pushBack(PxU32(1));
newTriangle_data.pushBack(faceIndex);
}
}
else
{
newNb_polygons++;
// copy the original polygon
newPolygon_data.pushBack(nbVerts);
for(PxU32 j=0;j<nbVerts;j++)
newPolygon_data.pushBack(data[j]);
// copy the original polygon triangles
newTriangle_data.pushBack(nbTris);
for(PxU32 k=0;k< nbTris; k++)
{
newTriangle_data.pushBack(triData[k]);
}
}
data += nbVerts;
triData += nbTris;
}
// now put the data to output
polygon_data.clear();
triangle_data.clear();
// the copy does copy even the data
polygon_data = newPolygon_data;
triangle_data = newTriangle_data;
nb_polygons = newNb_polygons;
}
}
/**
* Analyses a convex hull made of triangles and extracts polygon data out of it.
* \relates ConvexHull
* \fn extractHullPolygons(PxArray<PxU32>& polygon_data, const ConvexHull& hull)
* \param nb_polygons [out] number of extracted polygons
* \param polygon_data [out] polygon data: (Nb indices, index 0, index 1... index N)(Nb indices, index 0, index 1... index N)(...)
* \param hull [in] convex hull
* \param triangle_data [out] triangle data
* \param rendundantVertices [out] redundant vertices found inside the polygons - we want to remove them because of PCM
* \return true if success
*/
// Extracts polygon data from a triangle-based convex hull: builds triangle
// adjacencies, floodfills across inactive (coplanar) edges to group triangles into
// polygons, extracts each polygon's boundary as a line strip, and records vertices
// that end up interior to a polygon (redundant for PCM) so they can be removed or
// solved by splitting (see checkRedundantVertices). Returns false on failure.
static bool extractHullPolygons(PxU32& nb_polygons, PxArray<PxU32>& polygon_data, const ConvexPolygonsBuilder& hull, PxArray<PxU32>* triangle_data, PxArray<PxU32>& rendundantVertices)
{
PxU32 nbFaces = hull.getNbFaces();
const PxVec3* hullVerts = hull.mHullDataHullVertices;
const PxU32 nbVertices = hull.mHull->mNbHullVertices;
// Only 32-bit faces are used here; wFaces stays null (see assert below).
const PxU16* wFaces = NULL;
const PxU32* dFaces = reinterpret_cast<const PxU32*>(hull.getFaces());
PX_ASSERT(wFaces || dFaces);
// Build triangle adjacencies; "active" edges are those between non-coplanar faces.
ADJACENCIESCREATE create;
create.NbFaces = nbFaces;
create.DFaces = dFaces;
create.WFaces = wFaces;
create.Verts = hullVerts;
//Create.Epsilon = 0.01f; // PT: trying to fix Rob Elam bug. Also fixes TTP 2467
// Create.Epsilon = 0.001f; // PT: for "Bruno's bug"
create.Epsilon = 0.005f; // PT: middle-ground seems to fix both. Expose this param?
AdjacenciesBuilder adj;
if(!adj.Init(create)) return false;
PxU32 nbBoundaryEdges = adj.ComputeNbBoundaryEdges();
if(nbBoundaryEdges) return false; // A valid hull shouldn't have open edges!!
// markers[f] - triangle f already belongs to an extracted polygon.
bool* markers = reinterpret_cast<bool*>(PxAlloca(nbFaces*sizeof(bool)));
PxMemZero(markers, nbFaces*sizeof(bool));
// vertexMarkers[v] counts how many polygon boundaries reference vertex v.
PxU8* vertexMarkers = reinterpret_cast<PxU8*>(PxAlloca(nbVertices*sizeof(PxU8)));
PxMemZero(vertexMarkers, nbVertices*sizeof(PxU8));
PxU32 currentFace = 0; // Start with first triangle
nb_polygons = 0;
do
{
// Pick the next triangle not yet assigned to a polygon.
currentFace = 0;
while(currentFace<nbFaces && markers[currentFace]) currentFace++;
// Start from "closest" face and floodfill through inactive edges
struct Local
{
// Recursively collects all triangles reachable through inactive (coplanar) edges.
static void FloodFill(PxArray<PxU32>& indices, const AdjTriangle* faces, PxU32 current, bool* inMarkers)
{
if(inMarkers[current]) return;
inMarkers[current] = true;
indices.pushBack(current);
const AdjTriangle& AT = faces[current];
// We can floodfill through inactive edges since the mesh is convex (inactive==planar)
if(!AT.HasActiveEdge01()) FloodFill(indices, faces, AT.GetAdjTri(EDGE01), inMarkers);
if(!AT.HasActiveEdge20()) FloodFill(indices, faces, AT.GetAdjTri(EDGE02), inMarkers);
if(!AT.HasActiveEdge12()) FloodFill(indices, faces, AT.GetAdjTri(EDGE12), inMarkers);
}
// Walks around the vertex 'triangleIndex' starting from triangle 'index',
// following inactive edges, to find the neighboring face across the vertex.
// Outputs the neighbor pair and returns true when one is found.
static bool GetNeighborFace(PxU32 index,PxU32 triangleIndex,const AdjTriangle* faces, const PxU32* dfaces, PxU32& neighbor, PxU32& current)
{
PxU32 currentIndex = index;
PxU32 previousIndex = index;
bool firstFace = true;
bool next = true;
while (next)
{
const AdjTriangle& currentAT = faces[currentIndex];
PxU32 refTr0 = dfaces[currentIndex*3 + 0];
PxU32 refTr1 = dfaces[currentIndex*3 + 1];
// Select the two edges of this triangle incident to 'triangleIndex'.
PxU32 edge[2];
edge[0] = 1;
edge[1] = 2;
if(triangleIndex == refTr0)
{
edge[0] = 0;
edge[1] = 1;
}
else
{
if(triangleIndex == refTr1)
{
edge[0] = 0;
edge[1] = 2;
}
}
if(currentAT.HasActiveEdge(edge[0]) && currentAT.HasActiveEdge(edge[1]))
{
// Both incident edges active: cannot continue the walk.
return false;
}
if(!currentAT.HasActiveEdge(edge[0]) && !currentAT.HasActiveEdge(edge[1]))
{
// not interested in testing transition vertices
if(currentIndex == index)
{
return false;
}
// transition one
for (PxU32 i = 0; i < 2; i++)
{
PxU32 testIndex = currentAT.GetAdjTri(SharedEdgeIndex(edge[i]));
// exit if we circle around the vertex back to beginning
if(testIndex == index && previousIndex != index)
{
return false;
}
if(testIndex != previousIndex)
{
// move to next
previousIndex = currentIndex;
currentIndex = testIndex;
break;
}
}
}
else
{
// Exactly one incident edge is inactive; ensure edge[0] is the active one.
if(!currentAT.HasActiveEdge(edge[0]))
{
PxU32 t = edge[0];
edge[0] = edge[1];
edge[1] = t;
}
if(currentAT.HasActiveEdge(edge[0]))
{
PxU32 testIndex = currentAT.GetAdjTri(SharedEdgeIndex(edge[0]));
if(firstFace)
{
firstFace = false;
}
else
{
// Second active edge reached: report the neighbor across it.
neighbor = testIndex;
current = currentIndex;
return true;
}
}
if(!currentAT.HasActiveEdge(edge[1]))
{
PxU32 testIndex = currentAT.GetAdjTri(SharedEdgeIndex(edge[1]));
if(testIndex != index)
{
previousIndex = currentIndex;
currentIndex = testIndex;
}
}
}
}
return false;
}
// Verifies that floodfilling from 'index' would not merge faces whose shared
// neighborhood indicates a non-convex/problematic configuration.
static bool CheckFloodFillFace(PxU32 index,const AdjTriangle* faces, const PxU32* dfaces)
{
if(!dfaces)
return true;
const AdjTriangle& checkedAT = faces[index];
PxU32 refTr0 = dfaces[index*3 + 0];
PxU32 refTr1 = dfaces[index*3 + 1];
PxU32 refTr2 = dfaces[index*3 + 2];
// Check each inactive edge of this triangle.
for (PxU32 i = 0; i < 3; i++)
{
if(!checkedAT.HasActiveEdge(i))
{
// Pick the two endpoints of edge i and their edge slots.
PxU32 testTr0 = refTr1;
PxU32 testTr1 = refTr2;
PxU32 testIndex0 = 0;
PxU32 testIndex1 = 1;
if(i == 0)
{
testTr0 = refTr0;
testTr1 = refTr1;
testIndex0 = 1;
testIndex1 = 2;
}
else
{
if(i == 1)
{
testTr0 = refTr0;
testTr1 = refTr2;
testIndex0 = 0;
testIndex1 = 2;
}
}
// Walk around each endpoint from both sides; mirrored neighbor pairs
// mean the floodfill would close over itself - reject.
PxU32 adjFaceTested = checkedAT.GetAdjTri(SharedEdgeIndex(testIndex0));
PxU32 neighborIndex00;
PxU32 neighborIndex01;
bool found0 = GetNeighborFace(index,testTr0,faces,dfaces, neighborIndex00, neighborIndex01);
PxU32 neighborIndex10;
PxU32 neighborIndex11;
bool found1 = GetNeighborFace(adjFaceTested,testTr0,faces,dfaces, neighborIndex10, neighborIndex11);
if(found0 && found1 && neighborIndex00 == neighborIndex11 && neighborIndex01 == neighborIndex10)
{
return false;
}
adjFaceTested = checkedAT.GetAdjTri(SharedEdgeIndex(testIndex1));
found0 = GetNeighborFace(index,testTr1,faces,dfaces,neighborIndex00,neighborIndex01);
found1 = GetNeighborFace(adjFaceTested,testTr1,faces,dfaces,neighborIndex10,neighborIndex11);
if(found0 && found1 && neighborIndex00 == neighborIndex11 && neighborIndex01 == neighborIndex10)
{
return false;
}
}
}
return true;
}
// Validates a floodfilled polygon. If invalid, forces its edges active
// (0x20000000 bits), unmarks its triangles, clears 'indices' and returns true
// so the caller re-runs the floodfill; returns false when the fill is valid.
static bool CheckFloodFill(PxArray<PxU32>& indices,AdjTriangle* faces,bool* inMarkers, const PxU32* dfaces)
{
bool valid = true;
for(PxU32 i=0;i<indices.size();i++)
{
//const AdjTriangle& AT = faces[indices.GetEntry(i)];
// Two triangles of the same polygon must never be joined by an ACTIVE edge.
for(PxU32 j= i + 1;j<indices.size();j++)
{
const AdjTriangle& testAT = faces[indices[j]];
if(testAT.GetAdjTri(EDGE01) == indices[i])
{
if(testAT.HasActiveEdge01())
{
valid = false;
}
}
if(testAT.GetAdjTri(EDGE02) == indices[i])
{
if(testAT.HasActiveEdge20())
{
valid = false;
}
}
if(testAT.GetAdjTri(EDGE12) == indices[i])
{
if(testAT.HasActiveEdge12())
{
valid = false;
}
}
if(!valid)
break;
}
if(!CheckFloodFillFace(indices[i], faces, dfaces))
{
valid = false;
}
if(!valid)
break;
}
if(!valid)
{
// Invalid fill: force all edges of these triangles active and retry.
for(PxU32 i=0;i<indices.size();i++)
{
AdjTriangle& AT = faces[indices[i]];
AT.mATri[0] |= 0x20000000;
AT.mATri[1] |= 0x20000000;
AT.mATri[2] |= 0x20000000;
inMarkers[indices[i]] = false;
}
indices.forceSize_Unsafe(0);
return true;
}
return false;
}
};
if(currentFace!=nbFaces)
{
PxArray<PxU32> indices; // Indices of triangles forming hull polygon
// Floodfill, then re-run whenever validation had to activate edges.
bool doFill = true;
while (doFill)
{
Local::FloodFill(indices, adj.mFaces, currentFace, markers);
doFill = Local::CheckFloodFill(indices,adj.mFaces,markers, dFaces);
}
// Now it would be nice to recreate a closed linestrip, similar to silhouette extraction. The line is composed of active edges, this time.
PxArray<Pair> activeSegments;
//Container ActiveSegments;
// Loop through triangles composing the polygon
for(PxU32 i=0;i<indices.size();i++)
{
const PxU32 currentTriIndex = indices[i]; // Catch current triangle
const PxU32 vRef0 = dFaces ? dFaces[currentTriIndex*3+0] : wFaces[currentTriIndex*3+0];
const PxU32 vRef1 = dFaces ? dFaces[currentTriIndex*3+1] : wFaces[currentTriIndex*3+1];
const PxU32 vRef2 = dFaces ? dFaces[currentTriIndex*3+2] : wFaces[currentTriIndex*3+2];
// Keep active edges
if(adj.mFaces[currentTriIndex].HasActiveEdge01()) { activeSegments.pushBack(Pair(vRef0,vRef1)); }
if(adj.mFaces[currentTriIndex].HasActiveEdge20()) { activeSegments.pushBack(Pair(vRef0,vRef2)); }
if(adj.mFaces[currentTriIndex].HasActiveEdge12()) { activeSegments.pushBack(Pair(vRef1,vRef2)); }
}
// We assume the polygon is convex. In that case it should always be possible to retriangulate it so that the triangles are
// implicit (in particular, it should always be possible to remove interior triangles)
PxArray<PxU32> lineStrip;
if(findLineStrip(lineStrip, activeSegments))
{
PxU32 nb = lineStrip.size();
if(nb)
{
const PxU32* entries = lineStrip.begin();
PX_ASSERT(entries[0] == entries[nb-1]); // findLineStrip() is designed that way. Might not be what we want!
// We get rid of the last (duplicated) index
// Emit the polygon: vertex count followed by the boundary vertex refs.
polygon_data.pushBack(nb-1);
for (PxU32 i = 0; i < nb-1; i++)
{
vertexMarkers[entries[i]]++;
polygon_data.pushBack(entries[i]);
}
nb_polygons++;
// Loop through vertices composing the line strip polygon end mark the redundant vertices inside the polygon
for(PxU32 i=0;i<indices.size();i++)
{
const PxU32 CurrentTriIndex = indices[i]; // Catch current triangle
const PxU32 VRef0 = dFaces ? dFaces[CurrentTriIndex*3+0] : wFaces[CurrentTriIndex*3+0];
const PxU32 VRef1 = dFaces ? dFaces[CurrentTriIndex*3+1] : wFaces[CurrentTriIndex*3+1];
const PxU32 VRef2 = dFaces ? dFaces[CurrentTriIndex*3+2] : wFaces[CurrentTriIndex*3+2];
// A triangle vertex absent from the boundary strip is interior => redundant.
bool found0 = false;
bool found1 = false;
bool found2 = false;
for (PxU32 j=0;j < nb - 1; j++)
{
if(VRef0 == entries[j])
{
found0 = true;
}
if(VRef1 == entries[j])
{
found1 = true;
}
if(VRef2 == entries[j])
{
found2 = true;
}
if(found0 && found1 && found2)
break;
}
if(!found0)
{
if(rendundantVertices.find(VRef0) == rendundantVertices.end())
rendundantVertices.pushBack(VRef0);
}
if(!found1)
{
if(rendundantVertices.find(VRef1) == rendundantVertices.end())
rendundantVertices.pushBack(VRef1);
}
if(!found2)
{
if(rendundantVertices.find(VRef2) == rendundantVertices.end())
rendundantVertices.pushBack(VRef2);
}
}
// If needed, output triangle indices used to build this polygon
if(triangle_data)
{
triangle_data->pushBack(indices.size());
for (PxU32 j = 0; j < indices.size(); j++)
triangle_data->pushBack(indices[j]);
}
}
}
else
return PxGetFoundation().error(PxErrorCode::eINVALID_OPERATION, PX_FL, "Meshmerizer::extractHullPolygons: line strip extraction failed");
}
}
while(currentFace!=nbFaces);
// A vertex referenced by fewer than 3 polygon boundaries is also redundant.
for (PxU32 i = 0; i < nbVertices; i++)
{
if(vertexMarkers[i] < 3)
{
if(rendundantVertices.find(i) == rendundantVertices.end())
rendundantVertices.pushBack(i);
}
}
// Resolve redundant vertices that cannot simply be dropped (triangle polygons).
if(rendundantVertices.size() > 0 && triangle_data)
checkRedundantVertices(nb_polygons,polygon_data,hull,*triangle_data,rendundantVertices);
return true;
}
//////////////////////////////////////////////////////////////////////////
// Constructor: forwards to the base hull builder; the face buffer is lazily
// allocated in computeHullPolygons().
ConvexPolygonsBuilder::ConvexPolygonsBuilder(ConvexHullData* hull, const bool buildGRBData)
: ConvexHullBuilder(hull, buildGRBData), mNbHullFaces(0), mFaces(NULL)
{
}
//////////////////////////////////////////////////////////////////////////
// Destructor: releases the face buffer owned by this builder.
ConvexPolygonsBuilder::~ConvexPolygonsBuilder()
{
PX_FREE(mFaces);
}
//////////////////////////////////////////////////////////////////////////
// compute hull polygons from given hull triangles
// Computes hull polygons from user-provided hull triangles: copies the input,
// cleans it (welding, duplicate removal, winding unification), validates it, then
// converts the triangle soup into polygon data. Returns false on validation failure.
bool ConvexPolygonsBuilder::computeHullPolygons(const PxU32& nbVerts,const PxVec3* verts, const PxU32& nbTriangles, const PxU32* triangles)
{
PX_ASSERT(triangles);
PX_ASSERT(verts);
// Reset all derived buffers before rebuilding.
mHullDataHullVertices = NULL;
mHullDataPolygons = NULL;
mHullDataVertexData8 = NULL;
mHullDataFacesByEdges8 = NULL;
mHullDataFacesByVertices8 = NULL;
mNbHullFaces = nbTriangles;
mHull->mNbHullVertices = PxTo8(nbVerts);
// allocate additional vec3 for V4 safe load in VolumeInteration
mHullDataHullVertices = PX_ALLOCATE(PxVec3, (mHull->mNbHullVertices + 1), "PxVec3");
PxMemCopy(mHullDataHullVertices, verts, mHull->mNbHullVertices*sizeof(PxVec3));
// Copy the input triangles (vertex indices must fit in 16 bits - see asserts).
mFaces = PX_ALLOCATE(IndexedTriangle32, mNbHullFaces, "mFaces");
for(PxU32 i=0;i<mNbHullFaces;i++)
{
PX_ASSERT(triangles[i*3+0]<=0xffff);
PX_ASSERT(triangles[i*3+1]<=0xffff);
PX_ASSERT(triangles[i*3+2]<=0xffff);
mFaces[i].mRef[0] = triangles[i*3+0];
mFaces[i].mRef[1] = triangles[i*3+1];
mFaces[i].mRef[2] = triangles[i*3+2];
}
IndexedTriangle32* hullAsIndexedTriangle = mFaces;
// We don't trust the user at all... So, clean the hull.
PxU32 nbHullVerts = mHull->mNbHullVertices;
CleanFaces(mNbHullFaces, hullAsIndexedTriangle, nbHullVerts, mHullDataHullVertices);
PX_ASSERT(nbHullVerts<256);
mHull->mNbHullVertices = PxTo8(nbHullVerts);
// ...and then run the full tests again.
if(!CheckFaces(mNbHullFaces, hullAsIndexedTriangle, mHull->mNbHullVertices, mHullDataHullVertices))
return false;
// Transform triangles-to-polygons
if(!createPolygonData())
return false;
// Final consistency check on the generated polygon data.
return checkHullPolygons();
}
/**
 * Computes polygon data: merges the hull triangles into convex polygons, removes vertices that
 * became redundant in the process, and fills mHullDataPolygons / mHullDataVertexData8.
 * \return true if success
 */
bool ConvexPolygonsBuilder::createPolygonData()
{
	// Cleanup
	mHull->mNbPolygons = 0;
	PX_FREE(mHullDataVertexData8);
	PX_FREE(mHullDataFacesByVertices8);
	PX_FREE(mHullDataPolygons);
	// Extract polygon data from triangle data.
	// temp  = per-polygon streams: [vertex count, vertex indices...] repeated
	// temp2 = per-polygon streams: [triangle count, triangle indices...] repeated
	PxArray<PxU32> temp;
	PxArray<PxU32> temp2;
	PxArray<PxU32> rendundantVertices;
	PxU32 nbPolygons;
	if(!extractHullPolygons(nbPolygons, temp, *this, &temp2,rendundantVertices))
		return false;
	PxVec3* reducedHullDataHullVertices = mHullDataHullVertices;
	PxU8 numReducedHullDataVertices = mHull->mNbHullVertices;
	if(rendundantVertices.size() > 0)
	{
		// Build a compacted vertex buffer without the redundant vertices, plus an
		// old->new index remap table (0xFF marks a removed vertex).
		numReducedHullDataVertices = PxTo8(mHull->mNbHullVertices - rendundantVertices.size());
		reducedHullDataHullVertices = PX_ALLOCATE(PxVec3, numReducedHullDataVertices, "Reduced vertices hull data");
		PxU8* remapTable = PX_ALLOCATE(PxU8, mHull->mNbHullVertices, "remapTable");
		PxU8 currentIndex = 0;
		for (PxU8 i = 0; i < mHull->mNbHullVertices; i++)
		{
			if(rendundantVertices.find(i) == rendundantVertices.end())
			{
				PX_ASSERT(currentIndex < numReducedHullDataVertices);
				reducedHullDataHullVertices[currentIndex] = mHullDataHullVertices[i];
				remapTable[i] = currentIndex;
				currentIndex++;
			}
			else
			{
				remapTable[i] = 0xFF;
			}
		}
		// Rewrite all polygon vertex indices through the remap table (in-place in temp).
		PxU32* data = temp.begin();
		for(PxU32 i=0;i<nbPolygons;i++)
		{
			PxU32 nbVerts = *data++;
			PX_ASSERT(nbVerts>=3); // Else something very wrong happened...
			for(PxU32 j=0;j<nbVerts;j++)
			{
				PX_ASSERT(data[j] < mHull->mNbHullVertices);
				data[j] = remapTable[data[j]];
			}
			data += nbVerts;
		}
		PX_FREE(remapTable);
	}
	if(nbPolygons>255)
		return PxGetFoundation().error(PxErrorCode::eINTERNAL_ERROR, PX_FL, "ConvexHullBuilder: convex hull has more than 255 polygons!");
	// Precompute hull polygon structures
	mHull->mNbPolygons = PxTo8(nbPolygons);
	mHullDataPolygons = PX_ALLOCATE(HullPolygonData, mHull->mNbPolygons, "Gu::HullPolygonData");
	PxMemZero(mHullDataPolygons, sizeof(HullPolygonData)*mHull->mNbPolygons);
	// The winding hasn't been preserved so we need to handle this. Basically we need to "unify normals"
	// exactly as we did at hull creation time - except this time we work on polygons
	PxVec3 geomCenter;
	computeGeomCenter(geomCenter, mNbHullFaces, mFaces, mHullDataHullVertices, mHull->mNbHullVertices);
	// Loop through polygons
	// We have N polygons => remove N entries for number of vertices
	PxU32 tmp = temp.size() - nbPolygons;
	mHullDataVertexData8 = PX_ALLOCATE(PxU8, tmp, "mHullDataVertexData8");
	PxU8* dest = mHullDataVertexData8;
	const PxU32* data = temp.begin();
	const PxU32* triData = temp2.begin();
	for(PxU32 i=0;i<nbPolygons;i++)
	{
		mHullDataPolygons[i].mVRef8 = PxU16(dest - mHullDataVertexData8); // Setup link for current polygon
		PxU32 nbVerts = *data++;
		PX_ASSERT(nbVerts>=3); // Else something very wrong happened...
		mHullDataPolygons[i].mNbVerts = PxTo8(nbVerts);
		// Copy the polygon's vertex indices, dropping indices remapped to 0xFF (removed vertices).
		PxU32 index = 0;
		for(PxU32 j=0;j<nbVerts;j++)
		{
			if(data[j] != 0xFF)
			{
				dest[index] = PxTo8(data[j]);
				index++;
			}
			else
			{
				mHullDataPolygons[i].mNbVerts--;
			}
		}
		// Compute plane equation
		{
			computeNewellPlane(mHullDataPolygons[i].mPlane, mHullDataPolygons[i].mNbVerts, dest, reducedHullDataHullVertices);
			PxU32 nbTris = *triData++; // #tris in current poly
			// Flip the Newell plane if it disagrees with the first source triangle's winding.
			bool flip = false;
			for(PxU32 k=0;k< nbTris; k++)
			{
				PxU32 triIndex = *triData++; // Index of one triangle composing polygon
				PX_ASSERT(triIndex<mNbHullFaces);
				const IndexedTriangle32& T = reinterpret_cast<const IndexedTriangle32&>(mFaces[triIndex]);
				const PxPlane PL = PlaneEquation(T, mHullDataHullVertices);
				if(k==0 && PL.n.dot(mHullDataPolygons[i].mPlane.n) < 0.0f)
				{
					flip = true;
				}
			}
			if(flip)
			{
				negatePlane(mHullDataPolygons[i]);
				inverseBuffer(mHullDataPolygons[i].mNbVerts, dest);
			}
			// Push the plane out so that all hull vertices are on or behind it (most negative d wins).
			for(PxU32 j=0;j<mHull->mNbHullVertices;j++)
			{
				float d = - (mHullDataPolygons[i].mPlane.n).dot(mHullDataHullVertices[j]);
				if(d<mHullDataPolygons[i].mPlane.d) mHullDataPolygons[i].mPlane.d=d;
			}
		}
		// "Unify normal": ensure the plane faces away from the geometric center.
		if(mHullDataPolygons[i].mPlane.distance(geomCenter)>0.0f)
		{
			inverseBuffer(mHullDataPolygons[i].mNbVerts, dest);
			negatePlane(mHullDataPolygons[i]);
			PX_ASSERT(mHullDataPolygons[i].mPlane.distance(geomCenter)<=0.0f);
		}
		// Next one
		data += nbVerts; // Skip vertex indices
		dest += mHullDataPolygons[i].mNbVerts;
	}
	// Commit the compacted vertex buffer back into mHullDataHullVertices.
	if(reducedHullDataHullVertices != mHullDataHullVertices)
	{
		PxMemCopy(mHullDataHullVertices,reducedHullDataHullVertices,sizeof(PxVec3)*numReducedHullDataVertices);
		PX_FREE(reducedHullDataHullVertices);
		mHull->mNbHullVertices = numReducedHullDataVertices;
	}
	//calculate the vertex map table
	if(!calculateVertexMapTable(nbPolygons))
		return false;
#ifdef USE_PRECOMPUTED_HULL_PROJECTION
	// Loop through polygons
	for(PxU32 j=0;j<nbPolygons;j++)
	{
		// Precompute hull projection along local polygon normal
		PxU32 nbVerts = mHull->mNbHullVertices;
		const PxVec3* verts = mHullDataHullVertices;
		HullPolygonData& polygon = mHullDataPolygons[j];
		PxReal min = PX_MAX_F32;
		PxU8 minIndex = 0xff;
		for (PxU8 i = 0; i < nbVerts; i++)
		{
			float dp = (*verts++).dot(polygon.mPlane.n);
			if(dp < min)
			{
				min = dp;
				minIndex = i;
			}
		}
		polygon.mMinIndex = minIndex;
	}
#endif
	// Triangulate newly created polygons to recreate a clean vertex cloud.
	return createTrianglesFromPolygons();
}
//////////////////////////////////////////////////////////////////////////
// create back triangles from polygons
// Recreates the triangle list (mFaces) by fanning each hull polygon; zero-area fan triangles are discarded.
// \return true on success, false on allocation failure or if a polygon has fewer than 3 vertices
bool ConvexPolygonsBuilder::createTrianglesFromPolygons()
{
	if (!mHull->mNbPolygons || !mHullDataPolygons) return false;
	// Upper bound on triangle count: a convex polygon with N vertices fans into N-2 triangles.
	PxU32 maxNbTriangles = 0;
	for (PxU32 i = 0; i < mHull->mNbPolygons; i++)
	{
		if (mHullDataPolygons[i].mNbVerts < 3)
			return PxGetFoundation().error(PxErrorCode::eINTERNAL_ERROR, PX_FL, "ConvexHullBuilder::CreateTrianglesFromPolygons: convex hull has a polygon with less than 3 vertices!");
		maxNbTriangles += mHullDataPolygons[i].mNbVerts - 2;
	}
	IndexedTriangle32* tmpFaces = PX_ALLOCATE(IndexedTriangle32, maxNbTriangles, "tmpFaces");
	IndexedTriangle32* currFace = tmpFaces;
	PxU32 nbTriangles = 0;
	const PxU8* vertexData = mHullDataVertexData8;
	const PxVec3* hullVerts = mHullDataHullVertices;
	for (PxU32 i = 0; i < mHull->mNbPolygons; i++)
	{
		const PxU8* data = vertexData + mHullDataPolygons[i].mVRef8;
		PxU32 nbVerts = mHullDataPolygons[i].mNbVerts;
		// Triangulate the polygon such that all generated triangles have one and the same vertex
		// in common.
		//
		// Make sure to avoid creating zero area triangles. Imagine the following polygon:
		//
		// 4 3
		// *------------------*
		// | |
		// *---*----*----*----*
		// 5 6 0 1 2
		//
		// Choosing vertex 0 as the shared vertex, the following zero area triangles will be created:
		// [0 1 2], [0 5 6]
		//
		// Check for these triangles and discard them
		// Note: Such polygons should only occur if the user defines the convex hull, i.e., the triangles
		// of the convex shape, himself. If the convex hull is built from the vertices only, the
		// hull algorithm removes the useless vertices.
		//
		for (PxU32 j = 0; j < nbVerts - 2; j++)
		{
			// Fan triangle: shared vertex 0, then two consecutive polygon vertices (wrapping).
			currFace->mRef[0] = data[0];
			currFace->mRef[1] = data[(j + 1) % nbVerts];
			currFace->mRef[2] = data[(j + 2) % nbVerts];
			const PxVec3& p0 = hullVerts[currFace->mRef[0]];
			const PxVec3& p1 = hullVerts[currFace->mRef[1]];
			const PxVec3& p2 = hullVerts[currFace->mRef[2]];
			// Squared cross-product magnitude is proportional to the squared triangle area.
			const float area = ((p1 - p0).cross(p2 - p0)).magnitudeSquared();
			if (area != 0.0f) // Else discard the triangle
			{
				nbTriangles++;
				currFace++;
			}
		}
	}
	PX_FREE(mFaces);
	IndexedTriangle32* faces;
	PX_ASSERT(nbTriangles <= maxNbTriangles);
	if (maxNbTriangles == nbTriangles)
	{
		// No zero area triangles, hence the face buffer has correct size and can be used directly.
		faces = tmpFaces;
	}
	else
	{
		// Resize face buffer because some triangles were discarded.
		faces = PX_ALLOCATE(IndexedTriangle32, nbTriangles, "mFaces");
		if (!faces) // PT: TODO: is there a reason why we test the alloc result here and nowhere else?
		{
			PX_FREE(tmpFaces);
			return false;
		}
		PxMemCopy(faces, tmpFaces, sizeof(IndexedTriangle32)*nbTriangles);
		PX_FREE(tmpFaces);
	}
	mFaces = faces;
	mNbHullFaces = nbTriangles;
	// TODO: at this point useless vertices should be removed from the hull. The current fix is to initialize
	// support vertices to known valid vertices, but it's not really convincing.
	// Re-unify normals: flip any triangle whose plane faces the geometric center.
	PxVec3 geomCenter;
	computeGeomCenter(geomCenter, mNbHullFaces, mFaces, mHullDataHullVertices, mHull->mNbHullVertices);
	for (PxU32 i = 0; i < mNbHullFaces; i++)
	{
		const PxPlane P(hullVerts[mFaces[i].mRef[0]],
			hullVerts[mFaces[i].mRef[1]],
			hullVerts[mFaces[i].mRef[2]]);
		if (P.distance(geomCenter) > 0.0f)
		{
			mFaces[i].flip();
		}
	}
	return true;
}
| 39,872 | C++ | 28.934685 | 199 | 0.644036 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/cooking/GuRTreeCooking.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxBounds3.h"
#include "foundation/PxMemory.h"
#include "common/PxTolerancesScale.h"
#include "foundation/PxSimpleTypes.h"
#include "foundation/PxSort.h"
#include "foundation/PxAllocator.h"
#include "foundation/PxVecMath.h"
#include "foundation/PxInlineArray.h"
#include "GuRTree.h"
#include "GuRTreeCooking.h"
#define PRINT_RTREE_COOKING_STATS 0 // AP: keeping this frequently used macro for diagnostics/benchmarking
#if PRINT_RTREE_COOKING_STATS
#include <stdio.h>
#endif
using namespace physx::Gu;
using namespace physx::aos;
namespace physx
{
// Google "wikipedia QuickSelect" for algorithm explanation
namespace quickSelect {
#define SWAP32(x, y) { PxU32 tmp = y; y = x; x = tmp; }
// left is the index of the leftmost element of the subarray
// right is the index of the rightmost element of the subarray (inclusive)
// number of elements in subarray = right-left+1
// Lomuto-style partition of a[left..right] around a[pivotIndex].
// On return, every element at or left of the returned index compares <= pivot,
// and every element right of it compares >= pivot; returns the pivot's final index.
template<typename LtEq>
PxU32 partition(PxU32* PX_RESTRICT a, PxU32 left, PxU32 right, PxU32 pivotIndex, const LtEq& cmpLtEq)
{
	PX_ASSERT(pivotIndex >= left && pivotIndex <= right);
	PxU32 pivotValue = a[pivotIndex];
	SWAP32(a[pivotIndex], a[right]) // Move pivot to end
	PxU32 storeIndex = left;
	for (PxU32 i = left; i < right; i++) // left <= i < right
		if (cmpLtEq(a[i], pivotValue))
		{
			SWAP32(a[i], a[storeIndex]);
			storeIndex++;
		}
	SWAP32(a[storeIndex], a[right]); // Move pivot to its final place
	// Debug-only verification of the partition invariant (PX_ASSERT compiles out in release).
	for (PxU32 i = left; i < storeIndex; i++)
		PX_ASSERT(cmpLtEq(a[i], a[storeIndex]));
	for (PxU32 i = storeIndex+1; i <= right; i++)
		PX_ASSERT(cmpLtEq(a[storeIndex], a[i]));
	return storeIndex;
}
// left is the index of the leftmost element of the subarray
// right is the index of the rightmost element of the subarray (inclusive)
// number of elements in subarray = right-left+1
// recursive version
// Rearranges a[left..right] so that the k smallest elements (per cmpLtEq) occupy a[left..left+k-1].
// Recursive variant; see quickSelectFirstK below for the iterative equivalent.
template<typename LtEq>
void quickFindFirstK(PxU32* PX_RESTRICT a, PxU32 left, PxU32 right, PxU32 k, const LtEq& cmpLtEq)
{
	PX_ASSERT(k <= right-left+1);
	if (right > left)
	{
		// select pivotIndex between left and right
		PxU32 pivotIndex = (left + right) >> 1;
		PxU32 pivotNewIndex = partition(a, left, right, pivotIndex, cmpLtEq);
		// now all elements to the left of pivotNewIndex are < old value of a[pivotIndex] (bottom half values)
		if (pivotNewIndex > left + k) // pivot landed beyond the first k slots - keep searching the left partition
			quickFindFirstK(a, left, pivotNewIndex-1, k, cmpLtEq);
		if (pivotNewIndex < left + k) // left partition is fully part of the answer - find the rest on the right
			quickFindFirstK(a, pivotNewIndex+1, right, k+left-pivotNewIndex-1, cmpLtEq);
		// if pivotNewIndex == left + k, a[left..left+k-1] already holds the k smallest elements - done
	}
}
// non-recursive version
// Rearranges a[left..right] so that the k smallest elements (per cmpLtEq) end up in a[left..left+k-1].
template<typename LtEq>
void quickSelectFirstK(PxU32* PX_RESTRICT a, PxU32 left, PxU32 right, PxU32 k, const LtEq& cmpLtEq)
{
	PX_ASSERT(k <= right-left+1);
	// Repeatedly partition and narrow the working range until exactly k elements
	// lie at or before the pivot's final position.
	while(true)
	{
		const PxU32 mid = (left+right) >> 1;						// pivot choice: middle of the current range
		const PxU32 newPivot = partition(a, left, right, mid, cmpLtEq);
		const PxU32 numLeft = newPivot - left + 1;					// elements in [left..newPivot], all <= pivot
		if(numLeft == k)
			return;													// first k slots now hold the k smallest elements
		if(k < numLeft)
		{
			// the k smallest lie entirely inside the left partition - shrink from the right
			PX_ASSERT(newPivot > 0);
			right = newPivot - 1;
		}
		else
		{
			// the whole left partition belongs to the answer - keep it
			// and look for the remaining k-numLeft elements on the right
			k -= numLeft;
			left = newPivot + 1;
		}
	}
}
} // namespace quickSelect
// Intermediate non-quantized representation for RTree node in a page (final format is SIMD transposed page)
struct RTreeNodeNQ
{
	PxBounds3 bounds; // world-space AABB of this node
	PxI32 childPageFirstNodeIndex; // relative to the beginning of all build tree nodes array
	PxI32 leafCount; // -1 for empty nodes, 0 for non-terminal nodes, number of enclosed tris if non-zero (LeafTriangles), also means a terminal node
	struct U {}; // selector struct for uninitialized constructor
	RTreeNodeNQ(U) {} // uninitialized constructor
	RTreeNodeNQ() : bounds(PxBounds3::empty()), childPageFirstNodeIndex(-1), leafCount(0) {} // default: empty non-terminal node
};
// SIMD version of bounds class
struct PxBounds3V
{
	struct U {}; // selector struct for uninitialized constructor
	Vec3V mn, mx; // min/max corners of the AABB, kept in SIMD registers
	PxBounds3V(Vec3VArg mn_, Vec3VArg mx_) : mn(mn_), mx(mx_) {}
	PxBounds3V(U) {} // uninitialized constructor
	PX_FORCE_INLINE Vec3V getExtents() const { return V3Sub(mx, mn); }
	// grow this AABB to also enclose 'other'
	PX_FORCE_INLINE void include(const PxBounds3V& other) { mn = V3Min(mn, other.mn); mx = V3Max(mx, other.mx); }
	// convert vector extents to PxVec3
	PX_FORCE_INLINE const PxVec3 getMinVec3() const { PxVec3 ret; V3StoreU(mn, ret); return ret; }
	PX_FORCE_INLINE const PxVec3 getMaxVec3() const { PxVec3 ret; V3StoreU(mx, ret); return ret; }
};
static void buildFromBounds(
Gu::RTree& resultTree, const PxBounds3V* allBounds, PxU32 numBounds,
PxArray<PxU32>& resultPermute, RTreeCooker::RemapCallback* rc, Vec3VArg allMn, Vec3VArg allMx,
PxReal sizePerfTradeOff, PxMeshCookingHint::Enum hint);
/////////////////////////////////////////////////////////////////////////
// Builds per-triangle inflated AABBs from the mesh (16- or 32-bit indices) and forwards them,
// together with the overall mesh bound, to buildFromBounds which constructs the RTree.
void RTreeCooker::buildFromTriangles(
	Gu::RTree& result, const PxVec3* verts, PxU32 numVerts, const PxU16* tris16, const PxU32* tris32, PxU32 numTris,
	PxArray<PxU32>& resultPermute, RTreeCooker::RemapCallback* rc, PxReal sizePerfTradeOff01, PxMeshCookingHint::Enum hint)
{
	PX_UNUSED(numVerts); // only referenced by the debug-only assert below
	PxArray<PxBounds3V> allBounds;
	allBounds.reserve(numTris);
	// running min/max over all triangle bounds, used as the root bound
	Vec3V allMn = Vec3V_From_FloatV(FMax()), allMx = Vec3V_From_FloatV(FNegMax());
	Vec3V eps = V3Splat(FLoad(5e-4f)); // AP scaffold: use PxTolerancesScale here?
	// build RTree AABB bounds from triangles, conservative bound inflation is also performed here
	for(PxU32 i = 0; i < numTris; i ++)
	{
		PxU32 i0, i1, i2;
		PxU32 i3 = i*3;
		// triangle indices are supplied either as 16-bit or 32-bit
		if(tris16)
		{
			i0 = tris16[i3]; i1 = tris16[i3+1]; i2 = tris16[i3+2];
		} else
		{
			i0 = tris32[i3]; i1 = tris32[i3+1]; i2 = tris32[i3+2];
		}
		PX_ASSERT_WITH_MESSAGE(i0 < numVerts && i1 < numVerts && i2 < numVerts ,"Input mesh triangle's vertex index exceeds specified numVerts.");
		Vec3V v0 = V3LoadU(verts[i0]), v1 = V3LoadU(verts[i1]), v2 = V3LoadU(verts[i2]);
		Vec3V mn = V3Sub(V3Min(V3Min(v0, v1), v2), eps); // min over 3 verts, subtract eps to inflate
		Vec3V mx = V3Add(V3Max(V3Max(v0, v1), v2), eps); // max over 3 verts, add eps to inflate
		allMn = V3Min(allMn, mn); allMx = V3Max(allMx, mx);
		allBounds.pushBack(PxBounds3V(mn, mx));
	}
	buildFromBounds(result, allBounds.begin(), numTris, resultPermute, rc, allMn, allMx, sizePerfTradeOff01, hint);
}
/////////////////////////////////////////////////////////////////////////
// Fast but lower quality 4-way split sorting using repeated application of quickselect
// comparator template struct for sorting bounds centers given a coordinate index (x,y,z=0,1,2)
struct BoundsLTE
{
	PxU32 coordIndex;								// axis to compare along (x,y,z = 0,1,2)
	const PxVec3* PX_RESTRICT boundCenters; // AP: precomputed centers are faster than recomputing the centers
	BoundsLTE(PxU32 coordIndex_, const PxVec3* boundCenters_)
		: coordIndex(coordIndex_), boundCenters(boundCenters_)
	{}
	// Less-than-or-equal ordering of two bounds (referenced by index) using their
	// precomputed centers, compared along the chosen coordinate axis.
	PX_FORCE_INLINE bool operator()(const PxU32 & indexA, const PxU32 & indexB) const
	{
		return boundCenters[indexA][coordIndex] <= boundCenters[indexB][coordIndex];
	}
};
// ======================================================================
// Quick sorting method
// recursive sorting procedure:
// 1. find min and max extent along each axis for the current cluster
// 2. split input cluster into two 3 times using quickselect, splitting off a quarter of the initial cluster size each time
// 3. the axis is potentially different for each split using the following
// approximate splitting heuristic - reduce max length by some estimated factor to encourage split along other axis
// since we cut off between a quarter to a half of elements in this direction per split
// the reduction for first split should be *0.75f but we use 0.8
// to account for some node overlap. This is somewhat of an arbitrary choice and there's room for improvement.
// 4. recurse on new clusters (goto step 1)
//
struct SubSortQuick
{
	static const PxReal reductionFactors[RTREE_N-1];	// per-split shrink factors applied to the split axis length (defined below)
	enum { NTRADEOFF = 9 };
	static const PxU32 stopAtTrisPerLeaf1[NTRADEOFF]; // presets for PxCookingParams::meshSizePerformanceTradeoff implementation
	const PxU32* permuteEnd;		// one past the end of the full permutation array (debug range checks)
	const PxU32* permuteStart;		// beginning of the full permutation array
	const PxBounds3V* allBounds;	// all leaf bounds, indexed via the permutation array
	PxArray<PxVec3> boundCenters;	// precomputed (min+max) per bound; factor of 2 is irrelevant for ordering
	PxU32 maxBoundsPerLeafPage;		// termination threshold, chosen from stopAtTrisPerLeaf1 via the trade-off parameter
	// initialize the context for the sorting routine
	SubSortQuick(PxU32* permute, const PxBounds3V* allBounds_, PxU32 allBoundsSize, PxReal sizePerfTradeOff01)
		: allBounds(allBounds_)
	{
		permuteEnd = permute + allBoundsSize;
		permuteStart = permute;
		PxU32 boundsCount = allBoundsSize;
		boundCenters.reserve(boundsCount); // AP - measured that precomputing centers helps with perf significantly (~20% on 1k verts)
		for(PxU32 i = 0; i < boundsCount; i++)
			boundCenters.pushBack( allBounds[i].getMinVec3() + allBounds[i].getMaxVec3() );
		// map sizePerfTradeOff01 in [0,1] onto a preset index in [0, NTRADEOFF-1]
		PxU32 iTradeOff = PxMin<PxU32>( PxU32(PxMax<PxReal>(0.0f, sizePerfTradeOff01)*NTRADEOFF), NTRADEOFF-1 );
		maxBoundsPerLeafPage = stopAtTrisPerLeaf1[iTradeOff];
	}
	// implements the sorting/splitting procedure (see the algorithm description above the struct)
	void sort4(
		PxU32* PX_RESTRICT permute, const PxU32 clusterSize, // beginning and size of current recursively processed cluster
		PxArray<RTreeNodeNQ>& resultTree, PxU32& maxLevels,
		PxBounds3V& subTreeBound, PxU32 level = 0)
	{
		// track the deepest recursion level reached
		if(level == 0)
			maxLevels = 1;
		else
			maxLevels = PxMax(maxLevels, level+1);
		PX_ASSERT(permute + clusterSize <= permuteEnd);
		PX_ASSERT(maxBoundsPerLeafPage >= RTREE_N-1);
		const PxU32 cluster4 = PxMax<PxU32>(clusterSize/RTREE_N, 1);	// target size of each of the RTREE_N sub-clusters
		PX_ASSERT(clusterSize > 0);
		// find min and max world bound for current cluster
		Vec3V mx = allBounds[permute[0]].mx, mn = allBounds[permute[0]].mn; PX_ASSERT(permute[0] < boundCenters.size());
		for(PxU32 i = 1; i < clusterSize; i ++)
		{
			PX_ASSERT(permute[i] < boundCenters.size());
			mx = V3Max(mx, allBounds[permute[i]].mx);
			mn = V3Min(mn, allBounds[permute[i]].mn);
		}
		PX_ALIGN_PREFIX(16) PxReal maxElem[4] PX_ALIGN_SUFFIX(16);
		V3StoreA(V3Sub(mx, mn), *reinterpret_cast<PxVec3*>(maxElem)); // compute the dimensions and store into a scalar maxElem array
		// split along the longest axis
		const PxU32 maxDiagElement = PxU32(maxElem[0] > maxElem[1] && maxElem[0] > maxElem[2] ? 0 : (maxElem[1] > maxElem[2] ? 1 : 2));
		BoundsLTE cmpLte(maxDiagElement, boundCenters.begin());
		const PxU32 startNodeIndex = resultTree.size();
		resultTree.resizeUninitialized(startNodeIndex+RTREE_N); // at each recursion level we add 4 nodes to the tree
		PxBounds3V childBound( (PxBounds3V::U()) ); // start off uninitialized for performance
		// leftover = size of the last sub-cluster (whatever remains after RTREE_N-1 clusters of cluster4)
		const PxI32 leftover = PxMax<PxI32>(PxI32(clusterSize - cluster4*(RTREE_N-1)), 0);
		PxU32 totalCount = 0;
		for(PxU32 i = 0; i < RTREE_N; i++)
		{
			// split off cluster4 count nodes out of the entire cluster for each i
			const PxU32 clusterOffset = cluster4*i;
			PxU32 count1; // cluster4 or leftover depending on whether it's the last cluster
			if(i < RTREE_N-1)
			{
				// only need to do quickSelect for the first pagesize-1 clusters
				if(clusterOffset <= clusterSize-1)
				{
					quickSelect::quickSelectFirstK(permute, clusterOffset, clusterSize-1, cluster4, cmpLte);
					// approximate heuristic - reduce max length by some estimated factor to encourage split along other axis
					// since we cut off a quarter of elements in this direction the reduction should be *0.75f but we use 0.8
					// to account for some node overlap. This is somewhat of an arbitrary choice though
					maxElem[cmpLte.coordIndex] *= reductionFactors[i];
					// recompute cmpLte.coordIndex from updated maxElements
					cmpLte.coordIndex = PxU32(maxElem[0] > maxElem[1] && maxElem[0] > maxElem[2] ? 0 : (maxElem[1] > maxElem[2] ? 1 : 2));
				}
				count1 = cluster4;
			} else
			{
				count1 = PxU32(leftover);
				// verify that leftover + sum of previous clusters adds up to clusterSize or leftover is 0
				// leftover can be 0 if clusterSize<RTREE_N, this is generally rare, can happen for meshes with < RTREE_N tris
				PX_ASSERT(leftover == 0 || cluster4*i + count1 == clusterSize);
			}
			RTreeNodeNQ& curNode = resultTree[startNodeIndex+i];
			totalCount += count1; // accumulate total node count
			if(count1 <= maxBoundsPerLeafPage) // terminal page according to specified maxBoundsPerLeafPage
			{
				if(count1 && totalCount <= clusterSize)
				{
					// this will be true most of the time except when the total number of triangles in the mesh is < PAGESIZE
					curNode.leafCount = PxI32(count1);
					curNode.childPageFirstNodeIndex = PxI32(clusterOffset + PxU32(permute-permuteStart));
					// leaf bound = union of all bounds assigned to this leaf
					childBound = allBounds[permute[clusterOffset+0]];
					for(PxU32 i1 = 1; i1 < count1; i1++)
					{
						const PxBounds3V& bnd = allBounds[permute[clusterOffset+i1]];
						childBound.include(bnd);
					}
				} else
				{
					// since we are required to have PAGESIZE nodes per page for simd, we fill any leftover with empty nodes
					// we should only hit this if the total number of triangles in the mesh is < PAGESIZE
					childBound.mn = childBound.mx = V3Zero(); // shouldn't be necessary but setting just in case
					curNode.bounds.setEmpty();
					curNode.leafCount = -1;
					curNode.childPageFirstNodeIndex = -1; // using -1 for empty node
				}
			} else // not a terminal page, recurse on count1 nodes cluster
			{
				curNode.childPageFirstNodeIndex = PxI32(resultTree.size());
				curNode.leafCount = 0;
				sort4(permute+cluster4*i, count1, resultTree, maxLevels, childBound, level+1);
			}
			if(i == 0)
				subTreeBound = childBound; // initialize subTreeBound with first childBound
			else
				subTreeBound.include(childBound); // expand subTreeBound with current childBound
			// can use curNode since the reference change due to resizing in recursive call, need to recompute the pointer
			RTreeNodeNQ& curNode1 = resultTree[startNodeIndex+i];
			curNode1.bounds.minimum = childBound.getMinVec3(); // update node bounds using recursively computed childBound
			curNode1.bounds.maximum = childBound.getMaxVec3();
		}
	}
};
// heuristic size reduction factors for splitting heuristic (see how it's used above in SubSortQuick::sort4)
const PxReal SubSortQuick::reductionFactors[RTREE_N-1] = {0.8f, 0.7f, 0.6f};
// sizePerf trade-off presets for sorting routines: more triangles per leaf = smaller tree, fewer = faster queries
const PxU32 SubSortQuick::stopAtTrisPerLeaf1[SubSortQuick::NTRADEOFF] = {16, 14, 12, 10, 8, 7, 6, 5, 4};
/////////////////////////////////////////////////////////////////////////
// SAH sorting method
//
// Preset table: lower index=better size -> higher index = better perf
static const PxU32 NTRADEOFF = 15;
// Per-preset SAH recursion termination thresholds (indexed by preset number, see rows below):
// % -24 -23 -17 -15 -10 -8 -5 -3 0 +3 +3 +5 +7 +8 +9 - % raycast MeshSurface*Random benchmark perf
// K 717 734 752 777 793 811 824 866 903 939 971 1030 1087 1139 1266 - testzone size in K
// # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 - preset number
static const PxU32 stopAtTrisPerPage[NTRADEOFF] = { 64, 60, 56, 48, 46, 44, 40, 36, 32, 28, 24, 20, 16, 12, 12};
static const PxU32 stopAtTrisPerLeaf[NTRADEOFF] = { 16, 14, 12, 10, 9, 8, 8, 6, 5, 5, 5, 4, 4, 4, 2}; // capped at 2 anyway
/////////////////////////////////////////////////////////////////////////
// comparator struct for sorting the bounds along a specified coordIndex (coordIndex=0,1,2 for X,Y,Z)
struct SortBoundsPredicate
{
PxU32 coordIndex;
const PxBounds3V* allBounds;
SortBoundsPredicate(PxU32 coordIndex_, const PxBounds3V* allBounds_) : coordIndex(coordIndex_), allBounds(allBounds_)
{}
bool operator()(const PxU32 & idx1, const PxU32 & idx2) const
{
// using the bounds center for comparison
PxF32 center1 = V3ReadXYZ(allBounds[idx1].mn)[coordIndex] + V3ReadXYZ(allBounds[idx1].mx)[coordIndex];
PxF32 center2 = V3ReadXYZ(allBounds[idx2].mn)[coordIndex] + V3ReadXYZ(allBounds[idx2].mx)[coordIndex];
return (center1 < center2);
}
};
/////////////////////////////////////////////////////////////////////////
// auxiliary class for SAH build (SAH = surface area heuristic)
struct Interval
{
	PxU32 start, count; // start index and element count of a sub-range within the permutation array
	Interval(PxU32 s, PxU32 c) : start(s), count(c) {}
};
// SAH function - computes a value proportional to the AABB surface area from its extents
// (xy+yz+zx, i.e. half the true surface area; the constant factor is irrelevant for comparisons)
static PX_FORCE_INLINE void PxSAH(const Vec3VArg v, PxF32& sah)
{
	FStore(V3Dot(v, V3PermZXY(v)), &sah); // v.x*v.y + v.y*v.z + v.x*v.z;
}
struct SubSortSAH
{
PxU32* PX_RESTRICT permuteStart, *PX_RESTRICT tempPermute;
const PxBounds3V* PX_RESTRICT allBounds;
PxF32* PX_RESTRICT metricL;
PxF32* PX_RESTRICT metricR;
const PxU32* PX_RESTRICT xOrder, *PX_RESTRICT yOrder, *PX_RESTRICT zOrder;
const PxU32* PX_RESTRICT xRanks, *PX_RESTRICT yRanks, *PX_RESTRICT zRanks;
PxU32* PX_RESTRICT tempRanks;
PxU32 nbTotalBounds;
PxU32 iTradeOff;
// precompute various values used during sort
	// Stores the precomputed per-axis sort orders/ranks and allocates scratch buffers used during the sort.
	// x/y/zOrder: bound indices sorted by center along each axis; x/y/zRanks: inverse permutations of the orders.
	SubSortSAH(
		PxU32* permute, const PxBounds3V* allBounds_, PxU32 numBounds,
		const PxU32* xOrder_, const PxU32* yOrder_, const PxU32* zOrder_,
		const PxU32* xRanks_, const PxU32* yRanks_, const PxU32* zRanks_, PxReal sizePerfTradeOff01)
		: permuteStart(permute), allBounds(allBounds_),
		xOrder(xOrder_), yOrder(yOrder_), zOrder(zOrder_),
		xRanks(xRanks_), yRanks(yRanks_), zRanks(zRanks_), nbTotalBounds(numBounds)
	{
		metricL = PX_ALLOCATE(PxF32, numBounds, "metricL");
		metricR = PX_ALLOCATE(PxF32, numBounds, "metricR");
		tempPermute = PX_ALLOCATE(PxU32, (numBounds*2+1), "tempPermute");
		tempRanks = PX_ALLOCATE(PxU32, numBounds, "tempRanks");
		// map sizePerfTradeOff01 in [0,1] onto a preset index in [0, NTRADEOFF-1]
		iTradeOff = PxMin<PxU32>( PxU32(PxMax<PxReal>(0.0f, sizePerfTradeOff01)*NTRADEOFF), NTRADEOFF-1 );
	}
	~SubSortSAH() // release temporarily used memory (scratch buffers allocated in the constructor)
	{
		PX_FREE(metricL);
		PX_FREE(metricR);
		PX_FREE(tempPermute);
		PX_FREE(tempRanks);
	}
////////////////////////////////////////////////////////////////////
// returns split position for second array start relative to permute ptr
PxU32 split(PxU32* permute, PxU32 clusterSize)
{
if(clusterSize <= 1)
return 0;
if(clusterSize == 2)
return 1;
PxI32 minCount = clusterSize >= 4 ? 2 : 1;
PxI32 splitStartL = minCount; // range=[startL->endL)
PxI32 splitEndL = PxI32(clusterSize-minCount);
PxI32 splitStartR = PxI32(clusterSize-splitStartL); // range=(endR<-startR], startR > endR
PxI32 splitEndR = PxI32(clusterSize-splitEndL);
PX_ASSERT(splitEndL-splitStartL == splitStartR-splitEndR);
PX_ASSERT(splitStartL <= splitEndL);
PX_ASSERT(splitStartR >= splitEndR);
PX_ASSERT(splitEndR >= 1);
PX_ASSERT(splitEndL < PxI32(clusterSize));
// pick the best axis with some splitting metric
// axis index is X=0, Y=1, Z=2
PxF32 minMetric[3];
PxU32 minMetricSplit[3];
const PxU32* ranks3[3] = { xRanks, yRanks, zRanks };
const PxU32* orders3[3] = { xOrder, yOrder, zOrder };
for(PxU32 coordIndex = 0; coordIndex <= 2; coordIndex++)
{
SortBoundsPredicate sortPredicateLR(coordIndex, allBounds);
const PxU32* rank = ranks3[coordIndex];
const PxU32* order = orders3[coordIndex];
// build ranks in tempPermute
if(clusterSize == nbTotalBounds) // AP: about 4% perf gain from this optimization
{
// if this is a full cluster sort, we already have it done
for(PxU32 i = 0; i < clusterSize; i ++)
tempPermute[i] = order[i];
} else
{
// sort the tempRanks
for(PxU32 i = 0; i < clusterSize; i ++)
tempRanks[i] = rank[permute[i]];
PxSort(tempRanks, clusterSize);
for(PxU32 i = 0; i < clusterSize; i ++) // convert back from ranks to indices
tempPermute[i] = order[tempRanks[i]];
}
// we consider overlapping intervals for minimum sum of metrics
// left interval is from splitStartL up to splitEndL
// right interval is from splitStartR down to splitEndR
// first compute the array metricL
Vec3V boundsLmn = allBounds[tempPermute[0]].mn; // init with 0th bound
Vec3V boundsLmx = allBounds[tempPermute[0]].mx; // init with 0th bound
PxI32 ii;
for(ii = 1; ii < splitStartL; ii++) // sweep right to include all bounds up to splitStartL-1
{
boundsLmn = V3Min(boundsLmn, allBounds[tempPermute[ii]].mn);
boundsLmx = V3Max(boundsLmx, allBounds[tempPermute[ii]].mx);
}
PxU32 countL0 = 0;
for(ii = splitStartL; ii <= splitEndL; ii++) // compute metric for inclusive bounds from splitStartL to splitEndL
{
boundsLmn = V3Min(boundsLmn, allBounds[tempPermute[ii]].mn);
boundsLmx = V3Max(boundsLmx, allBounds[tempPermute[ii]].mx);
PxSAH(V3Sub(boundsLmx, boundsLmn), metricL[countL0++]);
}
// now we have metricL
// now compute the array metricR
Vec3V boundsRmn = allBounds[tempPermute[clusterSize-1]].mn; // init with last bound
Vec3V boundsRmx = allBounds[tempPermute[clusterSize-1]].mx; // init with last bound
for(ii = PxI32(clusterSize-2); ii > splitStartR; ii--) // include bounds to the left of splitEndR down to splitStartR
{
boundsRmn = V3Min(boundsRmn, allBounds[tempPermute[ii]].mn);
boundsRmx = V3Max(boundsRmx, allBounds[tempPermute[ii]].mx);
}
PxU32 countR0 = 0;
for(ii = splitStartR; ii >= splitEndR; ii--) // continue sweeping left, including bounds and recomputing the metric
{
boundsRmn = V3Min(boundsRmn, allBounds[tempPermute[ii]].mn);
boundsRmx = V3Max(boundsRmx, allBounds[tempPermute[ii]].mx);
PxSAH(V3Sub(boundsRmx, boundsRmn), metricR[countR0++]);
}
PX_ASSERT((countL0 == countR0) && (countL0 == PxU32(splitEndL-splitStartL+1)));
// now iterate over splitRange and compute the minimum sum of SAHLeft*countLeft + SAHRight*countRight
PxU32 minMetricSplitPosition = 0;
PxF32 minMetricLocal = PX_MAX_REAL;
const PxI32 hsI32 = PxI32(clusterSize/2);
const PxI32 splitRange = (splitEndL-splitStartL+1);
for(ii = 0; ii < splitRange; ii++)
{
PxF32 countL = PxF32(ii+minCount); // need to add minCount since ii iterates over splitRange
PxF32 countR = PxF32(splitRange-ii-1+minCount);
PX_ASSERT(PxU32(countL + countR) == clusterSize);
const PxF32 metric = (countL*metricL[ii] + countR*metricR[splitRange-ii-1]);
const PxU32 splitPos = PxU32(ii+splitStartL);
if(metric < minMetricLocal ||
(metric <= minMetricLocal && // same metric but more even split
PxAbs(PxI32(splitPos)-hsI32) < PxAbs(PxI32(minMetricSplitPosition)-hsI32)))
{
minMetricLocal = metric;
minMetricSplitPosition = splitPos;
}
}
minMetric[coordIndex] = minMetricLocal;
minMetricSplit[coordIndex] = minMetricSplitPosition;
// sum of axis lengths for both left and right AABBs
}
PxU32 winIndex = 2;
if(minMetric[0] <= minMetric[1] && minMetric[0] <= minMetric[2])
winIndex = 0;
else if(minMetric[1] <= minMetric[2])
winIndex = 1;
const PxU32* rank = ranks3[winIndex];
const PxU32* order = orders3[winIndex];
if(clusterSize == nbTotalBounds) // AP: about 4% gain from this special case optimization
{
// if this is a full cluster sort, we already have it done
for(PxU32 i = 0; i < clusterSize; i ++)
permute[i] = order[i];
} else
{
// sort the tempRanks
for(PxU32 i = 0; i < clusterSize; i ++)
tempRanks[i] = rank[permute[i]];
PxSort(tempRanks, clusterSize);
for(PxU32 i = 0; i < clusterSize; i ++)
permute[i] = order[tempRanks[i]];
}
PxU32 splitPoint = minMetricSplit[winIndex];
if(clusterSize == 3 && splitPoint == 0)
splitPoint = 1; // special case due to rounding
return splitPoint;
}
// compute surface area for a given split
PxF32 computeSA(const PxU32* permute, const Interval& split) // both permute and i are relative
{
	PX_ASSERT(split.count >= 1);
	// merge the AABBs of every bound referenced by this interval
	const PxBounds3V& first = allBounds[permute[split.start]];
	Vec3V mergedMn = first.mn;
	Vec3V mergedMx = first.mx;
	PxU32 k = split.count;
	while(--k > 0) // visits indices split.start+1 .. split.start+count-1
	{
		const PxBounds3V& cur = allBounds[permute[split.start + k]];
		mergedMn = V3Min(mergedMn, cur.mn);
		mergedMx = V3Max(mergedMx, cur.mx);
	}
	// surface-area-heuristic value of the merged box extents
	PxF32 sah;
	PxSAH(V3Sub(mergedMx, mergedMn), sah);
	return sah;
}
////////////////////////////////////////////////////////////////////
// main SAH sort routine
// Recursively partitions permute[0..clusterSize) into RTREE_N child intervals
// using surface-area-heuristic splits, appending exactly RTREE_N nodes to
// resultTree per call, then recursing into non-terminal children.
// maxLevels is updated to the deepest level reached (tree height).
void sort4(PxU32* permute, PxU32 clusterSize,
	PxArray<RTreeNodeNQ>& resultTree, PxU32& maxLevels, PxU32 level = 0, RTreeNodeNQ* parentNode = NULL)
{
	PX_UNUSED(parentNode);
	if(level == 0)
		maxLevels = 1;
	else
		maxLevels = PxMax(maxLevels, level+1);
	// splitPos[k] is the absolute end position of child k; initialized to a
	// degenerate 1-element-per-child layout for the small-cluster case
	PxU32 splitPos[RTREE_N];
	for(PxU32 j = 0; j < RTREE_N; j++)
		splitPos[j] = j+1;
	if(clusterSize >= RTREE_N)
	{
		// split into RTREE_N number of regions via RTREE_N-1 subsequent splits
		// each split is represented as a current interval
		// we iterate over currently active intervals and compute its surface area
		// then we split the interval with maximum surface area
		// AP scaffold: possible optimization - seems like computeSA can be cached for unchanged intervals
		PxInlineArray<Interval, 1024> splits;
		splits.pushBack(Interval(0, clusterSize));
		for(PxU32 iSplit = 0; iSplit < RTREE_N-1; iSplit++)
		{
			PxF32 maxSAH = -FLT_MAX;
			PxU32 maxSplit = 0xFFFFffff;
			for(PxU32 i = 0; i < splits.size(); i++)
			{
				if(splits[i].count == 1)
					continue; // cannot split a single-element interval
				PxF32 SAH = computeSA(permute, splits[i])*splits[i].count;
				if(SAH > maxSAH)
				{
					maxSAH = SAH;
					maxSplit = i;
				}
			}
			PX_ASSERT(maxSplit != 0xFFFFffff);
			// maxSplit is now the index of the interval in splits array with maximum surface area
			// we now split it into 2 using the split() function
			Interval old = splits[maxSplit];
			PX_ASSERT(old.count > 1);
			PxU32 splitLocal = split(permute+old.start, old.count); // relative split pos
			PX_ASSERT(splitLocal >= 1);
			PX_ASSERT(old.count-splitLocal >= 1);
			splits.pushBack(Interval(old.start, splitLocal));
			splits.pushBack(Interval(old.start+splitLocal, old.count-splitLocal));
			splits.replaceWithLast(maxSplit);
			splitPos[iSplit] = old.start+splitLocal;
		}
		// verification code, make sure split counts add up to clusterSize
		PX_ASSERT(splits.size() == RTREE_N);
		PxU32 sum = 0;
		PX_UNUSED(sum);
		for(PxU32 j = 0; j < RTREE_N; j++)
			sum += splits[j].count;
		PX_ASSERT(sum == clusterSize);
	}
	else // clusterSize < RTREE_N
	{
		// make it so splitCounts based on splitPos add up correctly for small cluster sizes
		for(PxU32 i = clusterSize; i < RTREE_N-1; i++)
			splitPos[i] = clusterSize;
	}
	// sort splitPos index array using quicksort (just a few values)
	PxSort(splitPos, RTREE_N-1);
	splitPos[RTREE_N-1] = clusterSize; // splitCount[n] is computed as splitPos[n+1]-splitPos[n], so we need to add this last value
	// now compute splitStarts and splitCounts from splitPos[] array. Also perform a bunch of correctness verification
	PxU32 splitStarts[RTREE_N];
	PxU32 splitCounts[RTREE_N];
	splitStarts[0] = 0;
	splitCounts[0] = splitPos[0];
	PxU32 sumCounts = splitCounts[0];
	PX_UNUSED(sumCounts);
	for(PxU32 j = 1; j < RTREE_N; j++)
	{
		splitStarts[j] = splitPos[j-1];
		PX_ASSERT(splitStarts[j-1]<=splitStarts[j]);
		splitCounts[j] = splitPos[j]-splitPos[j-1];
		PX_ASSERT(splitCounts[j] > 0 || clusterSize < RTREE_N);
		sumCounts += splitCounts[j];
		PX_ASSERT(splitStarts[j-1]+splitCounts[j-1]<=splitStarts[j]);
	}
	PX_ASSERT(sumCounts == clusterSize);
	PX_ASSERT(splitStarts[RTREE_N-1]+splitCounts[RTREE_N-1]<=clusterSize);
	// mark this cluster as terminal based on clusterSize <= stopAtTrisPerPage parameter for current iTradeOff user specified preset
	bool terminalClusterByTotalCount = (clusterSize <= stopAtTrisPerPage[iTradeOff]);
	// iterate over splitCounts for the current cluster, if any of counts exceed 16 (which is the maximum supported by LeafTriangles)
	// we cannot mark this cluster as terminal (has to be split more)
	for(PxU32 s = 0; s < RTREE_N; s++)
		if(splitCounts[s] > 16) // LeafTriangles doesn't support > 16 tris
			terminalClusterByTotalCount = false;
	// iterate over all the splits
	for(PxU32 s = 0; s < RTREE_N; s++)
	{
		RTreeNodeNQ rtn;
		PxU32 splitCount = splitCounts[s];
		if(splitCount > 0) // splits shouldn't be empty generally
		{
			// sweep left to right and compute min and max SAH for each individual bound in current split
			PxBounds3V b = allBounds[permute[splitStarts[s]]];
			PxF32 sahMin; PxSAH(b.getExtents(), sahMin);
			PxF32 sahMax = sahMin;
			// AP scaffold - looks like this could be optimized (we are recomputing bounds top down)
			for(PxU32 i = 1; i < splitCount; i++)
			{
				PxU32 localIndex = i + splitStarts[s];
				const PxBounds3V& b1 = allBounds[permute[localIndex]];
				PxF32 sah1; PxSAH(b1.getExtents(), sah1);
				sahMin = PxMin(sahMin, sah1);
				sahMax = PxMax(sahMax, sah1);
				b.include(b1);
			}
			rtn.bounds.minimum = V3ReadXYZ(b.mn);
			rtn.bounds.maximum = V3ReadXYZ(b.mx);
			// if bounds differ widely (according to some heuristic preset), we continue splitting
			// this is important for a mixed cluster with large and small triangles
			bool okSAH = (sahMax/sahMin < 40.0f);
			if(!okSAH)
				terminalClusterByTotalCount = false; // force splitting this cluster
			bool stopSplitting = // compute the final splitting criterion
				splitCount <= 2 || (okSAH && splitCount <= 3) // stop splitting at 2 nodes or if SAH ratio is OK and splitCount <= 3
				|| terminalClusterByTotalCount || splitCount <= stopAtTrisPerLeaf[iTradeOff];
			if(stopSplitting)
			{
				// this is a terminal page then, mark as such
				// first node index is relative to the top level input array beginning
				rtn.childPageFirstNodeIndex = PxI32(splitStarts[s]+(permute-permuteStart));
				rtn.leafCount = PxI32(splitCount);
				PX_ASSERT(splitCount <= 16); // LeafTriangles doesn't support more
			}
			else
			{
				// this is not a terminal page, we will recompute this later, after we recurse on subpages (label ZZZ)
				rtn.childPageFirstNodeIndex = -1;
				rtn.leafCount = 0;
			}
		}
		else // splitCount == 0 at this point, this is an empty padding node (with current presets it's very rare)
		{
			PX_ASSERT(splitCount == 0);
			rtn.bounds.setEmpty();
			rtn.childPageFirstNodeIndex = -1;
			rtn.leafCount = -1; // -1 marks a padding/sentinel node
		}
		resultTree.pushBack(rtn); // push the new node into the resultTree array
	}
	if(terminalClusterByTotalCount) // abort recursion if terminal cluster
		return;
	// recurse on subpages
	PxU32 parentIndex = resultTree.size() - RTREE_N; // save the parentIndex as specified (array can be resized during recursion)
	for(PxU32 s = 0; s<RTREE_N; s++)
	{
		RTreeNodeNQ* sParent = &resultTree[parentIndex+s]; // array can be resized and relocated during recursion
		if(sParent->leafCount == 0) // only split pages that were marked as non-terminal during splitting (see "label ZZZ" above)
		{
			// all child nodes will be pushed inside of this recursive call,
			// so we set the child pointer for parent node to resultTree.size()
			sParent->childPageFirstNodeIndex = PxI32(resultTree.size());
			sort4(permute+splitStarts[s], splitCounts[s], resultTree, maxLevels, level+1, sParent);
		}
	}
}
};
/////////////////////////////////////////////////////////////////////////
// initializes the input permute array with identity permutation
// and shuffles it so that new sorted index, newIndex = resultPermute[oldIndex]
// Builds the full quantized RTree into 'result': sorts bounds (SAH or quick
// path depending on 'hint'), quantizes the NQ node tree and packs it into
// 128-byte-aligned RTreePages.
static void buildFromBounds(
	Gu::RTree& result, const PxBounds3V* allBounds, PxU32 numBounds,
	PxArray<PxU32>& permute, RTreeCooker::RemapCallback* rc, Vec3VArg allMn, Vec3VArg allMx,
	PxReal sizePerfTradeOff01, PxMeshCookingHint::Enum hint)
{
	PX_UNUSED(sizePerfTradeOff01);
	PxBounds3V treeBounds(allMn, allMx);
	// start off with an identity permutation
	permute.resize(0);
	permute.reserve(numBounds+1);
	for(PxU32 j = 0; j < numBounds; j ++)
		permute.pushBack(j);
	// trailing sentinel detects out-of-bounds writes during sorting
	const PxU32 sentinel = 0xABCDEF01;
	permute.pushBack(sentinel);
	// load sorted nodes into an RTreeNodeNQ tree representation
	// build the tree structure from sorted nodes
	const PxU32 pageSize = RTREE_N;
	PxArray<RTreeNodeNQ> resultTree;
	resultTree.reserve(numBounds*2);
	PxU32 maxLevels = 0;
	if(hint == PxMeshCookingHint::eSIM_PERFORMANCE) // use high quality SAH build
	{
		PxArray<PxU32> xRanks(numBounds), yRanks(numBounds), zRanks(numBounds), xOrder(numBounds), yOrder(numBounds), zOrder(numBounds);
		PxMemCopy(xOrder.begin(), permute.begin(), sizeof(xOrder[0])*numBounds);
		PxMemCopy(yOrder.begin(), permute.begin(), sizeof(yOrder[0])*numBounds);
		PxMemCopy(zOrder.begin(), permute.begin(), sizeof(zOrder[0])*numBounds);
		// sort by shuffling the permutation, precompute sorted ranks for x,y,z-orders
		PxSort(xOrder.begin(), xOrder.size(), SortBoundsPredicate(0, allBounds));
		for(PxU32 i = 0; i < numBounds; i++) xRanks[xOrder[i]] = i;
		PxSort(yOrder.begin(), yOrder.size(), SortBoundsPredicate(1, allBounds));
		for(PxU32 i = 0; i < numBounds; i++) yRanks[yOrder[i]] = i;
		PxSort(zOrder.begin(), zOrder.size(), SortBoundsPredicate(2, allBounds));
		for(PxU32 i = 0; i < numBounds; i++) zRanks[zOrder[i]] = i;
		SubSortSAH ss(permute.begin(), allBounds, numBounds,
			xOrder.begin(), yOrder.begin(), zOrder.begin(), xRanks.begin(), yRanks.begin(), zRanks.begin(), sizePerfTradeOff01);
		ss.sort4(permute.begin(), numBounds, resultTree, maxLevels);
	} else
	{ // use fast cooking path
		PX_ASSERT(hint == PxMeshCookingHint::eCOOKING_PERFORMANCE);
		SubSortQuick ss(permute.begin(), allBounds, numBounds, sizePerfTradeOff01);
		PxBounds3V discard((PxBounds3V::U()));
		ss.sort4(permute.begin(), permute.size()-1, resultTree, maxLevels, discard); // AP scaffold: need to implement build speed/runtime perf slider
	}
	PX_ASSERT(permute[numBounds] == sentinel); // verify we didn't write past the array
	permute.popBack(); // discard the sentinel value
#if PRINT_RTREE_COOKING_STATS // stats code
	PxU32 totalLeafTris = 0;
	PxU32 numLeaves = 0;
	PxI32 maxLeafTris = 0;
	PxU32 numEmpty = 0;
	for(PxU32 i = 0; i < resultTree.size(); i++)
	{
		PxI32 leafCount = resultTree[i].leafCount;
		numEmpty += (resultTree[i].bounds.isEmpty());
		if(leafCount > 0)
		{
			numLeaves++;
			totalLeafTris += leafCount;
			if(leafCount > maxLeafTris)
				maxLeafTris = leafCount;
		}
	}
	printf("AABBs total/empty=%d/%d\n", resultTree.size(), numEmpty);
	printf("numTris=%d, numLeafAABBs=%d, avgTrisPerLeaf=%.2f, maxTrisPerLeaf = %d\n",
		numBounds, numLeaves, PxF32(totalLeafTris)/numLeaves, maxLeafTris);
#endif
	PX_ASSERT(RTREE_N*sizeof(RTreeNodeQ) == sizeof(RTreePage)); // needed for nodePtrMultiplier computation to be correct
	const int nodePtrMultiplier = sizeof(RTreeNodeQ); // convert offset as count in qnodes to page ptr
	// Quantize the tree. AP scaffold - might be possible to merge this phase with the page pass below this loop
	PxArray<RTreeNodeQ> qtreeNodes;
	PxU32 firstEmptyIndex = PxU32(-1);
	PxU32 resultCount = resultTree.size();
	qtreeNodes.reserve(resultCount);
	for(PxU32 i = 0; i < resultCount; i++) // AP scaffold - eliminate this pass
	{
		RTreeNodeNQ & u = resultTree[i];
		RTreeNodeQ q;
		q.setLeaf(u.leafCount > 0); // set the leaf flag
		if(u.childPageFirstNodeIndex == -1) // empty node?
		{
			// all empty nodes share the index of the first empty node seen
			if(firstEmptyIndex == PxU32(-1))
				firstEmptyIndex = qtreeNodes.size();
			q.minx = q.miny = q.minz = FLT_MAX; // AP scaffold improvement - use empty 1e30 bounds instead and reference a valid leaf
			q.maxx = q.maxy = q.maxz = -FLT_MAX; // that will allow to remove the empty node test from the runtime
			q.ptr = firstEmptyIndex*nodePtrMultiplier; PX_ASSERT((q.ptr & 1) == 0);
			q.setLeaf(true); // label empty node as leaf node
		} else
		{
			// non-leaf node
			q.minx = u.bounds.minimum.x;
			q.miny = u.bounds.minimum.y;
			q.minz = u.bounds.minimum.z;
			q.maxx = u.bounds.maximum.x;
			q.maxy = u.bounds.maximum.y;
			q.maxz = u.bounds.maximum.z;
			if(u.leafCount > 0)
			{
				q.ptr = PxU32(u.childPageFirstNodeIndex);
				rc->remap(&q.ptr, q.ptr, PxU32(u.leafCount));
				PX_ASSERT(q.isLeaf()); // remap is expected to set the isLeaf bit
			}
			else
			{
				// verify that all children bounds are included in the parent bounds
				for(PxU32 s = 0; s < RTREE_N; s++)
				{
					const RTreeNodeNQ& child = resultTree[u.childPageFirstNodeIndex+s];
					PX_UNUSED(child);
					// is a sentinel node or is inside parent's bounds
					PX_ASSERT(child.leafCount == -1 || child.bounds.isInside(u.bounds));
				}
				q.ptr = PxU32(u.childPageFirstNodeIndex * nodePtrMultiplier);
				PX_ASSERT(q.ptr % RTREE_N == 0);
				q.setLeaf(false);
			}
		}
		qtreeNodes.pushBack(q);
	}
	// build the final rtree image
	result.mInvDiagonal = PxVec4(1.0f);
	PX_ASSERT(qtreeNodes.size() % RTREE_N == 0);
	result.mTotalNodes = qtreeNodes.size();
	result.mTotalPages = result.mTotalNodes / pageSize;
	result.mPages = static_cast<RTreePage*>(
		PxAlignedAllocator<128>().allocate(sizeof(RTreePage)*result.mTotalPages, PX_FL));
	result.mBoundsMin = PxVec4(V3ReadXYZ(treeBounds.mn), 0.0f);
	result.mBoundsMax = PxVec4(V3ReadXYZ(treeBounds.mx), 0.0f);
	result.mDiagonalScaler = (result.mBoundsMax - result.mBoundsMin) / 65535.0f;
	result.mPageSize = pageSize;
	result.mNumLevels = maxLevels;
	PX_ASSERT(result.mTotalNodes % pageSize == 0);
	result.mNumRootPages = 1;
	// transpose AoS qtreeNodes into the SoA page layout used at runtime
	for(PxU32 j = 0; j < result.mTotalPages; j++)
	{
		RTreePage& page = result.mPages[j];
		for(PxU32 k = 0; k < RTREE_N; k ++)
		{
			const RTreeNodeQ& n = qtreeNodes[j*RTREE_N+k];
			page.maxx[k] = n.maxx;
			page.maxy[k] = n.maxy;
			page.maxz[k] = n.maxz;
			page.minx[k] = n.minx;
			page.miny[k] = n.miny;
			page.minz[k] = n.minz;
			page.ptrs[k] = n.ptr;
		}
	}
	//printf("Tree size=%d\n", result.mTotalPages*sizeof(RTreePage));
#if PX_DEBUG
	result.validate(); // make sure the child bounds are included in the parent and other validation
#endif
}
} // namespace physx
| 39,140 | C++ | 39.30999 | 147 | 0.689474 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/cooking/GuCookingBigConvexDataBuilder.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "cooking/PxCooking.h"
#include "foundation/PxUserAllocated.h"
#include "foundation/PxUtilities.h"
#include "foundation/PxVecMath.h"
#include "GuConvexMeshData.h"
#include "GuBigConvexData2.h"
#include "GuIntersectionRayPlane.h"
#include "GuCookingBigConvexDataBuilder.h"
#include "GuCookingConvexHullBuilder.h"
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
using namespace physx;
using namespace Gu;
using namespace aos;
static const PxU32 gSupportVersion = 0;	// binary version written with the 'SUPM' support-map header
static const PxU32 gVersion = 0;		// binary version written with the 'GAUS' gauss-map header
// Caches the hull description, the output BigConvexData and the hull vertices.
BigConvexDataBuilder::BigConvexDataBuilder(const Gu::ConvexHullData* hull, BigConvexData* gm, const PxVec3* hullVerts) : mHullVerts(hullVerts)
{
	mHull = hull;
	mSVM = gm;
}
BigConvexDataBuilder::~BigConvexDataBuilder()
{
	// intentionally empty - this builder releases nothing itself
}
bool BigConvexDataBuilder::initialize()
{
mSVM->mData.mSamples = PX_ALLOCATE(PxU8, mSVM->mData.mNbSamples*2u, "mData.mSamples");
#if PX_DEBUG
// printf("SVM: %d bytes\n", mNbSamples*sizeof(PxU8)*2);
#endif
return true;
}
// Serializes the support map: 'SUPM' chunk header, 'GAUS' chunk header,
// subdivision level, sample count, the raw sample byte table, then valencies.
// Returns false as soon as any header write fails.
bool BigConvexDataBuilder::save(PxOutputStream& stream, bool platformMismatch) const
{
	// Export header
	if(!Cm::WriteHeader('S', 'U', 'P', 'M', gSupportVersion, platformMismatch, stream))
		return false;
	// Save base gaussmap
//	if(!GaussMapBuilder::Save(stream, platformMismatch))	return false;
	// Export header
	if(!Cm::WriteHeader('G', 'A', 'U', 'S', gVersion, platformMismatch, stream))
		return false;
	// Export basic info
//	stream.StoreDword(mSubdiv);
	writeDword(mSVM->mData.mSubdiv, platformMismatch, stream);		// PT: could now write Word here
//	stream.StoreDword(mNbSamples);
	writeDword(mSVM->mData.mNbSamples, platformMismatch, stream);	// PT: could now write Word here
	// Save map data
	// It's an array of bytes so we don't care about 'PlatformMismatch'
	stream.write(mSVM->mData.mSamples, sizeof(PxU8)*mSVM->mData.mNbSamples*2);
	if(!saveValencies(stream, platformMismatch))
		return false;
	return true;
}
//////////////////////////////////////////////////////////////////////////
// compute valencies for each vertex
// we don't compute the edges again here; they are temporarily stored in the mHullDataFacesByAllEdges8 structure
// Pass 1 counts polygon references per vertex; pass 2 walks the faces around
// each vertex via the edge->face adjacency to emit an ordered neighbour list.
bool BigConvexDataBuilder::computeValencies(const ConvexHullBuilder& meshBuilder)
{
	// Create valencies
	const PxU32 numVertices = meshBuilder.mHull->mNbHullVertices;
	mSVM->mData.mNbVerts = numVertices;
	// Get ram for valencies and adjacent verts
	// vertex count rounded up to a multiple of 4 - presumably to keep the trailing byte array aligned; TODO confirm
	const PxU32 numAlignedVerts = (numVertices+3)&~3;
	const PxU32 TotalSize = sizeof(Gu::Valency)*numAlignedVerts + sizeof(PxU8)*meshBuilder.mHull->mNbEdges*2u;
	mSVM->mVBuffer = PX_ALLOC(TotalSize, "BigConvexData data");
	mSVM->mData.mValencies = reinterpret_cast<Gu::Valency*>(mSVM->mVBuffer);
	mSVM->mData.mAdjacentVerts = (reinterpret_cast<PxU8*>(mSVM->mVBuffer)) + sizeof(Gu::Valency)*numAlignedVerts;
	PxMemZero(mSVM->mData.mValencies, numVertices*sizeof(Gu::Valency));
	// hull vertex indices are PxU8, so there are at most 256 vertices;
	// marker holds the adjacency count once a vertex has been processed (0 = unvisited)
	PxU8 vertexMarker[256];
	PxMemZero(vertexMarker,numVertices);
	// Compute valencies
	for (PxU32 i = 0; i < meshBuilder.mHull->mNbPolygons; i++)
	{
		const PxU32 numVerts = meshBuilder.mHullDataPolygons[i].mNbVerts;
		const PxU8* Data = meshBuilder.mHullDataVertexData8 + meshBuilder.mHullDataPolygons[i].mVRef8;
		for (PxU32 j = 0; j < numVerts; j++)
		{
			mSVM->mData.mValencies[Data[j]].mCount++;
			PX_ASSERT(mSVM->mData.mValencies[Data[j]].mCount != 0xffff);	// 16-bit counter overflow guard
		}
	}
	// Create offsets
	mSVM->CreateOffsets();
	// mNbAdjVerts = mOffsets[mNbVerts-1] + mValencies[mNbVerts-1];
	mSVM->mData.mNbAdjVerts = PxU32(mSVM->mData.mValencies[mSVM->mData.mNbVerts - 1].mOffset + mSVM->mData.mValencies[mSVM->mData.mNbVerts - 1].mCount);
	PX_ASSERT(mSVM->mData.mNbAdjVerts == PxU32(meshBuilder.mHull->mNbEdges * 2));
	// Create adjacent vertices
	// parse the polygons and its vertices
	for (PxU32 i = 0; i < meshBuilder.mHull->mNbPolygons; i++)
	{
		PxU32 numVerts = meshBuilder.mHullDataPolygons[i].mNbVerts;
		const PxU8* Data = meshBuilder.mHullDataVertexData8 + meshBuilder.mHullDataPolygons[i].mVRef8;
		for (PxU32 j = 0; j < numVerts; j++)
		{
			const PxU8 vertexIndex = Data[j];
			PxU8 numAdj = 0;
			// if we did not parse this vertex yet, traverse to the adjacent face and then
			// again to next till we hit back the original polygon
			if(vertexMarker[vertexIndex] == 0)
			{
				PxU8 prevIndex = Data[(j+1)%numVerts];
				mSVM->mData.mAdjacentVerts[mSVM->mData.mValencies[vertexIndex].mOffset++] = prevIndex;
				numAdj++;
				// now traverse the neighbors
				// each edge stores its two incident faces at [edgeIndex] and [edgeIndex+1]
				const PxU16 edgeIndex = PxU16(meshBuilder.mEdgeData16[meshBuilder.mHullDataPolygons[i].mVRef8 + j]*2);
				PxU8 n0 = meshBuilder.mHullDataFacesByEdges8[edgeIndex];
				PxU8 n1 = meshBuilder.mHullDataFacesByEdges8[edgeIndex + 1];
				PxU32 neighborPolygon = n0 == i ? n1 : n0;
				while (neighborPolygon != i)
				{
					PxU32 numNeighborVerts = meshBuilder.mHullDataPolygons[neighborPolygon].mNbVerts;
					const PxU8* neighborData = meshBuilder.mHullDataVertexData8 + meshBuilder.mHullDataPolygons[neighborPolygon].mVRef8;
					PxU32 nextEdgeIndex = 0;
					// search in the neighbor face for the tested vertex
					for (PxU32 k = 0; k < numNeighborVerts; k++)
					{
						// search the vertexIndex
						if(neighborData[k] == vertexIndex)
						{
							const PxU8 nextIndex = neighborData[(k+1)%numNeighborVerts];
							// next index already there, pick the previous
							if(nextIndex == prevIndex)
							{
								prevIndex = k == 0 ? neighborData[numNeighborVerts - 1] : neighborData[k-1];
								nextEdgeIndex = k == 0 ? numNeighborVerts - 1 : k-1;
							}
							else
							{
								prevIndex = nextIndex;
								nextEdgeIndex = k;
							}
							mSVM->mData.mAdjacentVerts[mSVM->mData.mValencies[vertexIndex].mOffset++] = prevIndex;
							numAdj++;
							break;
						}
					}
					// now move to next neighbor
					const PxU16 edgeIndex2 = PxU16(meshBuilder.mEdgeData16[(meshBuilder.mHullDataPolygons[neighborPolygon].mVRef8 + nextEdgeIndex)]*2);
					n0 = meshBuilder.mHullDataFacesByEdges8[edgeIndex2];
					n1 = meshBuilder.mHullDataFacesByEdges8[edgeIndex2 + 1];
					neighborPolygon = n0 == neighborPolygon ? n1 : n0;
				}
				vertexMarker[vertexIndex] = numAdj;
			}
		}
	}
	// Recreate offsets (each mOffset was advanced by mCount while writing adjacency above)
	mSVM->CreateOffsets();
	return true;
}
//////////////////////////////////////////////////////////////////////////
// compute the min dot product from the verts for given dir
// Hill-climbs over vertex adjacency starting at startIndex_: repeatedly moves to
// the neighbour with the smallest negativeDir-scaled projection onto dir until no
// neighbour improves, then writes the final vertex back to startIndex_.
// The bitmap lets each vertex be accepted at most once, which guarantees termination.
void BigConvexDataBuilder::precomputeSample(const PxVec3& dir, PxU8& startIndex_, float negativeDir)
{
	PxU8 startIndex = startIndex_;
	const PxVec3* verts = mHullVerts;
	const Valency* valency = mSVM->mData.mValencies;
	const PxU8* adjacentVerts = mSVM->mData.mAdjacentVerts;
	// we have only 256 verts
	PxU32 smallBitMap[8] = {0,0,0,0,0,0,0,0};
	float minimum = negativeDir * verts[startIndex].dot(dir);
	PxU32 initialIndex = startIndex;
	do
	{
		initialIndex = startIndex;
		const PxU32 numNeighbours = valency[startIndex].mCount;
		const PxU32 offset = valency[startIndex].mOffset;
		// scan all neighbours of the current best vertex
		for (PxU32 a = 0; a < numNeighbours; ++a)
		{
			const PxU8 neighbourIndex = adjacentVerts[offset + a];
			const float dist = negativeDir * verts[neighbourIndex].dot(dir);
			if (dist < minimum)
			{
				const PxU32 ind = PxU32(neighbourIndex >> 5);
				const PxU32 mask = PxU32(1 << (neighbourIndex & 31));
				// only accept a vertex we haven't accepted before
				if ((smallBitMap[ind] & mask) == 0)
				{
					smallBitMap[ind] |= mask;
					minimum = dist;
					startIndex = neighbourIndex;
				}
			}
		}
	} while (startIndex != initialIndex);	// stop when no neighbour improved
	startIndex_ = startIndex;
}
//////////////////////////////////////////////////////////////////////////
// Precompute the min/max vertices for cube directions.
// For every sample direction on a subdivided cube, stores the hill-climbing start
// vertex for the positive projection in mSamples[0..mNbSamples) and for the
// negative projection in mSamples[mNbSamples..2*mNbSamples).
bool BigConvexDataBuilder::precompute(PxU32 subdiv)
{
	mSVM->mData.mSubdiv = PxTo16(subdiv);
	mSVM->mData.mNbSamples = PxTo16(6 * subdiv*subdiv);	// 6 cube faces, subdiv^2 samples each
	if (!initialize())
		return false;
	// running start vertices, reused between samples so each hill climb starts near the previous result
	PxU8 startIndex[12] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	PxU8 startIndex2[12] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	const float halfSubdiv = float(subdiv - 1) * 0.5f;
	// only iterate i >= j; each pass fills both the (i,j) and the mirrored (j,i) sample
	for (PxU32 j = 0; j < subdiv; j++)
	{
		for (PxU32 i = j; i < subdiv; i++)
		{
			const float iSubDiv = 1.0f - i / halfSubdiv;
			const float jSubDiv = 1.0f - j / halfSubdiv;
			PxVec3 tempDir(1.0f, iSubDiv, jSubDiv);
			// we need to normalize only once, then we permute the components
			// as before for each i,j and j,i face direction
			tempDir.normalize();
			const PxVec3 dirs[12] = {
				PxVec3(-tempDir.x, tempDir.y, tempDir.z),
				PxVec3(tempDir.x, tempDir.y, tempDir.z),
				PxVec3(tempDir.z, -tempDir.x, tempDir.y),
				PxVec3(tempDir.z, tempDir.x, tempDir.y),
				PxVec3(tempDir.y, tempDir.z, -tempDir.x),
				PxVec3(tempDir.y, tempDir.z, tempDir.x),
				PxVec3(-tempDir.x, tempDir.z, tempDir.y),
				PxVec3(tempDir.x, tempDir.z, tempDir.y),
				PxVec3(tempDir.y, -tempDir.x, tempDir.z),
				PxVec3(tempDir.y, tempDir.x, tempDir.z),
				PxVec3(tempDir.z, tempDir.y, -tempDir.x),
				PxVec3(tempDir.z, tempDir.y, tempDir.x)
			};
			// compute in each direction + negative/positive dot, we have
			// then two start indexes, which are used then for hill climbing
			for (PxU32 dStep = 0; dStep < 12; dStep++)
			{
				precomputeSample(dirs[dStep], startIndex[dStep], 1.0f);
				precomputeSample(dirs[dStep], startIndex2[dStep], -1.0f);
			}
			// decompose the vector results into face directions
			// dirs[0..5] belong to the (i,j) sample, dirs[6..11] to the mirrored (j,i) sample
			for (PxU32 k = 0; k < 6; k++)
			{
				const PxU32 ksub = k*subdiv*subdiv;
				const PxU32 offset = j + i*subdiv + ksub;
				const PxU32 offset2 = i + j*subdiv + ksub;
				PX_ASSERT(offset < mSVM->mData.mNbSamples);
				PX_ASSERT(offset2 < mSVM->mData.mNbSamples);
				mSVM->mData.mSamples[offset] = startIndex[k];
				mSVM->mData.mSamples[offset + mSVM->mData.mNbSamples] = startIndex2[k];
				mSVM->mData.mSamples[offset2] = startIndex[k + 6];
				mSVM->mData.mSamples[offset2 + mSVM->mData.mNbSamples] = startIndex2[k + 6];
			}
		}
	}
	return true;
}
static const PxU32 gValencyVersion = 2;	// binary version written with the 'VALE' valencies header

//////////////////////////////////////////////////////////////////////////
// Serializes valencies: 'VALE' header, vertex and adjacency counts, the per-vertex
// valency counts (via Cm::StoreIndices, presumably width-reduced by maxIndex -
// see its implementation), then the raw adjacency byte array.
bool BigConvexDataBuilder::saveValencies(PxOutputStream& stream, bool platformMismatch) const
{
	// Export header
	if(!Cm::WriteHeader('V', 'A', 'L', 'E', gValencyVersion, platformMismatch, stream))
		return false;
	writeDword(mSVM->mData.mNbVerts, platformMismatch, stream);
	writeDword(mSVM->mData.mNbAdjVerts, platformMismatch, stream);
	{
		// copy the 16-bit counts into a temporary contiguous array for StoreIndices
		PxU16* temp = PX_ALLOCATE(PxU16, mSVM->mData.mNbVerts, "tmp");
		for(PxU32 i=0;i<mSVM->mData.mNbVerts;i++)
			temp[i] = mSVM->mData.mValencies[i].mCount;
		const PxU32 maxIndex = computeMaxIndex(temp, mSVM->mData.mNbVerts);
		writeDword(maxIndex, platformMismatch, stream);
		Cm::StoreIndices(PxTo16(maxIndex), mSVM->mData.mNbVerts, temp, stream, platformMismatch);
		PX_FREE(temp);
	}
	// raw bytes, no endian handling needed
	stream.write(mSVM->mData.mAdjacentVerts, mSVM->mData.mNbAdjVerts);
	return true;
}
| 12,637 | C++ | 34.6 | 199 | 0.6788 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/cooking/GuCookingQuickHullConvexHullLib.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuCookingQuickHullConvexHullLib.h"
#include "GuCookingConvexHullUtils.h"
#include "foundation/PxAllocator.h"
#include "foundation/PxUserAllocated.h"
#include "foundation/PxBitUtils.h"
#include "foundation/PxSort.h"
#include "foundation/PxUtilities.h"
#include "foundation/PxMath.h"
#include "foundation/PxPlane.h"
#include "foundation/PxBounds3.h"
#include "foundation/PxMemory.h"
using namespace physx;
namespace local
{
//////////////////////////////////////////////////////////////////////////
static const float MIN_ADJACENT_ANGLE = 3.0f; // in degrees - result wont have two adjacent facets within this angle of each other.
static const float PLANE_THICKNES = 3.0f * PX_EPS_F32; // points within this distance are considered on a plane
static const float MAXDOT_MINANG = cosf(PxDegToRad(MIN_ADJACENT_ANGLE)); // adjacent angle for dot product tests
//////////////////////////////////////////////////////////////////////////
struct QuickHullFace;
class ConvexHull;
class HullPlanes;
//////////////////////////////////////////////////////////////////////////
// Paged pool of T items. Memory is acquired in blocks of mPreallocateSize items;
// blocks are only released by reset() or destruction. When useIndexing is true,
// every item is placement-new constructed with its global index T(index).
// Fix: the preallocating constructor previously skipped the placement-new
// indexing that init() and getFreeItem() performed, leaving items of the first
// block unconstructed when useIndexing is true. All block creation now goes
// through allocateBlock() so the behavior is consistent.
template<typename T, bool useIndexing>
class MemBlock
{
public:
	MemBlock(PxU32 preallocateSize)
		: mPreallocateSize(preallocateSize), mCurrentBlock(0), mCurrentIndex(0)
	{
		PX_ASSERT(preallocateSize);
		allocateBlock();
	}

	MemBlock()
		: mPreallocateSize(0), mCurrentBlock(0), mCurrentIndex(0)
	{
	}

	// deferred setup for the default-constructed case
	void init(PxU32 preallocateSize)
	{
		PX_ASSERT(preallocateSize);
		mPreallocateSize = preallocateSize;
		allocateBlock();
	}

	~MemBlock()
	{
		freeBlocks();
	}

	// drops all blocks and starts over with a single fresh block
	void reset()
	{
		freeBlocks();
		mCurrentBlock = 0;
		mCurrentIndex = 0;
		init(mPreallocateSize);
	}

	// returns the item with the given global index (must lie in an allocated block)
	T* getItem(PxU32 index)
	{
		const PxU32 block = index/mPreallocateSize;
		const PxU32 itemIndex = index % mPreallocateSize;
		PX_ASSERT(block <= mCurrentBlock);
		PX_ASSERT(itemIndex < mPreallocateSize);
		return &(mBlocks[block])[itemIndex];
	}

	// returns the next unused item, allocating a new block when the current one is full
	T* getFreeItem()
	{
		PX_ASSERT(mPreallocateSize);
		if(mCurrentIndex == mPreallocateSize)
		{
			mCurrentBlock++;
			allocateBlock();
			mCurrentIndex = 0;
		}
		return &(mBlocks[mCurrentBlock])[mCurrentIndex++];
	}

private:
	// allocates one block at position mCurrentBlock and, when indexing is enabled,
	// constructs each item with its global index
	void allocateBlock()
	{
		T* block = PX_ALLOCATE(T, mPreallocateSize, "Quickhull MemBlock");
		if(useIndexing)
		{
			const PxU32 base = mCurrentBlock*mPreallocateSize;
			for (PxU32 i = 0; i < mPreallocateSize; i++)
			{
				// placement new to index data
				PX_PLACEMENT_NEW(&block[i], T)(base + i);
			}
		}
		mBlocks.pushBack(block);
	}

	// releases all blocks; items are freed without running destructors (as in the original)
	void freeBlocks()
	{
		for (PxU32 i = 0; i < mBlocks.size(); i++)
		{
			PX_FREE(mBlocks[i]);
		}
		mBlocks.clear();
	}

	PxU32		mPreallocateSize;	// items per block
	PxU32		mCurrentBlock;		// index of the block being filled
	PxU32		mCurrentIndex;		// next free slot within the current block
	PxArray<T*>	mBlocks;			// all allocated blocks
};
//////////////////////////////////////////////////////////////////////////
// representation of quick hull vertex
struct QuickHullVertex
{
	PxVec3				point;	// point vector
	PxU32				index;	// point index for compare
	float				dist;	// distance from plane if necessary
	QuickHullVertex*	next;	// link to next vertex, linked list used for conflict list

	// vertices are identified by their source point index
	// (redundant "? true : false" ternaries removed)
	PX_FORCE_INLINE bool operator==(const QuickHullVertex& vertex) const
	{
		return index == vertex.index;
	}

	// ordering by the cached plane distance
	PX_FORCE_INLINE bool operator <(const QuickHullVertex& vertex) const
	{
		return dist < vertex.dist;
	}
};
//////////////////////////////////////////////////////////////////////////
// representation of quick hull half edge
// Half edge of the hull topology. Each edge stores only its tail vertex;
// the head is the tail of the twin edge on the neighboring face.
struct QuickHullHalfEdge
{
	QuickHullHalfEdge() : prev(NULL), next(NULL), twin(NULL), face(NULL), edgeIndex(0xFFFFFFFF)
	{
	}
	// indexing constructor used by MemBlock<.., true>; the index parameter
	// is ignored here since half edges do not store a pool index
	QuickHullHalfEdge(PxU32 )
		: prev(NULL), next(NULL), twin(NULL), face(NULL), edgeIndex(0xFFFFFFFF)
	{
	}
	QuickHullVertex		tail;		// tail vertex, head vertex is the tail of the twin
	QuickHullHalfEdge*	prev;		// previous edge around the owning face
	QuickHullHalfEdge*	next;		// next edge around the owning face
	QuickHullHalfEdge*	twin;		// twin/opposite edge on the adjacent face
	QuickHullFace*		face;		// face this edge belongs to
	PxU32				edgeIndex;	// edge index used for edge creation

	PX_FORCE_INLINE const QuickHullVertex& getTail() const
	{
		return tail;
	}

	// head vertex == tail of the twin; twin must already be linked
	PX_FORCE_INLINE const QuickHullVertex& getHead() const
	{
		PX_ASSERT(twin);
		return twin->tail;
	}

	// symmetric link: sets both this->twin and edge->twin
	PX_FORCE_INLINE void setTwin(QuickHullHalfEdge* edge)
	{
		twin = edge;
		edge->twin = this;
	}

	// face on the other side of this edge
	PX_FORCE_INLINE QuickHullFace* getOppositeFace() const
	{
		return twin->face;
	}

	// signed distance of the opposite face's centroid from this edge's face plane
	float getOppositeFaceDistance() const;
};
//////////////////////////////////////////////////////////////////////////
// pointer-array helper types used throughout the quickhull implementation
typedef PxArray<QuickHullVertex*>	QuickHullVertexArray;
typedef PxArray<QuickHullHalfEdge*>	QuickHullHalfEdgeArray;
typedef PxArray<QuickHullFace*>		QuickHullFaceArray;
//////////////////////////////////////////////////////////////////////////
// representation of quick hull face
struct QuickHullFace
{
enum FaceState
{
eVISIBLE,
eDELETED,
eNON_CONVEX
};
QuickHullHalfEdge* edge; // starting edge
PxU16 numEdges; // num edges on the face
QuickHullVertex* conflictList; // conflict list, used to determine unclaimed vertices
PxVec3 normal; // Newell plane normal
float area; // face area
PxVec3 centroid; // face centroid
float planeOffset; // Newell plane offset
float expandOffset; // used for plane expansion if vertex limit reached
FaceState state; // face validity state
QuickHullFace* nextFace; // used to indicate next free face in faceList
PxU32 index; // face index for compare identification
PxU8 outIndex; // face index used for output descriptor
public:
QuickHullFace()
: edge(NULL), numEdges(0), conflictList(NULL), area(0.0f), planeOffset(0.0f), expandOffset(-FLT_MAX),
state(eVISIBLE), nextFace(NULL), outIndex(0)
{
}
QuickHullFace(PxU32 ind)
: edge(NULL), numEdges(0), conflictList(NULL), area(0.0f), planeOffset(0.0f), expandOffset(-FLT_MAX),
state(eVISIBLE), nextFace(NULL), index(ind), outIndex(0)
{
}
~QuickHullFace()
{
}
// get edge on index
PX_FORCE_INLINE QuickHullHalfEdge* getEdge(PxU32 i) const
{
QuickHullHalfEdge* he = edge;
while (i > 0)
{
he = he->next;
i--;
}
return he;
}
// distance from a plane to provided point
PX_FORCE_INLINE float distanceToPlane(const PxVec3 p) const
{
return normal.dot(p) - planeOffset;
}
// compute face normal and centroid
PX_FORCE_INLINE void computeNormalAndCentroid()
{
PX_ASSERT(edge);
normal = PxVec3(PxZero);
numEdges = 1;
QuickHullHalfEdge* testEdge = edge;
QuickHullHalfEdge* startEdge = NULL;
float maxDist = -1.0f;
for (PxU32 i = 0; i < 3; i++)
{
const float d = (testEdge->tail.point - testEdge->next->tail.point).magnitudeSquared();
if (d > maxDist)
{
maxDist = d;
startEdge = testEdge;
}
testEdge = testEdge->next;
}
PX_ASSERT(startEdge);
QuickHullHalfEdge* he = startEdge->next;
const PxVec3& p0 = startEdge->tail.point;
const PxVec3 d = he->tail.point - p0;
centroid = startEdge->tail.point;
do
{
numEdges++;
centroid += he->tail.point;
normal += d.cross(he->next->tail.point - p0);
he = he->next;
} while (he != startEdge);
area = normal.normalize();
centroid *= (1.0f / float(numEdges));
planeOffset = normal.dot(centroid);
}
// merge adjacent face
bool mergeAdjacentFace(QuickHullHalfEdge* halfEdge, QuickHullFaceArray& discardedFaces);
// check face consistency
bool checkFaceConsistency();
private:
// connect halfedges
QuickHullFace* connectHalfEdges(QuickHullHalfEdge* hedgePrev, QuickHullHalfEdge* hedge);
// check if the face does have only 3 vertices
PX_FORCE_INLINE bool isTriangle() const
{
return numEdges == 3 ? true : false;
}
};
//////////////////////////////////////////////////////////////////////////
// result codes returned by QuickHull::buildHull()
struct QuickHullResult
{
	enum Enum
	{
		eSUCCESS,					// ok
		eZERO_AREA_TEST_FAILED,		// area test failed for the initial simplex
		eVERTEX_LIMIT_REACHED,		// vertex limit reached, caller needs to expand/crop the hull
		ePOLYGONS_LIMIT_REACHED,	// polygons hard limit reached
		eFAILURE					// general failure
	};
};
//////////////////////////////////////////////////////////////////////////
// Quickhull base class holding the hull during construction
// Quickhull base class holding the hull during construction.
// Lifetime: preallocate() -> parseInputVertices() [-> setPrecomputedMinMax()]
// -> buildHull() -> releaseHull().
class QuickHull : public PxUserAllocated
{
	PX_NOCOPY(QuickHull)
public:
	QuickHull(const PxCookingParams& params, const PxConvexMeshDesc& desc);
	~QuickHull();

	// preallocate the edges, faces, vertices
	void preallocate(PxU32 numVertices);

	// parse the input verts, store them into internal format
	void parseInputVertices(const PxVec3* verts, PxU32 numVerts);

	// release the hull and data
	void releaseHull();

	// sets the precomputed min/max data; when set, computeMinMaxVerts() is skipped
	void setPrecomputedMinMax(const QuickHullVertex* minVertex,const QuickHullVertex* maxVertex, const float tolerance,const float planeTolerance);

	// main entry function to build the hull from provided points
	QuickHullResult::Enum buildHull();

	// returns the largest number of vertices found on a single visible face
	PxU32 maxNumVertsPerFace() const;

protected:

	// compute min max verts
	void computeMinMaxVerts();

	// find the initial simplex
	bool findSimplex();

	// add the initial simplex
	// returns true if the operation was successful, false otherwise
	bool addSimplex(QuickHullVertex* simplex, bool flipTriangle);

	// finds next point to add
	QuickHullVertex* nextPointToAdd(QuickHullFace*& eyeFace);

	// adds point to the hull
	bool addPointToHull(const QuickHullVertex* vertex, QuickHullFace& face, bool& addFailed);

	// creates new face from given triangles
	QuickHullFace* createTriangle(const QuickHullVertex& v0, const QuickHullVertex& v1, const QuickHullVertex& v2);

	// adds point to the face conflict list
	void addPointToFace(QuickHullFace& face, QuickHullVertex* vertex, float dist);

	// removes eye point from the face conflict list
	void removeEyePointFromFace(QuickHullFace& face, const QuickHullVertex* vertex);

	// calculate the horizon for the eyePoint against a given face
	void calculateHorizon(const PxVec3& eyePoint, QuickHullHalfEdge* edge, QuickHullFace& face, QuickHullHalfEdgeArray& horizon, QuickHullFaceArray& removedFaces);

	// adds new faces from given horizon and eyePoint
	void addNewFacesFromHorizon(const QuickHullVertex* eyePoint, const QuickHullHalfEdgeArray& horizon, QuickHullFaceArray& newFaces);

	// merge adjacent face
	bool doAdjacentMerge(QuickHullFace& face, bool mergeWrtLargeFace, bool& mergeFailed);

	// merge adjacent face doing normal test
	bool doPostAdjacentMerge(QuickHullFace& face, const float minAngle);

	// delete face points
	void deleteFacePoints(QuickHullFace& faceToDelete, QuickHullFace* absorbingFace);

	// resolve unclaimed points
	void resolveUnclaimedPoints(const QuickHullFaceArray& newFaces);

	// merges polygons with similar normals
	void postMergeHull();

	// check if 2 faces can be merged
	bool canMergeFaces(const QuickHullHalfEdge& he);

	// get next free face from the preallocated pool
	PX_FORCE_INLINE QuickHullFace* getFreeHullFace()
	{
		return mFreeFaces.getFreeItem();
	}

	// get next free half edge from the preallocated pool
	PX_FORCE_INLINE QuickHullHalfEdge* getFreeHullHalfEdge()
	{
		return mFreeHalfEdges.getFreeItem();
	}

	PX_FORCE_INLINE PxU32 getNbHullVerts() { return mOutputNumVertices; }

protected:
	friend class physx::QuickHullConvexHullLib;

	const PxCookingParams&	mCookingParams;		// cooking params
	const PxConvexMeshDesc&	mConvexDesc;		// convex desc

	PxVec3					mInteriorPoint;		// interior point for int/ext tests
	PxU32					mMaxVertices;		// maximum number of vertices (can differ from the input count, as vertices may be added during cleanup)
	PxU32					mNumVertices;		// actual number of input vertices
	PxU32					mOutputNumVertices;	// num vertices of the computed hull
	PxU32					mTerminalVertex;	// in case we failed to generate hull in a regular run we set the terminal vertex and rerun
	QuickHullVertex*		mVerticesList;		// vertices list preallocated

	MemBlock<QuickHullHalfEdge, false>	mFreeHalfEdges;	// free half edges pool
	MemBlock<QuickHullFace, true>		mFreeFaces;		// free faces pool (indexed)

	QuickHullFaceArray		mHullFaces;			// actual hull faces, contains also invalid and not used faces
	PxU32					mNumHullFaces;		// actual number of hull faces

	bool					mPrecomputedMinMax;	// if we got the precomputed min/max values
	QuickHullVertex			mMinVertex[3];		// extreme vertex along -x,-y,-z
	QuickHullVertex			mMaxVertex[3];		// extreme vertex along +x,+y,+z
	float					mTolerance;			// hull tolerance, used for plane thickness and merge strategy
	float					mPlaneTolerance;	// used for post merge stage

	QuickHullVertexArray	mUnclaimedPoints;	// holds temp unclaimed points
	QuickHullHalfEdgeArray	mHorizon;			// array for horizon computation
	QuickHullFaceArray		mNewFaces;			// new faces created during horizon computation
	QuickHullFaceArray		mRemovedFaces;		// removed faces during horizon computation
	QuickHullFaceArray		mDiscardedFaces;	// discarded faces during face merging
};
//////////////////////////////////////////////////////////////////////////
// return the distance from opposite face
float QuickHullHalfEdge::getOppositeFaceDistance() const
{
PX_ASSERT(face);
PX_ASSERT(twin);
return face->distanceToPlane(twin->face->centroid);
}
//////////////////////////////////////////////////////////////////////////
// merge adjacent face from provided half edge.
// 1. set new half edges
// 2. connect the new half edges - check that we did not produce redundant triangles, discard them
// 3. recompute the plane and check consistency
// Returns false if merge failed
// Absorbs the face on the other side of hedgeAdj into this face.
// hedgeAdj is the shared edge (owned by this face); the opposite face and
// any skinny triangles produced by the merge are appended to discardedFaces.
// Returns false if the opposite face is degenerate and the merge must abort.
bool QuickHullFace::mergeAdjacentFace(QuickHullHalfEdge* hedgeAdj, QuickHullFaceArray& discardedFaces)
{
	QuickHullFace* oppFace = hedgeAdj->getOppositeFace();

	// the opposite face is absorbed into this one
	discardedFaces.pushBack(oppFace);
	oppFace->state = QuickHullFace::eDELETED;

	QuickHullHalfEdge* hedgeOpp = hedgeAdj->twin;

	QuickHullHalfEdge* hedgeAdjPrev = hedgeAdj->prev;
	QuickHullHalfEdge* hedgeAdjNext = hedgeAdj->next;
	QuickHullHalfEdge* hedgeOppPrev = hedgeOpp->prev;
	QuickHullHalfEdge* hedgeOppNext = hedgeOpp->next;

	// the faces may share more than one consecutive edge: widen the shared
	// span in the adjPrev direction until it no longer borders oppFace
	QuickHullHalfEdge* breakEdge = hedgeAdjPrev;
	while (hedgeAdjPrev->getOppositeFace() == oppFace)
	{
		hedgeAdjPrev = hedgeAdjPrev->prev;
		hedgeOppNext = hedgeOppNext->next;
		// Edge case merge face is degenerated and we need to abort merging
		if (hedgeAdjPrev == breakEdge)
		{
			return false;
		}
	}

	// same widening in the adjNext direction
	breakEdge = hedgeAdjNext;
	while (hedgeAdjNext->getOppositeFace() == oppFace)
	{
		hedgeOppPrev = hedgeOppPrev->prev;
		hedgeAdjNext = hedgeAdjNext->next;
		// Edge case merge face is degenerated and we need to abort merging
		if (hedgeAdjNext == breakEdge)
		{
			return false;
		}
	}

	QuickHullHalfEdge* hedge;

	// re-own the surviving edges of the absorbed face
	for (hedge = hedgeOppNext; hedge != hedgeOppPrev->next; hedge = hedge->next)
	{
		hedge->face = this;
	}

	// if we are about to delete the shared edge, check if its not the starting edge of the face
	if (hedgeAdj == edge)
	{
		edge = hedgeAdjNext;
	}

	// handle the half edges at the head
	QuickHullFace* discardedFace;
	discardedFace = connectHalfEdges(hedgeOppPrev, hedgeAdjNext);
	if (discardedFace != NULL)
	{
		discardedFaces.pushBack(discardedFace);
	}

	// handle the half edges at the tail
	discardedFace = connectHalfEdges(hedgeAdjPrev, hedgeOppNext);
	if (discardedFace != NULL)
	{
		discardedFaces.pushBack(discardedFace);
	}

	// the merged boundary changed - recompute the plane equation
	computeNormalAndCentroid();
	PX_ASSERT(checkFaceConsistency());
	return true;
}
//////////////////////////////////////////////////////////////////////////
// connect half edges of 2 adjacent faces
// if we find redundancy - edges are in a line, we drop the additional face if it is just a skinny triangle
// Stitches hedgePrev -> hedge after a merge. If both edges border the same
// opposite face the shared boundary is redundant: either the opposite face is
// a skinny triangle (returned for discarding) or its two half edges are
// collapsed into one. Returns the discarded face, or NULL.
QuickHullFace* QuickHullFace::connectHalfEdges(QuickHullHalfEdge* hedgePrev, QuickHullHalfEdge* hedge)
{
	QuickHullFace* discardedFace = NULL;

	// redundant edge - both half edges border the same neighbor
	if (hedgePrev->getOppositeFace() == hedge->getOppositeFace())
	{
		// then there is a redundant edge that we can get rid of
		QuickHullFace* oppFace = hedge->getOppositeFace();
		QuickHullHalfEdge* hedgeOpp;

		// keep the face's starting edge valid
		if (hedgePrev == edge)
		{
			edge = hedge;
		}

		// check if its not a skinny face with just 3 vertices - 3 edges
		if (oppFace->isTriangle())
		{
			// then we can get rid of the opposite face altogether
			hedgeOpp = hedge->twin->prev->twin;

			oppFace->state = QuickHullFace::eDELETED;
			discardedFace = oppFace;
		}
		else
		{
			// if not triangle, merge the 2 opposite halfedges into one
			hedgeOpp = hedge->twin->next;

			if (oppFace->edge == hedgeOpp->prev)
			{
				oppFace->edge = hedgeOpp;
			}
			hedgeOpp->prev = hedgeOpp->prev->prev;
			hedgeOpp->prev->next = hedgeOpp;
		}

		// unlink hedgePrev from this face and re-twin the surviving edges
		hedge->prev = hedgePrev->prev;
		hedge->prev->next = hedge;

		hedge->twin = hedgeOpp;
		hedgeOpp->twin = hedge;

		// oppFace was modified, so need to recompute
		oppFace->computeNormalAndCentroid();
	}
	else
	{
		// just link the halfedges together
		hedgePrev->next = hedge;
		hedge->prev = hedgePrev;
	}
	return discardedFace;
}
//////////////////////////////////////////////////////////////////////////
// check face consistency
bool QuickHullFace::checkFaceConsistency()
{
// do a sanity check on the face
QuickHullHalfEdge* hedge = edge;
PxU32 numv = 0;
// check degenerate face
do
{
numv++;
hedge = hedge->next;
} while (hedge != edge);
// degenerate face found
PX_ASSERT(numv > 2);
numv = 0;
hedge = edge;
do
{
QuickHullHalfEdge* hedgeOpp = hedge->twin;
// check if we have twin set
PX_ASSERT(hedgeOpp != NULL);
// twin for the twin must be the original edge
PX_ASSERT(hedgeOpp->twin == hedge);
QuickHullFace* oppFace = hedgeOpp->face;
PX_UNUSED(oppFace);
// opposite edge face must be set and valid
PX_ASSERT(oppFace != NULL);
PX_ASSERT(oppFace->state != QuickHullFace::eDELETED);
// edges face must be this one
PX_ASSERT(hedge->face == this);
hedge = hedge->next;
} while (hedge != edge);
return true;
}
//////////////////////////////////////////////////////////////////////////
// Constructor - stores the cooking parameters and initializes all counters.
// mMaxVertices/mNumVertices are now zero-initialized (the original left them
// uninitialized until preallocate()/parseInputVertices() were called).
QuickHull::QuickHull(const PxCookingParams& params, const PxConvexMeshDesc& desc)
	: mCookingParams(params), mConvexDesc(desc), mMaxVertices(0), mNumVertices(0), mOutputNumVertices(0),
	mTerminalVertex(0xFFFFFFFF), mVerticesList(NULL), mNumHullFaces(0), mPrecomputedMinMax(false),
	mTolerance(-1.0f), mPlaneTolerance(-1.0f)
{
}
//////////////////////////////////////////////////////////////////////////
// Destructor - releases the vertex buffer even when releaseHull() was not
// called explicitly, preventing a leak. PX_FREE is NULL-safe and resets the
// pointer, so a preceding explicit releaseHull() does not cause a double free.
QuickHull::~QuickHull()
{
	releaseHull();
}
//////////////////////////////////////////////////////////////////////////
// sets the precomputed min/max values
// Takes over precomputed extreme vertices and tolerances so that
// computeMinMaxVerts() can be skipped during buildHull().
void QuickHull::setPrecomputedMinMax(const QuickHullVertex* minVertex,const QuickHullVertex* maxVertex, const float tolerance,const float planeTolerance)
{
	// copy the extreme vertex along each principal axis
	mMinVertex[0] = minVertex[0];	mMaxVertex[0] = maxVertex[0];
	mMinVertex[1] = minVertex[1];	mMaxVertex[1] = maxVertex[1];
	mMinVertex[2] = minVertex[2];	mMaxVertex[2] = maxVertex[2];

	mTolerance = tolerance;
	mPlaneTolerance = planeTolerance;
	mPrecomputedMinMax = true;
}
//////////////////////////////////////////////////////////////////////////
// preallocate internal buffers
// Preallocates the vertex buffer, the half edge and face pools and the
// working arrays, sized from Euler-formula upper bounds for a hull with
// numVertices vertices.
void QuickHull::preallocate(PxU32 numVertices)
{
	PX_ASSERT(numVertices > 0);

	// 8 is the minimum, since the vertex cleanup phase may expand the input to an AABB
	mMaxVertices = PxMax(PxU32(8), numVertices);
	mVerticesList = PX_ALLOCATE(QuickHullVertex, mMaxVertices, "QuickHullVertex");

	// upper-bound estimates: E <= 3V - 6, F <= 2V - 4 for a convex polytope
	const PxU32 maxHalfEdges = (3 * mMaxVertices - 6) * 3;
	const PxU32 maxFaces = (2 * mMaxVertices - 4);

	mFreeHalfEdges.init(maxHalfEdges);
	mFreeFaces.init(maxFaces * 2);
	mHullFaces.reserve(maxFaces);
	mUnclaimedPoints.reserve(numVertices);
	mNewFaces.reserve(32);
	mRemovedFaces.reserve(32);
	mDiscardedFaces.reserve(32);
	mHorizon.reserve(PxMin(numVertices, PxU32(128)));
}
//////////////////////////////////////////////////////////////////////////
// release internal buffers
void QuickHull::releaseHull()
{
	// free the working vertex buffer; faces and half edges are owned by the
	// MemBlock pools (mFreeFaces/mFreeHalfEdges) and are released by their
	// destructors, so only the pointer array needs clearing here
	PX_FREE(mVerticesList);
	mHullFaces.clear();
}
//////////////////////////////////////////////////////////////////////////
// returns the maximum number of vertices on a face
// Scans all visible hull faces and returns the largest edge (== vertex)
// count found on a single face.
PxU32 QuickHull::maxNumVertsPerFace() const
{
	PxU32 best = 0;
	const PxU32 numFaces = mHullFaces.size();
	for (PxU32 i = 0; i < numFaces; i++)
	{
		const local::QuickHullFace* face = mHullFaces[i];
		if (face->state == local::QuickHullFace::eVISIBLE)
			best = PxMax(best, PxU32(face->numEdges));
	}
	return best;
}
//////////////////////////////////////////////////////////////////////////
// parse the input vertices and store them in the hull
void QuickHull::parseInputVertices(const PxVec3* verts, PxU32 numVerts)
{
PX_ASSERT(verts);
PX_ASSERT(numVerts <= mMaxVertices);
mNumVertices = numVerts;
for (PxU32 i = 0; i < numVerts; i++)
{
mVerticesList[i].point = verts[i];
mVerticesList[i].index = i;
}
}
//////////////////////////////////////////////////////////////////////////
// compute min max verts
void QuickHull::computeMinMaxVerts()
{
for (PxU32 i = 0; i < 3; i++)
{
mMinVertex[i] = mVerticesList[0];
mMaxVertex[i] = mVerticesList[0];
}
PxVec3 max = mVerticesList[0].point;
PxVec3 min = mVerticesList[0].point;
// get the max min vertices along the x,y,z
for (PxU32 i = 1; i < mNumVertices; i++)
{
const QuickHullVertex& testVertex = mVerticesList[i];
const PxVec3& testPoint = testVertex.point;
if (testPoint.x > max.x)
{
max.x = testPoint.x;
mMaxVertex[0] = testVertex;
}
else if (testPoint.x < min.x)
{
min.x = testPoint.x;
mMinVertex[0] = testVertex;
}
if (testPoint.y > max.y)
{
max.y = testPoint.y;
mMaxVertex[1] = testVertex;
}
else if (testPoint.y < min.y)
{
min.y = testPoint.y;
mMinVertex[1] = testVertex;
}
if (testPoint.z > max.z)
{
max.z = testPoint.z;
mMaxVertex[2] = testVertex;
}
else if (testPoint.z < min.z)
{
min.z = testPoint.z;
mMinVertex[2] = testVertex;
}
}
const float sizeTol = (max.x-min.x + max.y - min.y + max.z - min.z)*0.5f;
mTolerance = PxMax(local::PLANE_THICKNES * sizeTol, local::PLANE_THICKNES);
mPlaneTolerance = PxMax(mCookingParams.planeTolerance * sizeTol, mCookingParams.planeTolerance);
}
//////////////////////////////////////////////////////////////////////////
// find the initial simplex
// 1. search in max axis from compute min,max
// 2. 3rd point is the furthest vertex from the initial line
// 3. 4th vertex is along the line, 3rd vertex normal
// Finds and adds the initial tetrahedron:
// 1. the two extreme vertices along the axis of greatest spread
// 2. the vertex farthest from that line
// 3. the vertex farthest from the plane of the first three
// Fails (returning false via PxGetFoundation().error, which returns false)
// when the input is degenerate: a point, a line or a plane within tolerance.
bool QuickHull::findSimplex()
{
	// pick the axis with the greatest min/max separation
	float max = 0;
	PxU32 imax = 0;

	for (PxU32 i = 0; i < 3; i++)
	{
		float diff = mMaxVertex[i].point[i] - mMinVertex[i].point[i];
		if (diff > max)
		{
			max = diff;
			imax = i;
		}
	}

	if (max <= mTolerance)
		// should not happen as we clear the vertices before and expand them if they are really close to each other
		return PxGetFoundation().error(PxErrorCode::eINTERNAL_ERROR, PX_FL, "QuickHullConvexHullLib::findSimplex: Simplex input points appers to be almost at the same place");

	QuickHullVertex simplex[4];
	// set first two vertices to be those with the greatest
	// one dimensional separation
	simplex[0] = mMaxVertex[imax];
	simplex[1] = mMinVertex[imax];

	// set third vertex to be the vertex farthest from
	// the line between simplex[0] and simplex[1]
	PxVec3 normal;
	float maxDist = 0;
	PxVec3 u01 = (simplex[1].point - simplex[0].point);
	u01.normalize();
	for (PxU32 i = 0; i < mNumVertices; i++)
	{
		const QuickHullVertex& testVert = mVerticesList[i];
		const PxVec3& testPoint = testVert.point;
		const PxVec3 diff = testPoint - simplex[0].point;
		// |cross| is proportional to the distance from the line
		const PxVec3 xprod = u01.cross(diff);
		const float lenSqr = xprod.magnitudeSquared();
		if (lenSqr > maxDist && testVert.index != simplex[0].index && testVert.index != simplex[1].index)
		{
			maxDist = lenSqr;
			simplex[2] = testVert;
			normal = xprod;
		}
	}
	if (PxSqrt(maxDist) <= mTolerance)
		return PxGetFoundation().error(PxErrorCode::eINTERNAL_ERROR, PX_FL, "QuickHullConvexHullLib::findSimplex: Simplex input points appers to be colinear.");
	normal.normalize();

	// set the forth vertex in the normal direction - farthest from the
	// plane through simplex[0..2]
	const float d0 = simplex[2].point.dot(normal);
	maxDist = 0.0f;
	for (PxU32 i = 0; i < mNumVertices; i++)
	{
		const QuickHullVertex& testVert = mVerticesList[i];
		const PxVec3& testPoint = testVert.point;
		const float dist = PxAbs(testPoint.dot(normal) - d0);
		if (dist > maxDist && testVert.index != simplex[0].index &&
			testVert.index != simplex[1].index && testVert.index != simplex[2].index)
		{
			maxDist = dist;
			simplex[3] = testVert;
		}
	}
	if (PxAbs(maxDist) <= mTolerance)
		return PxGetFoundation().error(PxErrorCode::eINTERNAL_ERROR, PX_FL, "QuickHullConvexHullLib::findSimplex: Simplex input points appers to be coplanar.");

	// now create faces from those triangles; flip the winding when the 4th
	// vertex lies on the negative side of the base triangle's plane
	if (!addSimplex(&simplex[0], simplex[3].point.dot(normal) - d0 < 0))
		return false;

	return true;
}
//////////////////////////////////////////////////////////////////////////
// create triangle from given vertices, produce new face and connect the half edges
// Builds a new triangular face v0-v1-v2 from the pools, links its three
// half edges into a loop and computes the plane equation.
// Returns NULL if either pool is exhausted.
QuickHullFace* QuickHull::createTriangle(const QuickHullVertex& v0, const QuickHullVertex& v1, const QuickHullVertex& v2)
{
	QuickHullFace* face = getFreeHullFace();
	if (!face)
		return NULL;

	// grab three half edges and assign their tails/owner face
	const QuickHullVertex* verts[3] = { &v0, &v1, &v2 };
	QuickHullHalfEdge* edges[3];
	for (PxU32 i = 0; i < 3; i++)
	{
		QuickHullHalfEdge* he = getFreeHullHalfEdge();
		if (!he)
			return NULL;
		he->face = face;
		he->tail = *verts[i];
		edges[i] = he;
	}

	// close the circular edge list
	for (PxU32 i = 0; i < 3; i++)
	{
		edges[i]->next = edges[(i + 1) % 3];
		edges[i]->prev = edges[(i + 2) % 3];
	}

	face->edge = edges[0];
	face->nextFace = NULL;

	// compute the normal and offset
	face->computeNormalAndCentroid();
	return face;
}
//////////////////////////////////////////////////////////////////////////
// add initial simplex to the quickhull
// construct triangles from the simplex points and connect them with half edges
// Builds the initial tetrahedron: 4 triangles wired together through their
// twins (winding chosen by flipTriangle so all normals face outward), then
// distributes the remaining input points into the faces' conflict lists.
// Returns false if the face/edge pools run dry.
bool QuickHull::addSimplex(QuickHullVertex* simplex, bool flipTriangle)
{
	PX_ASSERT(simplex);
	// interior point = centroid of the simplex, used for in/out tests
	PxVec3 vectorSum = simplex[0].point;
	for (PxU32 i = 1; i < 4; i++)
	{
		vectorSum += simplex[i].point;
	}
	mInteriorPoint = vectorSum / 4.0f;

	QuickHullFace* tris[4];
	// create the triangles from the initial simplex; the two branches differ
	// only in winding order so the normals point away from the interior point
	if (flipTriangle)
	{
		tris[0] = createTriangle(simplex[0], simplex[1], simplex[2]);
		if (tris[0] == NULL)
			return false;
		tris[1] = createTriangle(simplex[3], simplex[1], simplex[0]);
		if (tris[1] == NULL)
			return false;
		tris[2] = createTriangle(simplex[3], simplex[2], simplex[1]);
		if (tris[2] == NULL)
			return false;
		tris[3] = createTriangle(simplex[3], simplex[0], simplex[2]);
		if (tris[3] == NULL)
			return false;

		// stitch the side triangles to each other and to the base triangle
		for (PxU32 i = 0; i < 3; i++)
		{
			PxU32 k = (i + 1) % 3;
			tris[i + 1]->getEdge(1)->setTwin(tris[k + 1]->getEdge(0));
			tris[i + 1]->getEdge(2)->setTwin(tris[0]->getEdge(k));
		}
	}
	else
	{
		tris[0] = createTriangle(simplex[0], simplex[2], simplex[1]);
		if (tris[0] == NULL)
			return false;
		tris[1] = createTriangle(simplex[3], simplex[0], simplex[1]);
		if (tris[1] == NULL)
			return false;
		tris[2] = createTriangle(simplex[3], simplex[1], simplex[2]);
		if (tris[2] == NULL)
			return false;
		tris[3] = createTriangle(simplex[3], simplex[2], simplex[0]);
		if (tris[3] == NULL)
			return false;

		// stitch the side triangles to each other and to the base triangle
		for (PxU32 i = 0; i < 3; i++)
		{
			PxU32 k = (i + 1) % 3;
			tris[i + 1]->getEdge(0)->setTwin(tris[k + 1]->getEdge(1));
			tris[i + 1]->getEdge(2)->setTwin(tris[0]->getEdge((3 - i) % 3));
		}
	}

	// push back the first 4 faces created from the simplex
	for (PxU32 i = 0; i < 4; i++)
	{
		mHullFaces.pushBack(tris[i]);
	}
	mNumHullFaces = 4;

	// assign every remaining point to the conflict list of the face it is
	// farthest in front of (beyond the thick-plane tolerance)
	for (PxU32 i = 0; i < mNumVertices; i++)
	{
		const QuickHullVertex& v = mVerticesList[i];

		// skip the simplex corners themselves
		if (v == simplex[0] || v == simplex[1] || v == simplex[2] || v == simplex[3])
		{
			continue;
		}

		float maxDist = mTolerance;
		QuickHullFace* maxFace = NULL;
		for (PxU32 k = 0; k < 4; k++)
		{
			const float dist = tris[k]->distanceToPlane(v.point);
			if (dist > maxDist)
			{
				maxFace = tris[k];
				maxDist = dist;
			}
		}

		if (maxFace != NULL)
		{
			addPointToFace(*maxFace, &mVerticesList[i], maxDist);
		}
	}

	return true;
}
//////////////////////////////////////////////////////////////////////////
// adds a point to the conflict list
// the trick here is to keep the furthest point at the head of the list - that is the only
// one we care about; the rest just need to be stored so they can be reclaimed by new
// faces later, when the face's furthest point becomes the current global maximum
// Inserts vertex into face's conflict list, keeping the furthest vertex at
// the head of the singly linked list; the remaining vertices are stored in
// no particular order right after the head.
// dist - the vertex's distance from the face plane (cached on the vertex).
// Fix: the original assigned vertex->dist twice (once unconditionally, once
// again in the empty-list branch) - the duplicate assignment is removed.
void QuickHull::addPointToFace(QuickHullFace& face, QuickHullVertex* vertex, float dist)
{
	vertex->dist = dist;

	// empty conflict list - the vertex simply becomes the head
	if(!face.conflictList)
	{
		face.conflictList = vertex;
		vertex->next = NULL;
		return;
	}

	PX_ASSERT(face.conflictList);
	if (face.conflictList->dist > dist)
	{
		// not the furthest vertex - insert it right behind the head
		vertex->next = face.conflictList->next;
		face.conflictList->next = vertex;
	}
	else
	{
		// new furthest vertex - it becomes the new head
		vertex->next = face.conflictList;
		face.conflictList = vertex;
	}
}
//////////////////////////////////////////////////////////////////////////
// removes eye point from a conflict list
// the eye point is always the head of the list (furthest vertex), so we just unlink the head
// Unlinks the eye point from the face's conflict list. The eye point is by
// construction the furthest vertex, which addPointToFace keeps at the head,
// so removal is a simple head unlink. `vertex` is only used to assert this.
void QuickHull::removeEyePointFromFace(QuickHullFace& face, const QuickHullVertex* vertex)
{
	PX_UNUSED(vertex);

	// the picked vertex should always be the first in the linked list
	PX_ASSERT(face.conflictList == vertex);
	face.conflictList = face.conflictList->next;
}
//////////////////////////////////////////////////////////////////////////
// merge polygons with similar normals
// Post-processing pass: aggressively merges visible faces whose normals are
// nearly parallel (within local::MAXDOT_MINANG).
void QuickHull::postMergeHull()
{
	for (PxU32 i = 0; i < mHullFaces.size(); i++)
	{
		QuickHullFace* face = mHullFaces[i];
		if (face->state != QuickHullFace::eVISIBLE)
			continue;

		PX_ASSERT(face->checkFaceConsistency());

		// keep merging neighbors into this face until no candidate remains
		bool merged = true;
		while (merged)
		{
			merged = doPostAdjacentMerge(*face, local::MAXDOT_MINANG);
		}
	}
}
//////////////////////////////////////////////////////////////////////////
// builds the hull
// 1. find the initial simplex
// 2. check if simplex has a valid area
// 3. add vertices to the hull. We add vertex most furthest from the hull
// 4. terminate if hull limit reached or we have added all vertices
// Builds the hull:
// 1. find the initial simplex
// 2. optionally check the simplex faces against the zero-area test
// 3. repeatedly add the globally furthest conflict vertex
// 4. terminate on vertex/polygon limits, or restart from scratch with a
//    terminal vertex when a merge failure poisoned the topology
QuickHullResult::Enum QuickHull::buildHull()
{
	QuickHullVertex* eyeVtx = NULL;
	QuickHullFace* eyeFace;

	// compute the vertex min max along x,y,z (skipped when precomputed values were provided)
	if(!mPrecomputedMinMax)
		computeMinMaxVerts();

	// find the initial simplex of the hull
	if (!findSimplex())
	{
		return QuickHullResult::eFAILURE;
	}

	// simplex area test
	const bool useAreaTest = mConvexDesc.flags & PxConvexFlag::eCHECK_ZERO_AREA_TRIANGLES ? true : false;
	const float areaEpsilon = mCookingParams.areaTestEpsilon * 2.0f;
	if (useAreaTest)
	{
		for (PxU32 i = 0; i < mHullFaces.size(); i++)
		{
			if (mHullFaces[i]->area < areaEpsilon)
			{
				return QuickHullResult::eZERO_AREA_TEST_FAILED;
			}
		}
	}

	// add points to the hull, furthest conflict vertex first; stop early if
	// we reach the vertex that terminated a previous (failed) run
	PxU32 numVerts = 4; // initial vertex count - simplex vertices
	while ((eyeVtx = nextPointToAdd(eyeFace)) != NULL && eyeVtx->index != mTerminalVertex)
	{
		// if plane shifting vertex limit, we need the reduced hull
		if((mConvexDesc.flags & PxConvexFlag::ePLANE_SHIFTING) && (numVerts >= mConvexDesc.vertexLimit))
			break;

		bool addFailed = false;
		PX_ASSERT(eyeFace);
		if (!addPointToHull(eyeVtx, *eyeFace, addFailed))
		{
			mOutputNumVertices = numVerts;
			// we hit the polygons hard limit
			return QuickHullResult::ePOLYGONS_LIMIT_REACHED;
		}

		// We failed to add the vertex, store the vertex as terminal vertex and re run the hull generator
		if(addFailed)
		{
			// set the terminal vertex
			mTerminalVertex = eyeVtx->index;

			// reset the edges/faces memory
			mFreeHalfEdges.reset();
			mFreeFaces.reset();

			// reset the hull state
			mHullFaces.clear();
			mNumHullFaces = 0;

			mUnclaimedPoints.clear();
			mHorizon.clear();
			mNewFaces.clear();
			mRemovedFaces.clear();
			mDiscardedFaces.clear();

			// rerun the hull generator (recursion terminates because the
			// terminal vertex is skipped on the next run)
			return buildHull();
		}
		numVerts++;
	}

	mOutputNumVertices = numVerts;

	// vertex limit has been reached. We did not stop the iteration, since we
	// will use the produced hull to compute an OBB from it and use the planes
	// to slice the initial OBB
	if (numVerts > mConvexDesc.vertexLimit)
	{
		return QuickHullResult::eVERTEX_LIMIT_REACHED;
	}

	return QuickHullResult::eSUCCESS;
}
//////////////////////////////////////////////////////////////////////////
// finds the best point to add to the hull
// go through the faces conflict list and pick the global maximum
// Scans the conflict lists of all visible faces and returns the globally
// furthest vertex beyond mPlaneTolerance (or NULL when none is left).
// The owning face is written to eyeFace.
QuickHullVertex* QuickHull::nextPointToAdd(QuickHullFace*& eyeFace)
{
	QuickHullVertex* bestVtx = NULL;
	QuickHullFace* bestFace = NULL;
	float bestDist = mPlaneTolerance;

	const PxU32 numFaces = mHullFaces.size();
	for (PxU32 i = 0; i < numFaces; i++)
	{
		QuickHullFace* face = mHullFaces[i];
		if (face->state != QuickHullFace::eVISIBLE || !face->conflictList)
			continue;

		// the head of the conflict list is always that face's furthest vertex
		const float dist = face->conflictList->dist;
		if (dist > bestDist)
		{
			bestDist = dist;
			bestVtx = face->conflictList;
			bestFace = face;
		}
	}
	eyeFace = bestFace;
	return bestVtx;
}
//////////////////////////////////////////////////////////////////////////
// adds vertex to the hull
// sets addFailed to true if we failed to add a point because the merging failed
// this can happen as the face plane equation changes and some faces might become concave
// returns false if the new faces count would hit the hull face hard limit (255 / 64 for GPU-compatible)
// Adds the eye vertex to the hull: computes the horizon, replaces the
// visible faces with new ones fanning out from the eye vertex, then runs the
// two convexity-driven merge passes and reclaims orphaned conflict points.
// Sets addFailed when a merge failed (plane equations drifted and produced a
// concave configuration); returns false when the new face count would exceed
// the hard polygon limit (255 / 64 for GPU-compatible hulls).
bool QuickHull::addPointToHull(const QuickHullVertex* eyeVtx, QuickHullFace& eyeFace, bool& addFailed)
{
	addFailed = false;
	// removes the eyePoint from the conflict list
	removeEyePointFromFace(eyeFace, eyeVtx);

	// calculates the horizon from the eyePoint
	calculateHorizon(eyeVtx->point, NULL, eyeFace, mHorizon, mRemovedFaces);

	// check if we dont hit the polygons hard limit
	if (mNumHullFaces + mHorizon.size() > mConvexDesc.polygonLimit)
	{
		// make the faces visible again and quit
		for (PxU32 i = 0; i < mRemovedFaces.size(); i++)
		{
			mRemovedFaces[i]->state = QuickHullFace::eVISIBLE;
		}
		mNumHullFaces += mRemovedFaces.size();
		return false;
	}

	// adds new faces from given horizon and eyePoint
	addNewFacesFromHorizon(eyeVtx, mHorizon, mNewFaces);

	bool mergeFailed = false;
	// first merge pass ... merge faces which are non-convex
	// as determined by the larger face
	for (PxU32 i = 0; i < mNewFaces.size(); i++)
	{
		QuickHullFace& face = *mNewFaces[i];
		if (face.state == QuickHullFace::eVISIBLE)
		{
			PX_ASSERT(face.checkFaceConsistency());
			while (doAdjacentMerge(face, true, mergeFailed));
		}
	}
	if (mergeFailed)
	{
		// caller will reset and rerun the hull with this vertex as terminal
		addFailed = true;
		return true;
	}

	// second merge pass ... merge faces which are non-convex
	// wrt either face
	for (PxU32 i = 0; i < mNewFaces.size(); i++)
	{
		QuickHullFace& face = *mNewFaces[i];
		if (face.state == QuickHullFace::eNON_CONVEX)
		{
			face.state = QuickHullFace::eVISIBLE;
			while (doAdjacentMerge(face, false, mergeFailed));
		}
	}
	if (mergeFailed)
	{
		addFailed = true;
		return true;
	}

	// re-distribute the conflict points of the removed faces onto the new ones
	resolveUnclaimedPoints(mNewFaces);

	mHorizon.clear();
	mNewFaces.clear();
	mRemovedFaces.clear();
	return true;
}
//////////////////////////////////////////////////////////////////////////
// merge adjacent faces
// We merge 2 adjacent faces if they lie on the same thick plane defined by the mTolerance
// we do this in 2 steps to ensure we dont leave non-convex faces
// Tries to merge `face` with one adjacent face lying on the same thick plane
// (within mTolerance). Returns true when a merge happened (caller loops until
// false). In the first pass (mergeWrtLargeFace) convexity is judged from the
// larger face and borderline cases are only flagged eNON_CONVEX for the
// second pass; in the second pass any non-convex pair is merged.
// Sets mergeFailed when mergeAdjacentFace hit a degenerate configuration.
bool QuickHull::doAdjacentMerge(QuickHullFace& face, bool mergeWrtLargeFace, bool& mergeFailed)
{
	QuickHullHalfEdge* hedge = face.edge;
	mergeFailed = false;

	bool convex = true;
	do
	{
		const QuickHullFace& oppFace = *hedge->getOppositeFace();
		bool merge = false;

		if (mergeWrtLargeFace)
		{
			// merge faces if they are parallel or non-convex
			// wrt to the larger face; otherwise, just mark
			// the face non-convex for the second pass.
			if (face.area > oppFace.area)
			{
				// positive opposite-face distance == neighbor centroid in
				// front of our plane == locally non-convex/coplanar
				if (hedge->getOppositeFaceDistance() > -mTolerance)
				{
					merge = true;
				}
				else if (hedge->twin->getOppositeFaceDistance() > -mTolerance)
				{
					convex = false;
				}
			}
			else
			{
				if (hedge->twin->getOppositeFaceDistance() > -mTolerance)
				{
					merge = true;
				}
				else if (hedge->getOppositeFaceDistance() > -mTolerance)
				{
					convex = false;
				}
			}
		}
		else
		{
			// then merge faces if they are definitively non-convex
			if (hedge->getOppositeFaceDistance() > -mTolerance ||
				hedge->twin->getOppositeFaceDistance() > -mTolerance)
			{
				merge = true;
			}
		}

		if (merge)
		{
			mDiscardedFaces.clear();
			if (!face.mergeAdjacentFace(hedge, mDiscardedFaces))
			{
				mergeFailed = true;
				return false;
			}
			mNumHullFaces -= mDiscardedFaces.size();
			// hand the discarded faces' conflict points over to the merged face
			for (PxU32 i = 0; i < mDiscardedFaces.size(); i++)
			{
				deleteFacePoints(*mDiscardedFaces[i], &face);
			}
			PX_ASSERT(face.checkFaceConsistency());
			return true;
		}
		hedge = hedge->next;
	} while (hedge != face.edge);

	if (!convex)
	{
		face.state = QuickHullFace::eNON_CONVEX;
	}
	return false;
}
//////////////////////////////////////////////////////////////////////////
// merge adjacent faces doing normal test
// we try to merge more aggressively 2 faces with the same normal.
bool QuickHull::doPostAdjacentMerge(QuickHullFace& face, const float maxdot_minang)
{
	// Walk the boundary of 'face' and try to merge across each edge whose
	// opposite face has a nearly identical normal (dot above maxdot_minang).
	QuickHullHalfEdge* he = face.edge;
	do
	{
		const QuickHullFace& neighbour = *he->getOppositeFace();
		// candidate when normals align; always absorb the smaller face into the
		// larger one, and only if the merged polygon would stay valid/convex
		const bool normalsAligned = face.normal.dot(neighbour.normal) > maxdot_minang;
		if (normalsAligned && face.area >= neighbour.area && canMergeFaces(*he))
		{
			QuickHullFaceArray discarded;
			face.mergeAdjacentFace(he, discarded);
			mNumHullFaces -= discarded.size();
			// reassign the conflict points of the swallowed faces to the merged face
			for (PxU32 k = 0; k < discarded.size(); k++)
			{
				deleteFacePoints(*discarded[k], &face);
			}
			PX_ASSERT(face.checkFaceConsistency());
			return true;
		}
		he = he->next;
	} while (he != face.edge);
	return false;
}
//////////////////////////////////////////////////////////////////////////
// checks if 2 adjacent faces can be merged
// 1. creates a face with merged vertices
// 2. computes new normal and centroid
// 3. checks that all verts are not too far away from the plane
// 4. checks that the new polygon is still convex
// 5. checks if we are about to merge only 2 neighbor faces, we dont
// want to merge additional faces, that might corrupt the convexity
bool QuickHull::canMergeFaces(const QuickHullHalfEdge& he)
{
	const QuickHullFace& face1 = *he.face;
	const QuickHullFace& face2 = *he.twin->face;

	// construct the merged face
	// scratch storage holding copies of both edge loops; the real hull is untouched
	PX_ALLOCA(edges, QuickHullHalfEdge, (face1.numEdges + face2.numEdges));
	PxMemSet(edges, 0, (face1.numEdges + face2.numEdges)*sizeof(QuickHullHalfEdge));

	QuickHullFace mergedFace;
	mergedFace.edge = &edges[0];

	// copy the first face edges
	PxU32 currentEdge = 0;
	const QuickHullHalfEdge* heTwin = NULL;	// scratch copy of he.twin (found while copying face2)
	const QuickHullHalfEdge* heCopy = NULL;	// scratch copy of he itself
	// make sure the copy does not start on the shared edge, so heCopy->prev/next are valid neighbours
	const QuickHullHalfEdge* startEdge = (face1.edge != &he) ? face1.edge : face1.edge->next;
	const QuickHullHalfEdge* copyHe = startEdge;
	do
	{
		edges[currentEdge].face = &mergedFace;
		edges[currentEdge].tail = copyHe->tail;
		if(copyHe == &he)
		{
			heTwin = copyHe->twin;
			heCopy = &edges[currentEdge];
		}
		// wrap next/prev around face1's portion [0, face1.numEdges) of the scratch array
		const PxU32 nextIndex = (copyHe->next == startEdge) ? 0 : currentEdge + 1;
		const PxU32 prevIndex = (currentEdge == 0) ? face1.numEdges - 1 : currentEdge - 1;
		edges[currentEdge].next = &edges.mPointer[nextIndex];
		edges[currentEdge].prev = &edges.mPointer[prevIndex];
		currentEdge++;
		copyHe = copyHe->next;
	} while (copyHe != startEdge);

	// copy the second face edges
	copyHe = face2.edge;
	do
	{
		edges[currentEdge].face = &mergedFace;
		edges[currentEdge].tail = copyHe->tail;
		if(heTwin == copyHe)
			heTwin = &edges[currentEdge];	// redirect to the scratch copy
		// wrap next/prev around face2's portion [face1.numEdges, face1.numEdges + face2.numEdges)
		const PxU32 nextIndex = (copyHe->next == face2.edge) ? face1.numEdges : currentEdge + 1;
		const PxU32 prevIndex = (currentEdge == face1.numEdges) ? face1.numEdges + face2.numEdges - 1 : currentEdge - 1;
		edges[currentEdge].next = &edges.mPointer[nextIndex];
		edges[currentEdge].prev = &edges.mPointer[prevIndex];
		currentEdge++;
		copyHe = copyHe->next;
	} while (copyHe != face2.edge);

	PX_ASSERT(heTwin);
	PX_ASSERT(heCopy);

	// splice the two scratch loops together by removing the shared edge pair
	QuickHullHalfEdge* hedgeAdjPrev = heCopy->prev;
	QuickHullHalfEdge* hedgeAdjNext = heCopy->next;
	QuickHullHalfEdge* hedgeOppPrev = heTwin->prev;
	QuickHullHalfEdge* hedgeOppNext = heTwin->next;

	hedgeOppPrev->next = hedgeAdjNext;
	hedgeAdjNext->prev = hedgeOppPrev;

	hedgeAdjPrev->next = hedgeOppNext;
	hedgeOppNext->prev = hedgeAdjPrev;

	// compute normal and centroid
	mergedFace.computeNormalAndCentroid();

	// test the vertex distance: every input vertex must stay within the
	// merged face's thick plane
	const float maxDist = mPlaneTolerance;
	for(PxU32 iVerts=0; iVerts< mNumVertices; iVerts++)
	{
		const QuickHullVertex& vertex = mVerticesList[iVerts];

		const float dist = mergedFace.distanceToPlane(vertex.point);
		if (dist > maxDist)
		{
			return false;
		}
	}

	// check the convexity: for each boundary edge, all other vertices of the
	// merged polygon must lie on the inner side of the edge's outward direction
	QuickHullHalfEdge* qhe = mergedFace.edge;
	do
	{
		const QuickHullVertex& vertex = qhe->tail;
		const QuickHullVertex& nextVertex = qhe->next->tail;

		PxVec3 edgeVector = nextVertex.point - vertex.point;
		edgeVector.normalize();
		// outward-pointing direction in the face plane, perpendicular to the edge
		const PxVec3 outVector = -mergedFace.normal.cross(edgeVector);

		QuickHullHalfEdge* testHe = qhe->next;
		do
		{
			const QuickHullVertex& testVertex = testHe->tail;

			const float dist = (testVertex.point - vertex.point).dot(outVector);
			if (dist > mTolerance)
				return false;

			testHe = testHe->next;
		} while (testHe != qhe->next);

		qhe = qhe->next;
	} while (qhe != mergedFace.edge);

	// finally make sure only the two neighbour faces would get merged; if the
	// real merge would also collapse additional faces, reject it
	const QuickHullFace* oppFace = he.getOppositeFace();
	QuickHullHalfEdge* hedgeOpp = he.twin;

	hedgeAdjPrev = he.prev;
	hedgeAdjNext = he.next;
	hedgeOppPrev = hedgeOpp->prev;
	hedgeOppNext = hedgeOpp->next;

	// check if we are lining up with the face in adjPrev dir
	while (hedgeAdjPrev->getOppositeFace() == oppFace)
	{
		hedgeAdjPrev = hedgeAdjPrev->prev;
		hedgeOppNext = hedgeOppNext->next;
	}

	// check if we are lining up with the face in adjNext dir
	while (hedgeAdjNext->getOppositeFace() == oppFace)
	{
		hedgeOppPrev = hedgeOppPrev->prev;
		hedgeAdjNext = hedgeAdjNext->next;
	}

	// no redundant merges, just clean merge of 2 neighbour faces
	if (hedgeOppPrev->getOppositeFace() == hedgeAdjNext->getOppositeFace())
	{
		return false;
	}

	if (hedgeAdjPrev->getOppositeFace() == hedgeOppNext->getOppositeFace())
	{
		return false;
	}

	return true;
}
//////////////////////////////////////////////////////////////////////////
// delete face points and store them as unclaimed, so we can add them back to new faces later
void QuickHull::deleteFacePoints(QuickHullFace& face, QuickHullFace* absorbingFace)
{
	// nothing to do when the face has no conflict points
	QuickHullVertex* vertex = face.conflictList;
	if (!vertex)
		return;

	// Detach each conflict vertex and either hand it to the absorbing face
	// (when it still lies outside that face's thick plane) or park it in the
	// unclaimed list so it can be redistributed later.
	while (vertex)
	{
		QuickHullVertex* claimed = vertex;
		vertex = vertex->next;
		claimed->next = NULL;

		if (absorbingFace)
		{
			const float dist = absorbingFace->distanceToPlane(claimed->point);
			if (dist > mTolerance)
				addPointToFace(*absorbingFace, claimed, dist);
			else
				mUnclaimedPoints.pushBack(claimed);
		}
		else
		{
			mUnclaimedPoints.pushBack(claimed);
		}
	}
	face.conflictList = NULL;
}
//////////////////////////////////////////////////////////////////////////
// calculate the horizon from the eyePoint against a given face
void QuickHull::calculateHorizon(const PxVec3& eyePoint, QuickHullHalfEdge* edge0, QuickHullFace& face, QuickHullHalfEdgeArray& horizon, QuickHullFaceArray& removedFaces)
{
	// 'face' is visible from the eye point - remove it from the hull and
	// recycle its conflict points
	deleteFacePoints(face, NULL);
	face.state = QuickHullFace::eDELETED;
	removedFaces.pushBack(&face);
	mNumHullFaces--;
	QuickHullHalfEdge* edge;
	if (edge0 == NULL)
	{
		// initial call - start the walk at the face's first edge
		edge0 = face.getEdge(0);
		edge = edge0;
	}
	else
	{
		// recursive call - we entered through edge0, continue past it
		edge = edge0->next;
	}
	do
	{
		QuickHullFace* oppFace = edge->getOppositeFace();
		if (oppFace->state == QuickHullFace::eVISIBLE)
		{
			const float dist = oppFace->distanceToPlane(eyePoint);
			if (dist > mTolerance)
			{
				// neighbour is visible from the eye point too - recurse across the edge
				calculateHorizon(eyePoint, edge->twin, *oppFace, horizon, removedFaces);
			}
			else
			{
				// neighbour is hidden - this edge lies on the horizon boundary
				horizon.pushBack(edge);
			}
		}
		edge = edge->next;
	} while (edge != edge0);
}
//////////////////////////////////////////////////////////////////////////
// adds new faces from given horizon and eyePoint
void QuickHull::addNewFacesFromHorizon(const QuickHullVertex* eyePoint, const QuickHullHalfEdgeArray& horizon, QuickHullFaceArray& newFaces)
{
	// Create one triangle (eyePoint, horizon head, horizon tail) per horizon
	// edge and stitch the side edges of consecutive triangles together.
	QuickHullHalfEdge* hedgeSidePrev = NULL;
	QuickHullHalfEdge* hedgeSideBegin = NULL;

	for (PxU32 i = 0; i < horizon.size(); i++)
	{
		const QuickHullHalfEdge& horizonHe = *horizon[i];
		QuickHullFace* face = createTriangle(*eyePoint, horizonHe.getHead(), horizonHe.getTail());
		mHullFaces.pushBack(face);
		mNumHullFaces++;
		// the triangle's base edge pairs up with the horizon edge's twin
		face->getEdge(2)->setTwin(horizonHe.twin);

		QuickHullHalfEdge* hedgeSide = face->edge;
		if (hedgeSidePrev != NULL)
		{
			// link this triangle's side edge to the previous triangle
			hedgeSide->next->setTwin(hedgeSidePrev);
		}
		else
		{
			// remember the first side edge so the fan can be closed at the end
			hedgeSideBegin = hedgeSide;
		}
		newFaces.pushBack(face);
		hedgeSidePrev = hedgeSide;
	}
	// close the fan: first triangle pairs with the last one
	if(hedgeSideBegin)
		hedgeSideBegin->next->setTwin(hedgeSidePrev);
}
//////////////////////////////////////////////////////////////////////////
// resolve unclaimed points
void QuickHull::resolveUnclaimedPoints(const QuickHullFaceArray& newFaces)
{
for (PxU32 i = 0; i < mUnclaimedPoints.size(); i++)
{
QuickHullVertex* vtx = mUnclaimedPoints[i];
float maxDist = mTolerance;
QuickHullFace* maxFace = NULL;
for (PxU32 j = 0; j < newFaces.size(); j++)
{
const QuickHullFace& newFace = *newFaces[j];
if (newFace.state == QuickHullFace::eVISIBLE)
{
const float dist = newFace.distanceToPlane(vtx->point);
if (dist > maxDist)
{
maxDist = dist;
maxFace = newFaces[j];
}
}
}
if (maxFace != NULL)
{
addPointToFace(*maxFace, vtx, maxDist);
}
}
mUnclaimedPoints.clear();
}
//////////////////////////////////////////////////////////////////////////
// helper struct for hull expand point
struct ExpandPoint
{
	PxPlane	plane[3];		// the 3 planes whose intersection defines the point
	PxU32	planeIndex[3];	// plane indices used as the identity of the point

	// Two expand points are identical when they reference the same plane triple.
	bool operator==(const ExpandPoint& expPoint) const
	{
		return expPoint.planeIndex[0] == planeIndex[0] &&
			expPoint.planeIndex[1] == planeIndex[1] &&
			expPoint.planeIndex[2] == planeIndex[2];
	}
};
//////////////////////////////////////////////////////////////////////////
// gets the half edge neighbors and form the expand point
void getExpandPoint(const QuickHullHalfEdge& he, ExpandPoint& expandPoint, const PxArray<PxU32>* translationTable = NULL)
{
// set the first 2 - the edge face and the twin face
expandPoint.planeIndex[0] = (translationTable) ? ((*translationTable)[he.face->index]) : (he.face->index);
PxU32 index = translationTable ? ((*translationTable)[he.twin->face->index]) : he.twin->face->index;
if (index < expandPoint.planeIndex[0])
{
expandPoint.planeIndex[1] = expandPoint.planeIndex[0];
expandPoint.planeIndex[0] = index;
}
else
{
expandPoint.planeIndex[1] = index;
}
// now the 3rd one is the next he twin index
index = translationTable ? (*translationTable)[he.next->twin->face->index] : he.next->twin->face->index;
if (index < expandPoint.planeIndex[0])
{
expandPoint.planeIndex[2] = expandPoint.planeIndex[1];
expandPoint.planeIndex[1] = expandPoint.planeIndex[0];
expandPoint.planeIndex[0] = index;
}
else
{
if (index < expandPoint.planeIndex[1])
{
expandPoint.planeIndex[2] = expandPoint.planeIndex[1];
expandPoint.planeIndex[1] = index;
}
else
{
expandPoint.planeIndex[2] = index;
}
}
}
//////////////////////////////////////////////////////////////////////////
// adds the expand point, don't add similar point
void addExpandPoint(const ExpandPoint& expandPoint, PxArray<ExpandPoint>& expandPoints)
{
	// linear duplicate check - the sorted plane-index triple identifies a point
	for (PxU32 i = 0; i < expandPoints.size(); i++)
	{
		if (expandPoints[i] == expandPoint)
			return;
	}
	expandPoints.pushBack(expandPoint);
}
//////////////////////////////////////////////////////////////////////////
// helper for 3 planes intersection
static PxVec3 threePlaneIntersection(const PxPlane &p0, const PxPlane &p1, const PxPlane &p2)
{
	// Solve N * x = -d, where the rows of N are the three plane normals:
	// x = -N^-1 * (d0, d1, d2)
	const PxMat33 normals = PxMat33(p0.n, p1.n, p2.n).getTranspose();
	const PxVec3 dVec(p0.d, p1.d, p2.d);
	return -(normals.getInverse().transform(dVec));
}
}
//////////////////////////////////////////////////////////////////////////
QuickHullConvexHullLib::QuickHullConvexHullLib(const PxConvexMeshDesc& desc, const PxCookingParams& params)
	: ConvexHullLib(desc, params),mQuickHull(NULL), mCropedConvexHull(NULL), mOutMemoryBuffer(NULL), mFaceTranslateTable(NULL)
{
	// create the quickhull implementation and preallocate its internal
	// buffers for the expected number of input points
	mQuickHull = PX_NEW(local::QuickHull)(params, desc);
	mQuickHull->preallocate(desc.points.count);
}
//////////////////////////////////////////////////////////////////////////
QuickHullConvexHullLib::~QuickHullConvexHullLib()
{
	// release the hull's internal allocations before destroying it
	mQuickHull->releaseHull();
	PX_DELETE(mQuickHull);
	PX_DELETE(mCropedConvexHull);
	PX_FREE(mOutMemoryBuffer);
	mFaceTranslateTable = NULL; // memory is a part of mOutMemoryBuffer
}
//////////////////////////////////////////////////////////////////////////
// create the hull
// 1. clean the input vertices
// 2. check we can construct the simplex, if not expand the input verts
// 3. prepare the quickhull - preallocate, parse input verts
// 4. construct the hull
// 5. post merge faces if limit not reached
// 6. if limit reached, expand the hull
PxConvexMeshCookingResult::Enum QuickHullConvexHullLib::createConvexHull()
{
	PxConvexMeshCookingResult::Enum res = PxConvexMeshCookingResult::eFAILURE;

	PxU32 vcount = mConvexMeshDesc.points.count;
	if ( vcount < 8 )
		vcount = 8;	// ensure the scratch buffer can hold the minimal padded output

	PxVec3* outvsource = PX_ALLOCATE(PxVec3, vcount, "PxVec3");
	PxU32 outvcount;

	// cleanup the vertices first
	if(mConvexMeshDesc.flags & PxConvexFlag::eSHIFT_VERTICES)
	{
		if(!shiftAndcleanupVertices(mConvexMeshDesc.points.count, reinterpret_cast<const PxVec3*> (mConvexMeshDesc.points.data), mConvexMeshDesc.points.stride,
			outvcount, outvsource))
		{
			PX_FREE(outvsource);
			return res;
		}
	}
	else
	{
		if(!cleanupVertices(mConvexMeshDesc.points.count, reinterpret_cast<const PxVec3*> (mConvexMeshDesc.points.data), mConvexMeshDesc.points.stride,
			outvcount, outvsource))
		{
			PX_FREE(outvsource);
			return res;
		}
	}

	// fix degenerate (collinear/coplanar) input so an initial simplex exists;
	// the computed min/max vertices and tolerances can be reused if no points moved
	local::QuickHullVertex minimumVertex[3];
	local::QuickHullVertex maximumVertex[3];
	float tolerance;
	float planeTolerance;
	bool canReuse = cleanupForSimplex(outvsource, outvcount, &minimumVertex[0], &maximumVertex[0], tolerance, planeTolerance);
	mQuickHull->parseInputVertices(outvsource,outvcount);
	if(canReuse)
	{
		mQuickHull->setPrecomputedMinMax(minimumVertex, maximumVertex, tolerance, planeTolerance);
	}

	local::QuickHullResult::Enum qhRes = mQuickHull->buildHull();
	switch(qhRes)
	{
	case local::QuickHullResult::eZERO_AREA_TEST_FAILED:
		res = PxConvexMeshCookingResult::eZERO_AREA_TEST_FAILED;
		break;
	case local::QuickHullResult::eSUCCESS:
		mQuickHull->postMergeHull();
		res = PxConvexMeshCookingResult::eSUCCESS;
		break;
	case local::QuickHullResult::ePOLYGONS_LIMIT_REACHED:
		if(mQuickHull->getNbHullVerts() > mConvexMeshDesc.vertexLimit)
		{
			// expand the hull
			if(mConvexMeshDesc.flags & PxConvexFlag::ePLANE_SHIFTING)
				res = expandHull();
			else
				res = expandHullOBB();
			// BUGFIX: previously the expansion result was unconditionally
			// overwritten with ePOLYGONS_LIMIT_REACHED, masking a genuine
			// failure (eFAILURE / eZERO_AREA_TEST_FAILED). Only report the
			// polygon limit when the expansion itself succeeded.
			if(res == PxConvexMeshCookingResult::eSUCCESS)
				res = PxConvexMeshCookingResult::ePOLYGONS_LIMIT_REACHED;
		}
		else
		{
			mQuickHull->postMergeHull();
			res = PxConvexMeshCookingResult::ePOLYGONS_LIMIT_REACHED;
		}
		break;
	case local::QuickHullResult::eVERTEX_LIMIT_REACHED:
		{
			// expand the hull
			if(mConvexMeshDesc.flags & PxConvexFlag::ePLANE_SHIFTING)
				res = expandHull();
			else
				res = expandHullOBB();
		}
		break;
	case local::QuickHullResult::eFAILURE:
		break;
	};

	// check if we need to build GRB compatible mesh
	// if hull was cropped we already have a compatible mesh, if not check
	// the max verts per face
	if(((mConvexMeshDesc.flags & PxConvexFlag::eGPU_COMPATIBLE) || mCookingParams.buildGPUData) && !mCropedConvexHull &&
		(res == PxConvexMeshCookingResult::eSUCCESS || res == PxConvexMeshCookingResult::ePOLYGONS_LIMIT_REACHED))
	{
		PX_ASSERT(mQuickHull);
		// if we hit the vertex per face limit, expand the hull by cropping OBB
		if(mQuickHull->maxNumVertsPerFace() > gpuMaxVertsPerFace)
		{
			res = expandHullOBB();
		}
	}

	PX_FREE(outvsource);
	return res;
}
//////////////////////////////////////////////////////////////////////////
// fixup the input vertices to be not colinear or coplanar for the initial simplex find
bool QuickHullConvexHullLib::cleanupForSimplex(PxVec3* vertices, PxU32 vertexCount, local::QuickHullVertex* minimumVertex,
	local::QuickHullVertex* maximumVertex, float& tolerance, float& planeTolerance)
{
	// returns true when the input was untouched (min/max and tolerances may be
	// reused); returns false when degenerate points had to be moved
	bool retVal = true;

	for (PxU32 i = 0; i < 3; i++)
	{
		minimumVertex[i].point = vertices[0];
		minimumVertex[i].index = 0;
		maximumVertex[i].point = vertices[0];
		maximumVertex[i].index = 0;
	}

	PxVec3 max = vertices[0];
	PxVec3 min = vertices[0];

	// get the max min vertices along the x,y,z
	for (PxU32 i = 1; i < vertexCount; i++)
	{
		const PxVec3& testPoint = vertices[i];
		if (testPoint.x > max.x)
		{
			max.x = testPoint.x;
			maximumVertex[0].point = testPoint;
			maximumVertex[0].index = i;
		}
		else if (testPoint.x < min.x)
		{
			min.x = testPoint.x;
			minimumVertex[0].point = testPoint;
			minimumVertex[0].index = i;
		}

		if (testPoint.y > max.y)
		{
			max.y = testPoint.y;
			maximumVertex[1].point = testPoint;
			maximumVertex[1].index = i;
		}
		else if (testPoint.y < min.y)
		{
			min.y = testPoint.y;
			minimumVertex[1].point = testPoint;
			minimumVertex[1].index = i;
		}

		if (testPoint.z > max.z)
		{
			max.z = testPoint.z;
			maximumVertex[2].point = testPoint;
			maximumVertex[2].index = i;
		}
		else if (testPoint.z < min.z)
		{
			min.z = testPoint.z;
			minimumVertex[2].point = testPoint;
			minimumVertex[2].index = i;
		}
	}

	// scale the tolerances by the overall extent of the point cloud
	const float sizeTol = (max.x-min.x + max.y - min.y + max.z - min.z)*0.5f;
	tolerance = PxMax(local::PLANE_THICKNES * sizeTol, local::PLANE_THICKNES);
	planeTolerance = PxMax(mCookingParams.planeTolerance *sizeTol, mCookingParams.planeTolerance);

	// pick the axis with the largest one-dimensional spread
	float fmax = 0;
	PxU32 imax = 0;
	for (PxU32 i = 0; i < 3; i++)
	{
		float diff = (maximumVertex[i].point)[i] - (minimumVertex[i].point)[i];
		if (diff > fmax)
		{
			fmax = diff;
			imax = i;
		}
	}

	PxVec3 simplex[4];
	// set first two vertices to be those with the greatest
	// one dimensional separation
	simplex[0] = maximumVertex[imax].point;
	simplex[1] = minimumVertex[imax].point;
	simplex[2] = simplex[3] = PxVec3(0.0f);	// PT: added to silence the static analyzer

	// set third vertex to be the vertex farthest from
	// the line between simplex[0] and simplex[1]
	PxVec3 normal(0.0f);
	float maxDist = 0;
	imax = 0;
	PxVec3 u01 = (simplex[1] - simplex[0]);
	u01.normalize();
	for (PxU32 i = 0; i < vertexCount; i++)
	{
		const PxVec3& testPoint = vertices[i];
		const PxVec3 diff = testPoint - simplex[0];
		const PxVec3 xprod = u01.cross(diff);
		// squared distance from the line is |u01 x diff|^2 (u01 is unit length)
		const float lenSqr = xprod.magnitudeSquared();
		if (lenSqr > maxDist)
		{
			maxDist = lenSqr;
			simplex[2] = testPoint;
			normal = xprod;
			imax = i;
		}
	}

	if (PxSqrt(maxDist) < planeTolerance)
	{
		// points are collinear, we have to move the point further
		// push the farthest point perpendicularly away from the line
		PxVec3 u02 = simplex[2] - simplex[0];
		float fT = u02.dot(u01);
		const float sqrLen = u01.magnitudeSquared();
		fT /= sqrLen;
		PxVec3 n = u02 - fT*u01;
		n.normalize();
		const PxVec3 mP = simplex[2] + n * planeTolerance;
		simplex[2] = mP;
		vertices[imax] = mP;
		retVal = false;	// input was modified, min/max cannot be reused
	}
	normal.normalize();

	// set the forth vertex in the normal direction
	// find the vertex farthest from the plane of the first three
	float d0 = simplex[2].dot(normal);
	maxDist = 0.0f;
	imax = 0;
	for (PxU32 i = 0; i < vertexCount; i++)
	{
		const PxVec3& testPoint = vertices[i];
		float dist = PxAbs(testPoint.dot(normal) - d0);
		if (dist > maxDist)
		{
			maxDist = dist;
			// PT: simplex[3] is never used, is it?
			simplex[3] = testPoint;
			imax = i;
		}
	}

	if (PxAbs(maxDist) < planeTolerance)
	{
		// points are coplanar - push the farthest point off the plane along the normal
		const float dist = (vertices[imax].dot(normal) - d0);
		if (dist > 0)
			vertices[imax] = vertices[imax] + normal * planeTolerance;
		else
			vertices[imax] = vertices[imax] - normal * planeTolerance;
		retVal = false;
	}

	return retVal;
}
//////////////////////////////////////////////////////////////////////////
// expand the hull with the from the limited triangles set
// expand hull will do following steps:
// 1. get expand points from hull that form the best hull with given vertices
// 2. expand the planes to have all vertices inside the planes volume
// 3. compute new points by 3 adjacency planes intersections
// 4. take those points and create the hull from them
PxConvexMeshCookingResult::Enum QuickHullConvexHullLib::expandHull()
{
	PxArray<local::ExpandPoint> expandPoints;
	expandPoints.reserve(mQuickHull->mNumVertices);

	// go over faces and gather expand points
	// one expand point per face corner; duplicates are filtered by addExpandPoint
	for (PxU32 i = 0; i < mQuickHull->mHullFaces.size(); i++)
	{
		const local::QuickHullFace& face = *mQuickHull->mHullFaces[i];
		if(face.state == local::QuickHullFace::eVISIBLE)
		{
			local::ExpandPoint expandPoint;
			local::QuickHullHalfEdge* he = face.edge;
			local::getExpandPoint(*he, expandPoint);
			local::addExpandPoint(expandPoint, expandPoints);
			he = he->next;
			while (he != face.edge)
			{
				local::getExpandPoint(*he, expandPoint);
				local::addExpandPoint(expandPoint, expandPoints);
				he = he->next;
			}
		}
	}

	// go over the planes now and expand them
	// expandOffset becomes the largest positive distance of any input vertex,
	// so the expanded planes enclose the whole point cloud
	for(PxU32 iVerts=0;iVerts< mQuickHull->mNumVertices;iVerts++)
	{
		const local::QuickHullVertex& vertex = mQuickHull->mVerticesList[iVerts];
		for (PxU32 i = 0; i < mQuickHull->mHullFaces.size(); i++)
		{
			local::QuickHullFace& face = *mQuickHull->mHullFaces[i];
			if(face.state == local::QuickHullFace::eVISIBLE)
			{
				const float dist = face.distanceToPlane(vertex.point);
				if(dist > 0 && dist > face.expandOffset)
				{
					face.expandOffset = dist;
				}
			}
		}
	}

	// fill the expand points planes
	// look up each referenced face and build its (expanded) plane
	for(PxU32 i=0;i<expandPoints.size();i++)
	{
		local::ExpandPoint& expandPoint = expandPoints[i];
		for (PxU32 k = 0; k < 3; k++)
		{
			const local::QuickHullFace& face = *mQuickHull->mFreeFaces.getItem(expandPoint.planeIndex[k]);
			PX_ASSERT(face.index == expandPoint.planeIndex[k]);
			PxPlane plane;
			plane.n = face.normal;
			plane.d = -face.planeOffset;
			if(face.expandOffset > 0.0f)
				plane.d -= face.expandOffset;

			expandPoint.plane[k] = plane;
		}
	}

	// now find the plane intersection
	// each new hull vertex is the intersection of its 3 expanded planes
	PX_ALLOCA(vertices,PxVec3,expandPoints.size());
	for(PxU32 i=0;i<expandPoints.size();i++)
	{
		local::ExpandPoint& expandPoint = expandPoints[i];
		vertices[i] = local::threePlaneIntersection(expandPoint.plane[0],expandPoint.plane[1],expandPoint.plane[2]);
	}

	// construct again the hull from the new points
	local::QuickHull* newHull = PX_NEW(local::QuickHull)(mQuickHull->mCookingParams, mQuickHull->mConvexDesc);
	newHull->preallocate(expandPoints.size());

	newHull->parseInputVertices(vertices,expandPoints.size());

	local::QuickHullResult::Enum qhRes = newHull->buildHull();
	switch(qhRes)
	{
	case local::QuickHullResult::eZERO_AREA_TEST_FAILED:
		{
			newHull->releaseHull();
			PX_DELETE(newHull);
			return PxConvexMeshCookingResult::eZERO_AREA_TEST_FAILED;
		}
	case local::QuickHullResult::eSUCCESS:
	case local::QuickHullResult::eVERTEX_LIMIT_REACHED:
	case local::QuickHullResult::ePOLYGONS_LIMIT_REACHED:
		{
			// swap the old hull for the newly built expanded one
			mQuickHull->releaseHull();
			PX_DELETE(mQuickHull);
			mQuickHull = newHull;
		}
		break;
	case local::QuickHullResult::eFAILURE:
		{
			newHull->releaseHull();
			PX_DELETE(newHull);
			return PxConvexMeshCookingResult::eFAILURE;
		}
	};

	return PxConvexMeshCookingResult::eSUCCESS;
}
//////////////////////////////////////////////////////////////////////////
// expand the hull from the limited triangles set
// 1. collect all planes
// 2. create OBB from the input verts
// 3. slice the OBB with the planes
// 4. iterate till vlimit is reached
PxConvexMeshCookingResult::Enum QuickHullConvexHullLib::expandHullOBB()
{
	PxArray<PxPlane> expandPlanes;
	expandPlanes.reserve(mQuickHull->mHullFaces.size());

	// collect expand planes (already pushed out by expandOffset so they
	// enclose all input vertices)
	for (PxU32 i = 0; i < mQuickHull->mHullFaces.size(); i++)
	{
		local::QuickHullFace& face = *mQuickHull->mHullFaces[i];
		if (face.state == local::QuickHullFace::eVISIBLE)
		{
			PxPlane plane;
			plane.n = face.normal;
			plane.d = -face.planeOffset;
			if (face.expandOffset > 0.0f)
				plane.d -= face.expandOffset;

			expandPlanes.pushBack(plane);
		}
	}

	PxTransform obbTransform;
	PxVec3 sides;
	// compute the OBB
	PxConvexMeshDesc convexDesc;
	fillConvexMeshDescFromQuickHull(convexDesc);
	convexDesc.flags = mConvexMeshDesc.flags;
	computeOBBFromConvex(convexDesc, sides, obbTransform);

	// free the memory used for the convex mesh desc
	PX_FREE(mOutMemoryBuffer);
	mFaceTranslateTable = NULL;	// was pointing into mOutMemoryBuffer

	// crop the OBB
	// repeatedly slice the box with the best candidate plane until no plane
	// is left or a limit would be violated
	PxU32 maxplanes = PxMin(PxU32(256), expandPlanes.size());

	ConvexHull* c = PX_NEW(ConvexHull)(sides*0.5f,obbTransform, expandPlanes);
	const float planeTolerance = mQuickHull->mPlaneTolerance;
	const float epsilon = mQuickHull->mTolerance;
	PxI32 k;
	while (maxplanes-- && (k = c->findCandidatePlane(planeTolerance, epsilon)) >= 0)
	{
		ConvexHull* tmp = c;
		c = convexHullCrop(*tmp, expandPlanes[PxU32(k)], planeTolerance);
		if (c == NULL)
		{
			// crop failed - keep the previous hull and stop
			c = tmp;
			break;
		} // might want to debug this case better!!!
		if (!c->assertIntact(planeTolerance))
		{
			// cropped hull is inconsistent - revert to the previous hull
			PX_DELETE(c);
			c = tmp;
			break;
		} // might want to debug this case better too!!!

		// check for vertex limit
		if (c->getVertices().size() > mConvexMeshDesc.vertexLimit)
		{
			PX_DELETE(c);
			c = tmp;
			maxplanes = 0;
			break;
		}
		// check for vertex limit per face if necessary, GRB supports max 32 verts per face
		if (((mConvexMeshDesc.flags & PxConvexFlag::eGPU_COMPATIBLE) || mCookingParams.buildGPUData) && c->maxNumVertsPerFace() > gpuMaxVertsPerFace)
		{
			PX_DELETE(c);
			c = tmp;
			maxplanes = 0;
			break;
		}
		PX_DELETE(tmp);
	}

	PX_ASSERT(c->assertIntact(planeTolerance));

	// the cropped hull now replaces the quickhull output as the data source
	mCropedConvexHull = c;
	return PxConvexMeshCookingResult::eSUCCESS;
}
//////////////////////////////////////////////////////////////////////////
bool QuickHullConvexHullLib::createEdgeList(const PxU32 nbIndices, const PxU8* indices, PxU8** outHullDataFacesByEdges8, PxU16** outEdgeData16, PxU16** outEdges)
{
	// if we cropped the hull, we don't have the edge information, early exit
	if (mCropedConvexHull)
		return false;

	PX_ASSERT(mQuickHull);

	// Make sure we received empty buffers
	PX_ASSERT(*outHullDataFacesByEdges8 == NULL);
	PX_ASSERT(*outEdges == NULL);
	PX_ASSERT(*outEdgeData16 == NULL);

	// Allocate the out buffers (ownership passes to the caller)
	PxU8* hullDataFacesByEdges8 = PX_ALLOCATE(PxU8, nbIndices, "hullDataFacesByEdges8");
	PxU16* edges = PX_ALLOCATE(PxU16, nbIndices, "edges");
	PxU16* edgeData16 = PX_ALLOCATE(PxU16, nbIndices, "edgeData16");
	*outHullDataFacesByEdges8 = hullDataFacesByEdges8;
	*outEdges = edges;
	*outEdgeData16 = edgeData16;

	PxU16 edgeIndex = 0;	// next unique edge slot
	PxU32 edgeOffset = 0;	// running offset into the face index buffer
	// iterate faces in output order (mFaceTranslateTable maps out index -> hull face)
	for(PxU32 i = 0; i < mQuickHull->mNumHullFaces; i++)
	{
		const local::QuickHullFace& face = *mQuickHull->mHullFaces[mFaceTranslateTable[i]];

		// Face must be visible
		PX_ASSERT(face.state == local::QuickHullFace::eVISIBLE);

		// parse the edges
		const PxU32 startEdgeOffset = edgeOffset;
		local::QuickHullHalfEdge* hedge = face.edge;
		do
		{
			// check if hedge has been stored
			// 0xFFFFFFFF marks a half-edge whose pair has not been emitted yet
			if(hedge->edgeIndex == 0xFFFFFFFF)
			{
				// store the edge's two vertices (wrapping to the face's first index)
				edges[edgeIndex*2] = indices[edgeOffset];
				edges[edgeIndex*2 + 1] = indices[(hedge->next != face.edge) ? edgeOffset + 1 : startEdgeOffset];

				// the two faces sharing this edge
				hullDataFacesByEdges8[edgeIndex*2] = hedge->face->outIndex;
				hullDataFacesByEdges8[edgeIndex*2 + 1] = hedge->next->twin->face->outIndex;

				edgeData16[edgeOffset] = edgeIndex;
				// mark both half-edges of the pair as emitted
				hedge->edgeIndex = edgeIndex;
				hedge->next->twin->prev->edgeIndex = edgeIndex;
				edgeIndex++;
			}
			else
			{
				// edge already emitted when its twin's face was processed
				edgeData16[edgeOffset] = PxTo16(hedge->edgeIndex);
			}
			hedge = hedge->next;
			edgeOffset++;
		} while (hedge != face.edge);
	}
	return true;
}
//////////////////////////////////////////////////////////////////////////
// fill the descriptor with computed verts, indices and polygons
void QuickHullConvexHullLib::fillConvexMeshDesc(PxConvexMeshDesc& desc)
{
	// pick the data source: the cropped hull (OBB expansion path) takes
	// precedence over the regular quickhull output
	if (mCropedConvexHull)
		fillConvexMeshDescFromCroppedHull(desc);
	else
		fillConvexMeshDescFromQuickHull(desc);

	// undo the vertex shift applied during input cleanup, if any
	const bool verticesWereShifted = (mConvexMeshDesc.flags & PxConvexFlag::eSHIFT_VERTICES) != 0;
	if (verticesWereShifted)
		shiftConvexMeshDesc(desc);
}
//////////////////////////////////////////////////////////////////////////
// fill the descriptor with computed verts, indices and polygons from quickhull convex
void QuickHullConvexHullLib::fillConvexMeshDescFromQuickHull(PxConvexMeshDesc& desc)
{
	// get the number of indices needed
	PxU32 numIndices = 0;
	PxU32 numFaces = mQuickHull->mHullFaces.size();
	PxU32 numFacesOut = 0;
	PxU32 largestFace = 0; // remember the largest face, we store it as the first face, required for GRB test (max 32 vers per face supported)
	for (PxU32 i = 0; i < numFaces; i++)
	{
		const local::QuickHullFace& face = *mQuickHull->mHullFaces[i];
		if(face.state == local::QuickHullFace::eVISIBLE)
		{
			numFacesOut++;
			numIndices += face.numEdges;
			if(face.numEdges > mQuickHull->mHullFaces[largestFace]->numEdges)
				largestFace = i;
		}
	}

	// allocate out buffers
	// single allocation holding indices | vertices | polygons | face table | vertex table
	const PxU32 indicesBufferSize = sizeof(PxU32)*numIndices;
	const PxU32 verticesBufferSize = sizeof(PxVec3)*(mQuickHull->mNumVertices + 1);	// +1 for V4 safe load
	const PxU32 facesBufferSize = sizeof(PxHullPolygon)*numFacesOut;
	const PxU32 faceTranslationTableSize = sizeof(PxU16)*numFacesOut;
	const PxU32 translationTableSize = sizeof(PxU32)*mQuickHull->mNumVertices;
	const PxU32 bufferMemorySize = indicesBufferSize + verticesBufferSize + facesBufferSize + faceTranslationTableSize + translationTableSize;

	mOutMemoryBuffer = reinterpret_cast<PxU8*>(PX_ALLOC(bufferMemorySize, "ConvexMeshDesc"));

	PxU32* indices = reinterpret_cast<PxU32*> (mOutMemoryBuffer);
	PxVec3* vertices = reinterpret_cast<PxVec3*> (mOutMemoryBuffer + indicesBufferSize);
	PxHullPolygon* polygons = reinterpret_cast<PxHullPolygon*> (mOutMemoryBuffer + indicesBufferSize + verticesBufferSize);
	mFaceTranslateTable = reinterpret_cast<PxU16*> (mOutMemoryBuffer + indicesBufferSize + verticesBufferSize + facesBufferSize);
	PxI32* translateTable = reinterpret_cast<PxI32*> (mOutMemoryBuffer + indicesBufferSize + verticesBufferSize + facesBufferSize + faceTranslationTableSize);

	// -1 marks an input vertex not referenced by any visible face yet
	PxMemSet(translateTable,-1,mQuickHull->mNumVertices*sizeof(PxU32));

	// go over the hullPolygons and mark valid vertices, create translateTable
	// (maps original vertex index -> compacted output vertex index)
	PxU32 numVertices = 0;
	for (PxU32 i = 0; i < numFaces; i++)
	{
		const local::QuickHullFace& face = *mQuickHull->mHullFaces[i];
		if(face.state == local::QuickHullFace::eVISIBLE)
		{
			local::QuickHullHalfEdge* he = face.edge;
			if(translateTable[he->tail.index] == -1)
			{
				vertices[numVertices] = he->tail.point;
				translateTable[he->tail.index] = PxI32(numVertices);
				numVertices++;
			}
			he = he->next;
			while (he != face.edge)
			{
				if(translateTable[he->tail.index] == -1)
				{
					vertices[numVertices] = he->tail.point;
					translateTable[he->tail.index] = PxI32(numVertices);
					numVertices++;
				}
				he = he->next;
			}
		}
	}

	desc.points.count = numVertices;
	desc.points.data = vertices;
	desc.points.stride = sizeof(PxVec3);

	desc.indices.count = numIndices;
	desc.indices.data = indices;
	desc.indices.stride = sizeof(PxU32);

	desc.polygons.count = numFacesOut;
	desc.polygons.data = polygons;
	desc.polygons.stride = sizeof(PxHullPolygon);

	PxU16 indexOffset = 0;
	numFacesOut = 0;
	for (PxU32 i = 0; i < numFaces; i++)
	{
		// faceIndex - store the largest face first then the rest
		PxU32 faceIndex;
		if(i == 0)
		{
			faceIndex = largestFace;
		}
		else
		{
			// the face that was swapped into slot 0 is emitted at largestFace's slot
			faceIndex = (i == largestFace) ? 0 : i;
		}

		local::QuickHullFace& face = *mQuickHull->mHullFaces[faceIndex];
		if(face.state == local::QuickHullFace::eVISIBLE)
		{
			//create index data
			local::QuickHullHalfEdge* he = face.edge;
			PxU32 index = 0;
			// reset edgeIndex so a later createEdgeList() pass can detect unvisited edges
			he->edgeIndex = 0xFFFFFFFF;
			indices[index + indexOffset] = PxU32(translateTable[he->tail.index]);
			index++;
			he = he->next;
			while (he != face.edge)
			{
				indices[index + indexOffset] = PxU32(translateTable[he->tail.index]);
				index++;
				he->edgeIndex = 0xFFFFFFFF;
				he = he->next;
			}

			// create polygon
			PxHullPolygon polygon;
			polygon.mPlane[0] = face.normal[0];
			polygon.mPlane[1] = face.normal[1];
			polygon.mPlane[2] = face.normal[2];
			polygon.mPlane[3] = -face.planeOffset;
			polygon.mIndexBase = indexOffset;
			polygon.mNbVerts = face.numEdges;
			indexOffset += face.numEdges;
			polygons[numFacesOut] = polygon;
			// remember which hull face produced this output polygon
			mFaceTranslateTable[numFacesOut] = PxTo16(faceIndex);
			face.outIndex = PxTo8(numFacesOut);
			numFacesOut++;
		}
	}
	PX_ASSERT(mQuickHull->mNumHullFaces == numFacesOut);
}
//////////////////////////////////////////////////////////////////////////
// fill the desc from cropped hull data
void QuickHullConvexHullLib::fillConvexMeshDescFromCroppedHull(PxConvexMeshDesc& outDesc)
{
	PX_ASSERT(mCropedConvexHull);

	// allocate the output buffers
	// single allocation holding indices | polygons | vertices
	const PxU32 numIndices = mCropedConvexHull->getEdges().size();
	const PxU32 numPolygons = mCropedConvexHull->getFacets().size();
	const PxU32 numVertices = mCropedConvexHull->getVertices().size();

	const PxU32 indicesBufferSize = sizeof(PxU32)*numIndices;
	const PxU32 facesBufferSize = sizeof(PxHullPolygon)*numPolygons;
	const PxU32 verticesBufferSize = sizeof(PxVec3)*(numVertices + 1); // allocate additional vec3 for V4 safe load in VolumeInteration

	const PxU32 bufferMemorySize = indicesBufferSize + verticesBufferSize + facesBufferSize;
	mOutMemoryBuffer = reinterpret_cast<PxU8*>(PX_ALLOC(bufferMemorySize, "ConvexMeshDesc"));

	// parse the hullOut and fill the result with vertices and polygons
	PxU32* indicesOut = reinterpret_cast<PxU32*> (mOutMemoryBuffer);
	PxHullPolygon* polygonsOut = reinterpret_cast<PxHullPolygon*> (mOutMemoryBuffer + indicesBufferSize);
	PxVec3* vertsOut = reinterpret_cast<PxVec3*> (mOutMemoryBuffer + indicesBufferSize + facesBufferSize);

	PxMemCopy(vertsOut, mCropedConvexHull->getVertices().begin(), sizeof(PxVec3)*numVertices);

	// the edge list is grouped by polygon (edge.p); walk each run of equal .p
	// values and emit one PxHullPolygon per run
	PxU32 i = 0;	// edge / index cursor
	PxU32 k = 0;	// polygon cursor
	PxU32 j = 1;	// number of edges in the current polygon
	while (i < mCropedConvexHull->getEdges().size())
	{
		j = 1;
		PxHullPolygon& polygon = polygonsOut[k];
		// get num indices per polygon
		while (j + i < mCropedConvexHull->getEdges().size() && mCropedConvexHull->getEdges()[i].p == mCropedConvexHull->getEdges()[i + j].p)
		{
			j++;
		}
		polygon.mNbVerts = PxTo16(j);
		polygon.mIndexBase = PxTo16(i);
		// get the plane
		polygon.mPlane[0] = mCropedConvexHull->getFacets()[k].n[0];
		polygon.mPlane[1] = mCropedConvexHull->getFacets()[k].n[1];
		polygon.mPlane[2] = mCropedConvexHull->getFacets()[k].n[2];
		polygon.mPlane[3] = mCropedConvexHull->getFacets()[k].d;
		// copy the polygon's vertex indices
		while (j--)
		{
			indicesOut[i] = mCropedConvexHull->getEdges()[i].v;
			i++;
		}
		k++;
	}
	PX_ASSERT(k == mCropedConvexHull->getFacets().size());

	outDesc.indices.count = numIndices;
	outDesc.indices.stride = sizeof(PxU32);
	outDesc.indices.data = indicesOut;

	outDesc.points.count = numVertices;
	outDesc.points.stride = sizeof(PxVec3);
	outDesc.points.data = vertsOut;

	outDesc.polygons.count = numPolygons;
	outDesc.polygons.stride = sizeof(PxHullPolygon);
	outDesc.polygons.data = polygonsOut;

	// move the largest face to slot 0 (GPU requirement)
	swapLargestFace(outDesc);
}
| 75,802 | C++ | 28.256272 | 171 | 0.657173 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/cooking/GuCookingConvexHullBuilder.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxMemory.h"
#include "cooking/PxCooking.h"
#include "GuEdgeList.h"
#include "GuTriangle.h"
#include "GuConvexMesh.h"
#include "GuMeshCleaner.h"
#include "GuCookingConvexHullBuilder.h"
#include "GuCookingConvexHullLib.h"
#include "foundation/PxArray.h"
#include "foundation/PxVecMath.h"
#include "CmRadixSort.h"
// PT: TODO: refactor/revisit this, looks like it comes from an old ICE file
// 7: added mHullDataFacesByVertices8
// 8: added mEdges
// 9: removed duplicite 'C', 'V', 'H', 'L' header
static const physx::PxU32 gVersion = 9;
using namespace physx;
using namespace Gu;
using namespace Cm;
using namespace aos;
#define USE_PRECOMPUTED_HULL_PROJECTION
///////////////////////////////////////////////////////////////////////////////
PX_IMPLEMENT_OUTPUT_ERROR
///////////////////////////////////////////////////////////////////////////////
// Constructor: binds the builder to the target hull data. All owned buffers start
// out unallocated; they are created later by init().
ConvexHullBuilder::ConvexHullBuilder(ConvexHullData* hull, const bool buildGRBData) :
	mHull			(hull),
	mBuildGRBData	(buildGRBData)
{
	mHullDataHullVertices		= NULL;
	mHullDataPolygons			= NULL;
	mHullDataVertexData8		= NULL;
	mHullDataFacesByEdges8		= NULL;
	mHullDataFacesByVertices8	= NULL;
	mEdgeData16					= NULL;
	mEdges						= NULL;
}
//////////////////////////////////////////////////////////////////////////
// Destructor: releases every buffer the builder owns.
ConvexHullBuilder::~ConvexHullBuilder()
{
	PX_FREE(mHullDataFacesByVertices8);
	PX_FREE(mHullDataFacesByEdges8);
	PX_FREE(mHullDataVertexData8);
	PX_FREE(mHullDataPolygons);
	PX_FREE(mHullDataHullVertices);
	PX_FREE(mEdges);
	PX_FREE(mEdgeData16);
}
//////////////////////////////////////////////////////////////////////////
// initialize the convex hull
// \param nbVerts [in] number of vertices used
// \param verts [in] vertices array
// \param indices [in] indices array
// \param nbPolygons [in] number of polygons
// \param hullPolygons [in] polygons array
// \param doValidation [in] specifies whether we should run the validation code
// \param hullLib [in] if hullLib is provided, we can reuse the hull create data, hulllib is NULL in case of user provided polygons
bool ConvexHullBuilder::init(PxU32 nbVerts, const PxVec3* verts, const PxU32* indices, const PxU32 nbIndices,
const PxU32 nbPolygons, const PxHullPolygon* hullPolygons, bool doValidation, ConvexHullLib* hullLib)
{
PX_ASSERT(indices);
PX_ASSERT(verts);
PX_ASSERT(hullPolygons);
PX_ASSERT(nbVerts);
PX_ASSERT(nbPolygons);
// reset all owned buffer pointers (the builder may be reused)
mHullDataHullVertices = NULL;
mHullDataPolygons = NULL;
mHullDataVertexData8 = NULL;
mHullDataFacesByEdges8 = NULL;
mHullDataFacesByVertices8 = NULL;
mEdges = NULL;
mEdgeData16 = NULL;
mHull->mNbHullVertices = PxTo8(nbVerts);
// allocate additional vec3 for V4 safe load in VolumeInteration
mHullDataHullVertices = PX_ALLOCATE(PxVec3, (mHull->mNbHullVertices + 1), "PxVec3");
PxMemCopy(mHullDataHullVertices, verts, mHull->mNbHullVertices*sizeof(PxVec3));
// Cleanup
mHull->mNbPolygons = 0;
PX_FREE(mHullDataVertexData8);
PX_FREE(mHullDataPolygons);
// polygon indices are stored in 8 bits (see PxTo8 below), so the count must fit
if(nbPolygons>255)
return outputError<PxErrorCode::eINTERNAL_ERROR>(__LINE__, "ConvexHullBuilder::init: convex hull has more than 255 polygons!");
// Precompute hull polygon structures
mHull->mNbPolygons = PxTo8(nbPolygons);
mHullDataPolygons = PX_ALLOCATE(HullPolygonData, mHull->mNbPolygons, "Gu::HullPolygonData");
mHullDataVertexData8 = PX_ALLOCATE(PxU8, nbIndices, "mHullDataVertexData8");
PxU8* dest = mHullDataVertexData8;
for(PxU32 i=0;i<nbPolygons;i++)
{
const PxHullPolygon& inPolygon = hullPolygons[i];
mHullDataPolygons[i].mVRef8 = PxU16(dest - mHullDataVertexData8); // Setup link for current polygon
PxU32 numVerts = inPolygon.mNbVerts;
PX_ASSERT(numVerts>=3); // Else something very wrong happened...
mHullDataPolygons[i].mNbVerts = PxTo8(numVerts);
// narrow the 32-bit input indices into the internal 8-bit vertex-ref format
for (PxU32 j = 0; j < numVerts; j++)
{
dest[j] = PxTo8(indices[inPolygon.mIndexBase + j]);
}
mHullDataPolygons[i].mPlane = PxPlane(inPolygon.mPlane[0],inPolygon.mPlane[1],inPolygon.mPlane[2],inPolygon.mPlane[3]);
// Next one
dest += numVerts;
}
// user-provided polygons (hullLib == NULL) get the stricter error message
if(!calculateVertexMapTable(nbPolygons, (hullLib != NULL) ? false : true))
return false;
// moved create edge list here from save, copy. This is a part of the validation process and
// we need to create the edge list anyway
if(!hullLib || !hullLib->createEdgeList(nbIndices, mHullDataVertexData8, &mHullDataFacesByEdges8, &mEdgeData16, &mEdges))
{
if (!createEdgeList(doValidation, nbIndices))
return false;
}
else
{
mHull->mNbEdges = PxU16(nbIndices/2);
}
#ifdef USE_PRECOMPUTED_HULL_PROJECTION
// Loop through polygons
for (PxU32 j = 0; j < nbPolygons; j++)
{
// Precompute hull projection along local polygon normal
PxU32 NbVerts = mHull->mNbHullVertices;
const PxVec3* Verts = mHullDataHullVertices;
HullPolygonData& polygon = mHullDataPolygons[j];
PxReal min = PX_MAX_F32;
PxU8 minIndex = 0xff;
// find the hull vertex with the smallest projection onto this polygon's normal
for (PxU8 i = 0; i < NbVerts; i++)
{
float dp = (*Verts++).dot(polygon.mPlane.n);
if (dp < min)
{
min = dp;
minIndex = i;
}
}
polygon.mMinIndex = minIndex;
}
#endif
if(doValidation)
return checkHullPolygons();
else
return true;
}
//////////////////////////////////////////////////////////////////////////
// hull polygons check
// Validates the cooked hull: every vertex must lie behind (within tolerance) each
// polygon plane it does not belong to, and the plane set must enclose a closed volume
// (checked with 8 probe points outside the hull's bounds).
bool ConvexHullBuilder::checkHullPolygons() const
{
const PxVec3* hullVerts = mHullDataHullVertices;
const PxU8* vertexData = mHullDataVertexData8;
HullPolygonData* hullPolygons = mHullDataPolygons;
// Check hull validity
if(!hullVerts || !hullPolygons)
return false;
// a closed convex hull needs at least 4 faces (tetrahedron)
if(mHull->mNbPolygons<4)
return false;
// one pass computes both the per-axis absolute maxima ('max') and the AABB (hullMin/hullMax)
PxVec3 max(-FLT_MAX,-FLT_MAX,-FLT_MAX);
PxVec3 hullMax = hullVerts[0];
PxVec3 hullMin = hullVerts[0];
for(PxU32 j=0;j<mHull->mNbHullVertices;j++)
{
const PxVec3& hullVert = hullVerts[j];
if(fabsf(hullVert.x) > max.x)
max.x = fabsf(hullVert.x);
if(fabsf(hullVert.y) > max.y)
max.y = fabsf(hullVert.y);
if(fabsf(hullVert.z) > max.z)
max.z = fabsf(hullVert.z);
if (hullVert.x > hullMax.x)
{
hullMax.x = hullVert.x;
}
else if (hullVert.x < hullMin.x)
{
hullMin.x = hullVert.x;
}
if (hullVert.y > hullMax.y)
{
hullMax.y = hullVert.y;
}
else if (hullVert.y < hullMin.y)
{
hullMin.y = hullVert.y;
}
if (hullVert.z > hullMax.z)
{
hullMax.z = hullVert.z;
}
else if (hullVert.z < hullMin.z)
{
hullMin.z = hullVert.z;
}
}
// compute the test epsilon the same way we construct the hull, verts are considered coplanar within this epsilon
const float planeTolerance = 0.02f;
const float testEpsilon = PxMax(planeTolerance * (PxMax(PxAbs(hullMax.x), PxAbs(hullMin.x)) +
PxMax(PxAbs(hullMax.y), PxAbs(hullMin.y)) +
PxMax(PxAbs(hullMax.z), PxAbs(hullMin.z))), planeTolerance);
max += PxVec3(testEpsilon, testEpsilon, testEpsilon);
// 8 probe points just outside the hull's extents; each one must end up in front of
// at least one plane, otherwise the plane set does not close around the volume
PxVec3 testVectors[8];
bool foundPlane[8];
for (PxU32 i = 0; i < 8; i++)
{
foundPlane[i] = false;
}
testVectors[0] = PxVec3(max.x,max.y,max.z);
testVectors[1] = PxVec3(max.x,-max.y,-max.z);
testVectors[2] = PxVec3(max.x,max.y,-max.z);
testVectors[3] = PxVec3(max.x,-max.y,max.z);
testVectors[4] = PxVec3(-max.x,max.y,max.z);
testVectors[5] = PxVec3(-max.x,-max.y,max.z);
testVectors[6] = PxVec3(-max.x,max.y,-max.z);
testVectors[7] = PxVec3(-max.x,-max.y,-max.z);
// Extra convex hull validity check. This is less aggressive than previous convex decomposer!
// Loop through polygons
for(PxU32 i=0;i<mHull->mNbPolygons;i++)
{
const PxPlane& P = hullPolygons[i].mPlane;
for (PxU32 k = 0; k < 8; k++)
{
if(!foundPlane[k])
{
const float d = P.distance(testVectors[k]);
if(d >= 0)
{
foundPlane[k] = true;
}
}
}
// Test hull vertices against polygon plane
for(PxU32 j=0;j<mHull->mNbHullVertices;j++)
{
// Don't test vertex if it belongs to plane (to prevent numerical issues)
PxU32 nb = hullPolygons[i].mNbVerts;
bool discard=false;
for(PxU32 k=0;k<nb;k++)
{
if(vertexData[hullPolygons[i].mVRef8+k]==PxU8(j))
{
discard = true;
break;
}
}
if(!discard)
{
const float d = P.distance(hullVerts[j]);
// if(d>0.0001f)
//if(d>0.02f)
if(d > testEpsilon)
return outputError<PxErrorCode::eINTERNAL_ERROR>(__LINE__, "Gu::ConvexMesh::checkHullPolygons: Some hull vertices seems to be too far from hull planes.");
}
}
}
for (PxU32 i = 0; i < 8; i++)
{
if(!foundPlane[i])
return outputError<PxErrorCode::eINTERNAL_ERROR>(__LINE__, "Gu::ConvexMesh::checkHullPolygons: Hull seems to have opened volume or do (some) faces have reversed winding?");
}
return true;
}
//////////////////////////////////////////////////////////////////////////
// hull data store
PX_COMPILE_TIME_ASSERT(sizeof(EdgeDescData)==8);
PX_COMPILE_TIME_ASSERT(sizeof(EdgeData)==8);
// Serializes the hull to a stream (format version gVersion). The GRB-data flag is
// embedded in the top bit of the edge count; values are byte-swapped when
// platformMismatch is set. The write order here must stay in sync with the loader.
bool ConvexHullBuilder::save(PxOutputStream& stream, bool platformMismatch) const
{
// Export header
if(!WriteHeader('C', 'L', 'H', 'L', gVersion, platformMismatch, stream))
return false;
// Export figures
//embed grb flag into mNbEdges
PxU16 hasGRBData = PxU16(mBuildGRBData);
hasGRBData = PxU16(hasGRBData << 15);
// the edge count must leave the top bit free for the flag
PX_ASSERT(mHull->mNbEdges <( (1 << 15) - 1));
const PxU16 nbEdges = PxU16(mHull->mNbEdges | hasGRBData);
writeDword(mHull->mNbHullVertices, platformMismatch, stream);
writeDword(nbEdges, platformMismatch, stream);
writeDword(computeNbPolygons(), platformMismatch, stream); // Use accessor to lazy-build
// total number of vertex references over all polygons
PxU32 nb=0;
for(PxU32 i=0;i<mHull->mNbPolygons;i++)
nb += mHullDataPolygons[i].mNbVerts;
writeDword(nb, platformMismatch, stream);
// Export triangles
writeFloatBuffer(&mHullDataHullVertices->x, PxU32(mHull->mNbHullVertices*3), platformMismatch, stream);
// Export polygons
// TODO: allow lazy-evaluation
// We can't really store the buffer in one run anymore!
for(PxU32 i=0;i<mHull->mNbPolygons;i++)
{
// write a byte-swapped copy so the in-memory data stays untouched
HullPolygonData tmpCopy = mHullDataPolygons[i];
if(platformMismatch)
flipData(tmpCopy);
stream.write(&tmpCopy, sizeof(HullPolygonData));
}
// PT: why not storeBuffer here?
for(PxU32 i=0;i<nb;i++)
stream.write(&mHullDataVertexData8[i], sizeof(PxU8));
stream.write(mHullDataFacesByEdges8, PxU32(mHull->mNbEdges*2));
stream.write(mHullDataFacesByVertices8, PxU32(mHull->mNbHullVertices*3));
// edge vertex pairs are only written when GRB data was built
if (mBuildGRBData)
writeWordBuffer(mEdges, PxU32(mHull->mNbEdges * 2), platformMismatch, stream);
return true;
}
//////////////////////////////////////////////////////////////////////////
// Copies the cooked hull into a single relocatable memory block owned by 'hullData'.
// The block layout must match computeBufferSize(). 'mNb' returns the total number of
// vertex references over all polygons (needed because it is not serialized).
// Fix: removed a stray double semicolon after the mNbEdges assignment.
bool ConvexHullBuilder::copy(ConvexHullData& hullData, PxU32& mNb)
{
	// set the numbers
	hullData.mNbHullVertices = mHull->mNbHullVertices;
	// embed the GRB flag into the top bit of the edge count (mirrors save())
	PxU16 hasGRBData = PxU16(mBuildGRBData);
	hasGRBData = PxU16(hasGRBData << 15);
	PX_ASSERT(mHull->mNbEdges <((1 << 15) - 1));
	hullData.mNbEdges = PxU16(mHull->mNbEdges | hasGRBData);
	hullData.mNbPolygons = PxTo8(computeNbPolygons());
	// total number of vertex references over all polygons
	PxU32 nb = 0;
	for (PxU32 i = 0; i < mHull->mNbPolygons; i++)
		nb += mHullDataPolygons[i].mNbVerts;
	mNb = nb;
	// allocate the memory first.
	const PxU32 bytesNeeded = computeBufferSize(hullData, nb);
	void* dataMemory = PX_ALLOC(bytesNeeded, "ConvexHullData data");
	PxU8* address = reinterpret_cast<PxU8*>(dataMemory);
	// carve the single allocation into sub-buffers; the edge array is only present
	// when the GRB bit is set
	hullData.mPolygons = reinterpret_cast<HullPolygonData*>(address); address += sizeof(HullPolygonData) * hullData.mNbPolygons;
	PxVec3* dataHullVertices = reinterpret_cast<PxVec3*>(address); address += sizeof(PxVec3) * hullData.mNbHullVertices;
	PxU8* dataFacesByEdges8 = reinterpret_cast<PxU8*>(address); address += sizeof(PxU8) * hullData.mNbEdges * 2;
	PxU8* dataFacesByVertices8 = reinterpret_cast<PxU8*>(address); address += sizeof(PxU8) * hullData.mNbHullVertices * 3;
	PxU16* dataEdges = reinterpret_cast<PxU16*>(address); address += hullData.mNbEdges.isBitSet() ? sizeof(PxU16) *hullData.mNbEdges * 2 : 0;
	PxU8* dataVertexData8 = reinterpret_cast<PxU8*>(address); address += sizeof(PxU8) * nb; // PT: leave that one last, so that we don't need to serialize "Nb"
	PX_ASSERT(!(size_t(dataHullVertices) % sizeof(PxReal)));
	PX_ASSERT(!(size_t(hullData.mPolygons) % sizeof(PxReal)));
	PX_ASSERT(size_t(address) <= size_t(dataMemory) + bytesNeeded);
	PX_ASSERT(mHullDataHullVertices);
	PX_ASSERT(mHullDataPolygons);
	PX_ASSERT(mHullDataVertexData8);
	PX_ASSERT(mHullDataFacesByEdges8);
	PX_ASSERT(mHullDataFacesByVertices8);
	// copy the data
	PxMemCopy(dataHullVertices, &mHullDataHullVertices->x, PxU32(mHull->mNbHullVertices * 3)*sizeof(float));
	PxMemCopy(hullData.mPolygons, mHullDataPolygons , hullData.mNbPolygons*sizeof(HullPolygonData));
	PxMemCopy(dataVertexData8, mHullDataVertexData8, nb);
	PxMemCopy(dataFacesByEdges8,mHullDataFacesByEdges8, PxU32(mHull->mNbEdges * 2));
	// edge data is only copied when GRB data was built (matching the layout above)
	if (mBuildGRBData)
		PxMemCopy(dataEdges, mEdges, PxU32(mHull->mNbEdges * 2) * sizeof(PxU16));
	PxMemCopy(dataFacesByVertices8, mHullDataFacesByVertices8, PxU32(mHull->mNbHullVertices * 3));
	return true;
}
//////////////////////////////////////////////////////////////////////////
// Builds the vertex-to-face table: for each hull vertex, the indices of 3 polygons
// touching it. Returns false (and fills the table with 0xFF) when some vertex has
// fewer than 3 incident polygons.
bool ConvexHullBuilder::calculateVertexMapTable(PxU32 nbPolygons, bool userPolygons)
{
	mHullDataFacesByVertices8 = PX_ALLOCATE(PxU8, mHull->mNbHullVertices*3u, "mHullDataFacesByVertices8");

	// per-vertex count of incident faces recorded so far (vertex count fits in a byte)
	PxU8 incidentFaceCount[256];
	PxMemSet(incidentFaceCount, 0, mHull->mNbHullVertices);

	for (PxU32 polyId = 0; polyId < nbPolygons; polyId++)
	{
		const HullPolygonData& poly = mHullDataPolygons[polyId];
		for (PxU32 v = 0; v < poly.mNbVerts; ++v)
		{
			const PxU8 vertIndex = mHullDataVertexData8[poly.mVRef8 + v];
			// record at most 3 incident faces per vertex
			if (incidentFaceCount[vertIndex] < 3)
				mHullDataFacesByVertices8[vertIndex*3 + incidentFaceCount[vertIndex]++] = PxTo8(polyId);
		}
	}

	// every vertex of a well-formed hull must have exactly 3 recorded faces
	bool missingAdjacency = false;
	for (PxU32 v = 0; v < mHull->mNbHullVertices; ++v)
	{
		if (incidentFaceCount[v] != 3)
		{
			missingAdjacency = true;
			break;
		}
	}

	if (missingAdjacency)
	{
		//PCM will use the original shape, which means it will have a huge performance drop
		if (!userPolygons)
			outputError<PxErrorCode::eINTERNAL_ERROR>(__LINE__, "ConvexHullBuilder: convex hull does not have vertex-to-face info! Try to use different convex mesh cooking settings.");
		else
			outputError<PxErrorCode::eINTERNAL_ERROR>(__LINE__, "ConvexHullBuilder: convex hull does not have vertex-to-face info! Some of the vertices have less than 3 neighbor polygons. The vertex is most likely inside a polygon or on an edge between 2 polygons, please remove those vertices.");

		// mark the whole table as invalid
		for (PxU32 v = 0; v < mHull->mNbHullVertices; ++v)
		{
			mHullDataFacesByVertices8[v * 3 + 0] = 0xFF;
			mHullDataFacesByVertices8[v * 3 + 1] = 0xFF;
			mHullDataFacesByVertices8[v * 3 + 2] = 0xFF;
		}
		return false;
	}
	return true;
}
//////////////////////////////////////////////////////////////////////////
// create edge list
// Builds the hull edge list and the edge->face / face->edge adjacency tables by
// radix-sorting all polygon edges so that the two copies of each shared edge become
// adjacent, then deduplicating. Requires a manifold mesh (each edge shared by
// exactly 2 faces).
bool ConvexHullBuilder::createEdgeList(bool doValidation, PxU32 nbEdges)
{
// Code below could be greatly simplified if we assume manifold meshes!
//feodorb: ok, let's assume manifold meshes, since the code before this change
//would fail on non-maniflold meshes anyways
// We need the adjacency graph for hull polygons, similar to what we have for triangles.
// - sort the polygon edges and walk them in order
// - each edge should appear exactly twice since a convex is a manifold mesh without boundary edges
// - the polygon index is implicit when we walk the sorted list => get the 2 polygons back and update adjacency graph
//
// Two possible structures:
// - polygon to edges: needed for local search (actually: polygon to polygons)
// - edge to polygons: needed to compute edge normals on-the-fly
// Below is largely copied from the edge-list code
// Polygon to edges:
//
// We're dealing with convex polygons made of N vertices, defining N edges. For each edge we want the edge in
// an edge array.
//
// Edges to polygon:
//
// For each edge in the array, we want two polygon indices - ie an edge.
// 0) Compute the total size needed for "polygon to edges"
const PxU32 nbPolygons = mHull->mNbPolygons;
PxU32 nbEdgesUnshared = nbEdges;
// in a manifold mesh, each edge is repeated exactly twice as it shares exactly 2 faces
if (nbEdgesUnshared % 2 != 0)
return outputError<PxErrorCode::eINTERNAL_ERROR>(__LINE__, "Cooking::cookConvexMesh: non-manifold mesh cannot be used, invalid mesh!");
// 1) Get some bytes: I need one EdgesRefs for each face, and some temp buffers
// Face indices by edge indices. First face is the one where the edge is ordered from tail to head.
PX_FREE(mHullDataFacesByEdges8);
mHullDataFacesByEdges8 = PX_ALLOCATE(PxU8, nbEdgesUnshared, "mHullDataFacesByEdges8");
// one allocation split into 8 parallel arrays of nbEdgesUnshared entries each
PxU32* tempBuffer = PX_ALLOCATE(PxU32, nbEdgesUnshared*8, "tmp"); // Temp storage
PxU32* bufferAdd = tempBuffer;
PxU32* PX_RESTRICT vRefs0 = tempBuffer; tempBuffer += nbEdgesUnshared;
PxU32* PX_RESTRICT vRefs1 = tempBuffer; tempBuffer += nbEdgesUnshared;
PxU32* polyIndex = tempBuffer; tempBuffer += nbEdgesUnshared;
PxU32* vertexIndex = tempBuffer; tempBuffer += nbEdgesUnshared;
PxU32* polyIndex2 = tempBuffer; tempBuffer += nbEdgesUnshared;
PxU32* vertexIndex2 = tempBuffer; tempBuffer += nbEdgesUnshared;
PxU32* edgeIndex = tempBuffer; tempBuffer += nbEdgesUnshared;
PxU32* edgeData = tempBuffer; tempBuffer += nbEdgesUnshared;
// TODO avoroshilov: use the same "tempBuffer"
bool* flippedVRefs = PX_ALLOCATE(bool, nbEdgesUnshared, "tmp"); // Temp storage
PxU32* run0 = vRefs0;
PxU32* run1 = vRefs1;
PxU32* run2 = polyIndex;
PxU32* run3 = vertexIndex;
bool* run4 = flippedVRefs;
// 2) Create a full redundant list of edges
PxU32 edgeCounter = 0;
for(PxU32 i=0;i<nbPolygons;i++)
{
PxU32 nbVerts = mHullDataPolygons[i].mNbVerts;
const PxU8* PX_RESTRICT Data = mHullDataVertexData8 + mHullDataPolygons[i].mVRef8;
// Loop through polygon vertices
for(PxU32 j=0;j<nbVerts;j++)
{
// store each edge with vRef0 <= vRef1 so shared edges compare equal after sorting;
// 'flipped' remembers the original winding
PxU32 vRef0 = Data[j];
PxU32 vRef1 = Data[(j+1)%nbVerts];
bool flipped = vRef0>vRef1;
if (flipped)
physx::PxSwap(vRef0, vRef1);
*run0++ = vRef0;
*run1++ = vRef1;
*run2++ = i;
*run3++ = j;
*run4++ = flipped;
edgeData[edgeCounter] = edgeCounter;
edgeCounter++;
}
}
PX_ASSERT(PxU32(run0-vRefs0)==nbEdgesUnshared);
PX_ASSERT(PxU32(run1-vRefs1)==nbEdgesUnshared);
// 3) Sort the list according to both keys (VRefs0 and VRefs1)
Cm::RadixSortBuffered sorter;
const PxU32* PX_RESTRICT sorted = sorter.Sort(vRefs1, nbEdgesUnshared,Cm::RADIX_UNSIGNED).Sort(vRefs0, nbEdgesUnshared,Cm::RADIX_UNSIGNED).GetRanks();
PX_FREE(mEdges);
// Edges by their tail and head VRefs. NbEdgesUnshared == nbEdges * 2
// mEdges[edgeIdx*2 + 0] = tailVref, mEdges[edgeIdx*2 + 1] = headVref
// Tails and heads should be consistent with face refs, so that the edge is given in the order of
// his first face and opposite to the order of his second face
mEdges = PX_ALLOCATE(PxU16, nbEdgesUnshared, "mEdges");
PX_FREE(mEdgeData16);
// Face to edge mapping
mEdgeData16 = PX_ALLOCATE(PxU16, nbEdgesUnshared, "mEdgeData16");
// TODO avoroshilov: remove this comment
//mHull->mNbEdges = PxTo16(nbEdgesUnshared / 2); // #non-redundant edges
mHull->mNbEdges = 0; // #non-redundant edges
// 4) Loop through all possible edges
// - clean edges list by removing redundant edges
// - create EdgesRef list
// mNbFaces = nbFaces;
// TODO avoroshilov:
PxU32 numFacesPerEdgeVerificationCounter = 0;
PxU16* edgeVertOutput = mEdges;
PxU32 previousRef0 = PX_INVALID_U32;
PxU32 previousRef1 = PX_INVALID_U32;
PxU32 previousPolyId = PX_INVALID_U32;
PxU16 nbHullEdges = 0;
for (PxU32 i = 0; i < nbEdgesUnshared; i++)
{
const PxU32 sortedIndex = sorted[i]; // Between 0 and Nb
const PxU32 polyID = polyIndex[sortedIndex]; // Poly index
const PxU32 vertexID = vertexIndex[sortedIndex]; // Poly index
PxU32 sortedRef0 = vRefs0[sortedIndex]; // (SortedRef0, SortedRef1) is the sorted edge
PxU32 sortedRef1 = vRefs1[sortedIndex];
bool flipped = flippedVRefs[sortedIndex];
if (sortedRef0 != previousRef0 || sortedRef1 != previousRef1)
{
// first occurrence of this edge: emit its vertex pair
// TODO avoroshilov: remove this?
if (i != 0 && numFacesPerEdgeVerificationCounter != 1)
return outputError<PxErrorCode::eINTERNAL_ERROR>(__LINE__, "Cooking::cookConvexMesh: non-manifold mesh cannot be used, invalid mesh!");
numFacesPerEdgeVerificationCounter = 0;
// ### TODO: change this in edge list as well
previousRef0 = sortedRef0;
previousRef1 = sortedRef1;
previousPolyId = polyID;
//feodorb:restore the original order of VRefs (tail and head)
if (flipped)
physx::PxSwap(sortedRef0, sortedRef1);
*edgeVertOutput++ = PxTo16(sortedRef0);
*edgeVertOutput++ = PxTo16(sortedRef1);
nbHullEdges++;
}
else
{
// second occurrence: both incident faces are now known, record the adjacency
mHullDataFacesByEdges8[(nbHullEdges - 1) * 2] = PxTo8(previousPolyId);
mHullDataFacesByEdges8[(nbHullEdges - 1) * 2 + 1] = PxTo8(polyID);
++numFacesPerEdgeVerificationCounter;
}
mEdgeData16[mHullDataPolygons[polyID].mVRef8 + vertexID] = PxTo16(i / 2);
// Create mEdgesRef on the fly
polyIndex2[i] = polyID;
vertexIndex2[i] = vertexID;
edgeIndex[i] = PxU32(nbHullEdges - 1);
}
mHull->mNbEdges = nbHullEdges;
//////////////////////
// 2) Get some bytes: one Pair structure / edge
// create this structure only for validation purpose
// 3) Create Counters, ie compute the #faces sharing each edge
if(doValidation)
{
//
sorted = sorter.Sort(vertexIndex2, nbEdgesUnshared, Cm::RADIX_UNSIGNED).Sort(polyIndex2, nbEdgesUnshared, Cm::RADIX_UNSIGNED).GetRanks();
for (PxU32 i = 0; i < nbEdgesUnshared; i++) edgeData[i] = edgeIndex[sorted[i]];
const PxU16 nbToGo = PxU16(mHull->mNbEdges);
EdgeDescData* edgeToTriangles = PX_ALLOCATE(EdgeDescData, nbToGo, "edgeToTriangles");
PxMemZero(edgeToTriangles, sizeof(EdgeDescData)*nbToGo);
PxU32* data = edgeData;
for(PxU32 i=0;i<nbEdgesUnshared;i++) // <= maybe not the same Nb
{
edgeToTriangles[*data++].Count++;
}
// if we don't have a manifold mesh, this can fail... but the runtime would assert in any case
for (PxU32 i = 0; i < nbToGo; i++)
{
if (edgeToTriangles[i].Count != 2)
return outputError<PxErrorCode::eINTERNAL_ERROR>(__LINE__, "Cooking::cookConvexMesh: non-manifold mesh cannot be used, invalid mesh!");
}
PX_FREE(edgeToTriangles);
}
// TODO avoroshilov: use the same "tempBuffer"
PX_FREE(flippedVRefs);
// ### free temp ram
PX_FREE(bufferAdd);
return true;
}
| 24,094 | C++ | 32.93662 | 288 | 0.695277 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/cooking/GuCookingTriangleMesh.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuCooking.h"
#include "cooking/PxTriangleMeshDesc.h"
#include "foundation/PxHashMap.h"
#include "foundation/PxSort.h"
#include "foundation/PxFPU.h"
#include "common/PxInsertionCallback.h"
#include "GuRTreeCooking.h"
#include "GuCookingTriangleMesh.h"
#include "GuEdgeList.h"
#include "GuMeshCleaner.h"
#include "GuConvexEdgeFlags.h"
#include "GuTriangle.h"
#include "GuBV4Build.h"
#include "GuBV32Build.h"
#include "GuBounds.h"
#include "CmSerialize.h"
#include "GuCookingGrbTriangleMesh.h"
#include "GuCookingVolumeIntegration.h"
#include "GuCookingSDF.h"
#include "GuMeshAnalysis.h"
using namespace physx;
using namespace Gu;
using namespace Cm;
///////////////////////////////////////////////////////////////////////////////
PX_IMPLEMENT_OUTPUT_ERROR
///////////////////////////////////////////////////////////////////////////////
// Constructor: binds the builder to the mesh data being cooked and the cooking
// parameters. The edge list starts out NULL (see createSharedEdgeData).
TriangleMeshBuilder::TriangleMeshBuilder(TriangleMeshData& m, const PxCookingParams& params)
	: mEdgeList	(NULL)
	, mParams	(params)
	, mMeshData	(m)
{
}
TriangleMeshBuilder::~TriangleMeshBuilder()
{
// release the edge list built by createSharedEdgeData(), if it was built
PX_DELETE(mEdgeList);
}
void TriangleMeshBuilder::remapTopology(const PxU32* order)
{
if(!mMeshData.mNbTriangles)
return;
GU_PROFILE_ZONE("remapTopology")
// Remap one array at a time to limit memory usage
IndexedTriangle32* newTopo = PX_ALLOCATE(IndexedTriangle32, mMeshData.mNbTriangles, "IndexedTriangle32");
for(PxU32 i=0;i<mMeshData.mNbTriangles;i++)
newTopo[i] = reinterpret_cast<IndexedTriangle32*>(mMeshData.mTriangles)[order[i]];
PX_FREE(mMeshData.mTriangles);
mMeshData.mTriangles = newTopo;
if(mMeshData.mMaterialIndices)
{
PxMaterialTableIndex* newMat = PX_ALLOCATE(PxMaterialTableIndex, mMeshData.mNbTriangles, "mMaterialIndices");
for(PxU32 i=0;i<mMeshData.mNbTriangles;i++)
newMat[i] = mMeshData.mMaterialIndices[order[i]];
PX_FREE(mMeshData.mMaterialIndices);
mMeshData.mMaterialIndices = newMat;
}
if(!mParams.suppressTriangleMeshRemapTable || mParams.buildGPUData)
{
PxU32* newMap = PX_ALLOCATE(PxU32, mMeshData.mNbTriangles, "mFaceRemap");
for(PxU32 i=0;i<mMeshData.mNbTriangles;i++)
newMap[i] = mMeshData.mFaceRemap ? mMeshData.mFaceRemap[order[i]] : order[i];
PX_FREE(mMeshData.mFaceRemap);
mMeshData.mFaceRemap = newMap;
}
}
///////////////////////////////////////////////////////////////////////////////
// Runs MeshCleaner on the mesh data (optionally welding vertices), then copies the
// cleaned vertices/triangles back and builds the face remap table. With 'validate'
// set, only checks that cleaning would be a no-op. Returns false on empty result
// or failed validation; 'condition' (optional) receives a result code.
bool TriangleMeshBuilder::cleanMesh(bool validate, PxTriangleMeshCookingResult::Enum* condition)
{
PX_ASSERT(mMeshData.mFaceRemap == NULL);
// the weld tolerance is only used when welding was explicitly requested
PxF32 meshWeldTolerance = 0.0f;
if(mParams.meshPreprocessParams & PxMeshPreprocessingFlag::eWELD_VERTICES)
{
if(mParams.meshWeldTolerance == 0.0f)
outputError<PxErrorCode::eDEBUG_WARNING>(__LINE__, "TriangleMeshBuilder::cleanMesh: mesh welding enabled with 0 weld tolerance!");
else
meshWeldTolerance = mParams.meshWeldTolerance;
}
MeshCleaner cleaner(mMeshData.mNbVertices, mMeshData.mVertices, mMeshData.mNbTriangles, reinterpret_cast<const PxU32*>(mMeshData.mTriangles), meshWeldTolerance, mParams.meshAreaMinLimit);
if(!cleaner.mNbTris)
{
if(condition)
*condition = PxTriangleMeshCookingResult::eEMPTY_MESH;
return outputError<PxErrorCode::eDEBUG_WARNING>(__LINE__, "TriangleMeshBuilder::cleanMesh: mesh cleaning removed all triangles!");
}
if(validate)
{
// if we do only validate, we check if cleaning did not remove any verts or triangles.
// such a mesh can be then directly used for cooking without clean flag
if((cleaner.mNbVerts != mMeshData.mNbVertices) || (cleaner.mNbTris != mMeshData.mNbTriangles))
return false;
}
// PT: deal with the remap table
{
// PT: TODO: optimize this
if(cleaner.mRemap)
{
const PxU32 newNbTris = cleaner.mNbTris;
// Remap material array
if(mMeshData.mMaterialIndices)
{
PxMaterialTableIndex* tmp = PX_ALLOCATE(PxMaterialTableIndex, newNbTris, "mMaterialIndices");
for(PxU32 i=0;i<newNbTris;i++)
tmp[i] = mMeshData.mMaterialIndices[cleaner.mRemap[i]];
PX_FREE(mMeshData.mMaterialIndices);
mMeshData.mMaterialIndices = tmp;
}
// keep the face remap table unless it is suppressed (GPU data always needs it)
if (!mParams.suppressTriangleMeshRemapTable || mParams.buildGPUData)
{
mMeshData.mFaceRemap = PX_ALLOCATE(PxU32, newNbTris, "mFaceRemap");
PxMemCopy(mMeshData.mFaceRemap, cleaner.mRemap, newNbTris*sizeof(PxU32));
}
}
}
// PT: deal with geometry
{
// reallocate only when the vertex count actually changed
if(mMeshData.mNbVertices!=cleaner.mNbVerts)
{
PX_FREE(mMeshData.mVertices);
mMeshData.allocateVertices(cleaner.mNbVerts);
}
PxMemCopy(mMeshData.mVertices, cleaner.mVerts, mMeshData.mNbVertices*sizeof(PxVec3));
}
// PT: deal with topology
{
PX_ASSERT(!(mMeshData.mFlags & PxTriangleMeshFlag::e16_BIT_INDICES));
if(mMeshData.mNbTriangles!=cleaner.mNbTris)
{
PX_FREE(mMeshData.mTriangles);
mMeshData.allocateTriangles(cleaner.mNbTris, true);
}
// optional check for overly long edges (limit is scaled by mParams.scale.length)
const bool testEdgeLength = mParams.meshEdgeLengthMaxLimit!=0.0f;
const float testLengthSquared = mParams.meshEdgeLengthMaxLimit * mParams.meshEdgeLengthMaxLimit * mParams.scale.length * mParams.scale.length;
bool foundLargeTriangle = false;
const PxVec3* v = mMeshData.mVertices;
for(PxU32 i=0;i<mMeshData.mNbTriangles;i++)
{
const PxU32 vref0 = cleaner.mIndices[i*3+0];
const PxU32 vref1 = cleaner.mIndices[i*3+1];
const PxU32 vref2 = cleaner.mIndices[i*3+2];
PX_ASSERT(vref0!=vref1 && vref0!=vref2 && vref1!=vref2);
reinterpret_cast<IndexedTriangle32*>(mMeshData.mTriangles)[i].mRef[0] = vref0;
reinterpret_cast<IndexedTriangle32*>(mMeshData.mTriangles)[i].mRef[1] = vref1;
reinterpret_cast<IndexedTriangle32*>(mMeshData.mTriangles)[i].mRef[2] = vref2;
if(testEdgeLength)
{
const PxVec3& v0 = v[vref0];
const PxVec3& v1 = v[vref1];
const PxVec3& v2 = v[vref2];
if( (v0 - v1).magnitudeSquared() >= testLengthSquared
|| (v1 - v2).magnitudeSquared() >= testLengthSquared
|| (v2 - v0).magnitudeSquared() >= testLengthSquared
)
foundLargeTriangle = true;
}
}
// large triangles are a warning, not a failure
if(foundLargeTriangle)
{
if(condition)
*condition = PxTriangleMeshCookingResult::eLARGE_TRIANGLE;
outputError<PxErrorCode::eDEBUG_WARNING>(__LINE__, "TriangleMesh: triangles are too big, reduce their size to increase simulation stability!");
}
}
return true;
}
// Builds an EdgeList (faces-to-edges and edges-to-faces maps) for the given mesh.
// Returns NULL when the edge-list initialization fails.
static EdgeList* createEdgeList(const TriangleMeshData& meshData)
{
	// Describe the mesh topology for the edge-list builder, selecting the
	// 16-bit or 32-bit index stream depending on the mesh's index format.
	EDGELISTCREATE create;
	create.NbFaces = meshData.mNbTriangles;
	if(meshData.has16BitIndices())
	{
		create.WFaces = reinterpret_cast<PxU16*>(meshData.mTriangles);
		create.DFaces = NULL;
	}
	else
	{
		create.WFaces = NULL;
		create.DFaces = reinterpret_cast<PxU32*>(meshData.mTriangles);
	}
	create.FacesToEdges = true;
	create.EdgesToFaces = true;
	create.Verts = meshData.mVertices;
	//create.Epsilon = 0.1f;
//	create.Epsilon = convexEdgeThreshold;

	EdgeList* list = PX_NEW(EdgeList);
	if(!list->init(create))
	{
		// PX_DELETE nulls the pointer, so NULL is returned below on failure.
		PX_DELETE(list);
	}
	return list;
}
// Builds per-triangle "active edge" flags (mExtraTrigData, ETD_CONVEX_EDGE_* bits)
// and, when requested, the triangle adjacency table (mAdjacencies, 3 entries per
// triangle), both derived from a shared edge list.
// \param buildAdjacencies   fill mMeshData.mAdjacencies (requires edge data).
// \param buildActiveEdges   fill mMeshData.mExtraTrigData with active-edge flags.
void TriangleMeshBuilder::createSharedEdgeData(bool buildAdjacencies, bool buildActiveEdges)
{
GU_PROFILE_ZONE("createSharedEdgeData")
// Remember the caller's original request: adjacency building forces edge data
// to be built below, but it is freed again at the end if not explicitly asked for.
const bool savedFlag = buildActiveEdges;
if(buildAdjacencies) // building edges is required if buildAdjacencies is requested
buildActiveEdges = true;
PX_ASSERT(mMeshData.mExtraTrigData == NULL);
PX_ASSERT(mMeshData.mAdjacencies == NULL);
if(!buildActiveEdges)
return;
const PxU32 nTrigs = mMeshData.mNbTriangles;
// Triangle indices must fit in 30 bits, with one value reserved for "unused".
if(0x40000000 <= nTrigs)
{
//mesh is too big for this algo, need to be able to express trig indices in 30 bits, and still have an index reserved for "unused":
outputError<PxErrorCode::eINVALID_PARAMETER>(__LINE__, "TriangleMesh: mesh is too big for this algo!");
return;
}
// One flag byte per triangle, zero-initialized (no convex edges by default).
mMeshData.mExtraTrigData = PX_ALLOCATE(PxU8, nTrigs, "mExtraTrigData");
PxMemZero(mMeshData.mExtraTrigData, sizeof(PxU8)*nTrigs);
const IndexedTriangle32* trigs = reinterpret_cast<const IndexedTriangle32*>(mMeshData.mTriangles);
mEdgeList = createEdgeList(mMeshData);
if(mEdgeList)
{
PX_ASSERT(mEdgeList->getNbFaces()==mMeshData.mNbTriangles);
if(mEdgeList->getNbFaces()==mMeshData.mNbTriangles)
{
// Copy the per-edge "active" bits computed by the edge list into our
// compact per-triangle flag byte.
for(PxU32 i=0;i<mEdgeList->getNbFaces();i++)
{
const EdgeTriangleData& ET = mEdgeList->getEdgeTriangle(i);
// Replicate flags
if(EdgeTriangleAC::HasActiveEdge01(ET))
mMeshData.mExtraTrigData[i] |= ETD_CONVEX_EDGE_01;
if(EdgeTriangleAC::HasActiveEdge12(ET))
mMeshData.mExtraTrigData[i] |= ETD_CONVEX_EDGE_12;
if(EdgeTriangleAC::HasActiveEdge20(ET))
mMeshData.mExtraTrigData[i] |= ETD_CONVEX_EDGE_20;
}
}
}
// fill the adjacencies
if(buildAdjacencies)
{
mMeshData.mAdjacencies = PX_ALLOCATE(PxU32, nTrigs*3, "mAdjacencies");
// 0xffffffff marks "no neighbor" for each of the three triangle edges.
memset(mMeshData.mAdjacencies, 0xFFFFffff, sizeof(PxU32)*nTrigs*3);
PxU32 NbEdges = mEdgeList->getNbEdges();
const EdgeDescData* ED = mEdgeList->getEdgeToTriangles();
const EdgeData* Edges = mEdgeList->getEdges();
const PxU32* FBE = mEdgeList->getFacesByEdges();
while(NbEdges--)
{
// Get number of triangles sharing current edge
const PxU32 Count = ED->Count;
if(Count > 1)
{
// NOTE(review): only the first two faces sharing the edge are linked;
// extra faces on non-manifold edges appear to be ignored — confirm intended.
const PxU32 FaceIndex0 = FBE[ED->Offset+0];
const PxU32 FaceIndex1 = FBE[ED->Offset+1];
const EdgeData& edgeData = *Edges;
const IndexedTriangle32& T0 = trigs[FaceIndex0];
const IndexedTriangle32& T1 = trigs[FaceIndex1];
// Find which local edge (0..2) of each triangle matches this shared edge.
const PxU32 offset0 = T0.findEdgeCCW(edgeData.Ref0,edgeData.Ref1);
const PxU32 offset1 = T1.findEdgeCCW(edgeData.Ref0,edgeData.Ref1);
mMeshData.setTriangleAdjacency(FaceIndex0, FaceIndex1, offset0);
mMeshData.setTriangleAdjacency(FaceIndex1, FaceIndex0, offset1);
}
ED++;
Edges++;
}
}
#if PX_DEBUG
// Verify the flag bytes exactly mirror the edge list's active-edge bits.
for(PxU32 i=0;i<mMeshData.mNbTriangles;i++)
{
const IndexedTriangle32& T = trigs[i];
PX_UNUSED(T);
const EdgeTriangleData& ET = mEdgeList->getEdgeTriangle(i);
PX_ASSERT((EdgeTriangleAC::HasActiveEdge01(ET) && (mMeshData.mExtraTrigData[i] & ETD_CONVEX_EDGE_01)) || (!EdgeTriangleAC::HasActiveEdge01(ET) && !(mMeshData.mExtraTrigData[i] & ETD_CONVEX_EDGE_01)));
PX_ASSERT((EdgeTriangleAC::HasActiveEdge12(ET) && (mMeshData.mExtraTrigData[i] & ETD_CONVEX_EDGE_12)) || (!EdgeTriangleAC::HasActiveEdge12(ET) && !(mMeshData.mExtraTrigData[i] & ETD_CONVEX_EDGE_12)));
PX_ASSERT((EdgeTriangleAC::HasActiveEdge20(ET) && (mMeshData.mExtraTrigData[i] & ETD_CONVEX_EDGE_20)) || (!EdgeTriangleAC::HasActiveEdge20(ET) && !(mMeshData.mExtraTrigData[i] & ETD_CONVEX_EDGE_20)));
}
#endif
// PT: respect the PxMeshPreprocessingFlag::eDISABLE_ACTIVE_EDGES_PRECOMPUTE flag. This is important for
// deformable meshes - even if the edge data was needed on-the-fly to compute adjacencies.
if(!savedFlag)
PX_FREE(mMeshData.mExtraTrigData);
}
void TriangleMeshBuilder::createVertMapping()
{
GU_PROFILE_ZONE("createVertMapping")
const PxU32 nbVerts = mMeshData.mNbVertices;
mMeshData.mAccumulatedTrianglesRef = PX_ALLOCATE(PxU32, nbVerts, "accumulatedTrianglesRef");
PxU32* tempCounts = PX_ALLOCATE(PxU32, nbVerts, "tempCounts");
PxU32* triangleCounts = mMeshData.mAccumulatedTrianglesRef;
PxMemZero(triangleCounts, sizeof(PxU32) * nbVerts);
PxMemZero(tempCounts, sizeof(PxU32) * nbVerts);
const PxU32 nbTriangles = mMeshData.mNbTriangles;
IndexedTriangle32* triangles = reinterpret_cast<IndexedTriangle32*>(mMeshData.mGRB_primIndices);
for (PxU32 i = 0; i < nbTriangles; i++)
{
IndexedTriangle32& triangle = triangles[i];
triangleCounts[triangle.mRef[0]]++;
triangleCounts[triangle.mRef[1]]++;
triangleCounts[triangle.mRef[2]]++;
}
//compute runsum
PxU32 totalReference = 0;
for (PxU32 i = 0; i < nbVerts; ++i)
{
PxU32 originalReference = triangleCounts[i];
triangleCounts[i] = totalReference;
totalReference += originalReference;
}
PX_ASSERT(totalReference == nbTriangles * 3);
mMeshData.mTrianglesReferences = PX_ALLOCATE(PxU32, totalReference, "mTrianglesReferences");
mMeshData.mNbTrianglesReferences = totalReference;
PxU32* triangleRefs = mMeshData.mTrianglesReferences;
for (PxU32 i = 0; i < nbTriangles; i++)
{
IndexedTriangle32& triangle = triangles[i];
const PxU32 ind0 = triangle.mRef[0];
const PxU32 ind1 = triangle.mRef[1];
const PxU32 ind2 = triangle.mRef[2];
triangleRefs[triangleCounts[ind0] + tempCounts[ind0]] = i;
tempCounts[ind0]++;
triangleRefs[triangleCounts[ind1] + tempCounts[ind1]] = i;
tempCounts[ind1]++;
triangleRefs[triangleCounts[ind2] + tempCounts[ind2]] = i;
tempCounts[ind2]++;
}
PX_FREE(tempCounts);
}
// Mirrors the (midphase-reordered) CPU triangle indices into the GPU index
// buffer. Only relevant when GPU data cooking is enabled.
void TriangleMeshBuilder::recordTriangleIndices()
{
	if(!mParams.buildGPUData)
		return;

	PX_ASSERT(!(mMeshData.mFlags & PxTriangleMeshFlag::e16_BIT_INDICES));
	PX_ASSERT(mMeshData.mGRB_primIndices);

	//copy the BV4 triangle indices to GPU triangle indices buffer
	PxMemCopy(mMeshData.mGRB_primIndices, mMeshData.mTriangles, sizeof(IndTri32) * mMeshData.mNbTriangles);
}
void TriangleMeshBuilder::createGRBData()
{
GU_PROFILE_ZONE("buildAdjacencies")
const PxU32 numTris = mMeshData.mNbTriangles;
PX_ASSERT(!(mMeshData.mFlags & PxTriangleMeshFlag::e16_BIT_INDICES));
// Core: Mesh data
///////////////////////////////////////////////////////////////////////////////////
// (by using adjacency info generated by physx cooker)
PxVec3* tempNormalsPerTri_prealloc = PX_ALLOCATE(PxVec3, numTris, "tempNormalsPerTri_prealloc");
mMeshData.mGRB_primAdjacencies = PX_ALLOCATE(uint4, numTris, "GRB_triAdjacencies");
buildAdjacencies(
reinterpret_cast<uint4*>(mMeshData.mGRB_primAdjacencies),
tempNormalsPerTri_prealloc,
mMeshData.mVertices,
reinterpret_cast<IndexedTriangle32*>(mMeshData.mGRB_primIndices),
numTris
);
PX_FREE(tempNormalsPerTri_prealloc);
}
// Builds the GPU midphase (BV32 tree), GPU adjacency data and the vertex
// mapping. A no-op returning true when GPU data cooking is disabled.
bool TriangleMeshBuilder::createGRBMidPhaseAndData(const PxU32 originalTriangleCount)
{
	PX_UNUSED(originalTriangleCount);

	if(!mParams.buildGPUData)
		return true;

	PX_ASSERT(!(mMeshData.mFlags & PxTriangleMeshFlag::e16_BIT_INDICES));

	BV32Tree* bv32Tree = PX_NEW(BV32Tree);
	mMeshData.mGRB_BV32Tree = bv32Tree;

	if(!BV32TriangleMeshBuilder::createMidPhaseStructure(mParams, mMeshData, *bv32Tree))
		return false;

	createGRBData();

	// NOTE: "|| mParams.buildGPUData" is always true in this scope; kept for clarity.
	if(mParams.meshPreprocessParams & PxMeshPreprocessingFlag::eENABLE_VERT_MAPPING || mParams.buildGPUData)
		createVertMapping();

#if BV32_VALIDATE
	IndTri32* grbTriIndices = reinterpret_cast<IndTri32*>(mMeshData.mGRB_primIndices);
	IndTri32* cpuTriIndices = reinterpret_cast<IndTri32*>(mMeshData.mTriangles);
	//map CPU remap triangle index to GPU remap triangle index
	for(PxU32 i = 0; i < mMeshData.mNbTriangles; ++i)
	{
		PX_ASSERT(grbTriIndices[i].mRef[0] == cpuTriIndices[mMeshData.mGRB_faceRemap[i]].mRef[0]);
		PX_ASSERT(grbTriIndices[i].mRef[1] == cpuTriIndices[mMeshData.mGRB_faceRemap[i]].mRef[1]);
		PX_ASSERT(grbTriIndices[i].mRef[2] == cpuTriIndices[mMeshData.mGRB_faceRemap[i]].mRef[2]);
	}
#endif
	return true;
}
// Full cooking pipeline for a (mutable) mesh descriptor: import/clean the
// input, build the midphase, compute local bounds, build shared edge data,
// then build the GPU (GRB) data. Returns false and reports an error on failure.
bool TriangleMeshBuilder::loadFromDescInternal(PxTriangleMeshDesc& desc, PxTriangleMeshCookingResult::Enum* condition, bool validateMesh)
{
#ifdef PROFILE_MESH_COOKING
	printf("\n");
#endif
	const PxU32 originalTriangleCount = desc.triangles.count;
	if (!desc.isValid())
		return outputError<PxErrorCode::eINVALID_PARAMETER>(__LINE__, "TriangleMesh::loadFromDesc: desc.isValid() failed!");

	// verify the mesh params
	if (!mParams.midphaseDesc.isValid())
		return outputError<PxErrorCode::eINVALID_PARAMETER>(__LINE__, "TriangleMesh::loadFromDesc: mParams.midphaseDesc.isValid() failed!");

	// Save simple params
	{
		// Non-indexed meshes get implicit topology: 32-bit indices 0,1,2,...
		PxU32* implicitIndices = NULL;
		if (!desc.triangles.data)
		{
			// We'll create 32-bit indices
			desc.flags &= ~PxMeshFlag::e16_BIT_INDICES;
			desc.triangles.stride = sizeof(PxU32) * 3;
			desc.triangles.count = desc.points.count / 3;

			implicitIndices = PX_ALLOCATE(PxU32, desc.points.count, "topology");
			for (PxU32 i = 0; i < desc.points.count; i++)
				implicitIndices[i] = i;
			desc.triangles.data = implicitIndices;
		}

		// Convert and clean the input mesh; the implicit topology (if any) is
		// consumed by importMesh and can be released either way.
		const bool imported = importMesh(desc, condition, validateMesh);
		PX_FREE(implicitIndices);
		if (!imported)
			return false;
	}

	if(!createMidPhaseStructure())
		return false;

	//copy the BV4 triangle indices to grb triangle indices if buildGRBData is true
	recordTriangleIndices();

	// Compute local bounds
	computeLocalBoundsAndGeomEpsilon(mMeshData.mVertices, mMeshData.mNbVertices, mMeshData.mAABB, mMeshData.mGeomEpsilon);

	createSharedEdgeData(mParams.buildTriangleAdjacencies, !(mParams.meshPreprocessParams & PxMeshPreprocessingFlag::eDISABLE_ACTIVE_EDGES_PRECOMPUTE));

	return createGRBMidPhaseAndData(originalTriangleCount);
}
// Computes mass, inertia tensor and center of mass from the cooked mesh via
// volume integrals. \param flipNormals  integrate with reversed triangle winding.
void TriangleMeshBuilder::buildInertiaTensor(bool flipNormals)
{
	// Wrap the cooked data in a descriptor for the volume-integral routine.
	PxTriangleMeshDesc simpleMesh;
	simpleMesh.points.count = mMeshData.mNbVertices;
	simpleMesh.points.stride = sizeof(PxVec3);
	simpleMesh.points.data = mMeshData.mVertices;
	simpleMesh.triangles.count = mMeshData.mNbTriangles;
	simpleMesh.triangles.stride = sizeof(PxU32) * 3;
	simpleMesh.triangles.data = mMeshData.mTriangles;
	simpleMesh.flags &= (~PxMeshFlag::e16_BIT_INDICES);
	if (flipNormals)
		simpleMesh.flags.raise(PxMeshFlag::eFLIPNORMALS);

	PxIntegrals integrals;
	computeVolumeIntegrals(simpleMesh, 1, integrals);

	integrals.getOriginInertia(mMeshData.mInertia);
	mMeshData.mMass = PxReal(integrals.mass);
	mMeshData.mLocalCenterOfMass = integrals.COM;
}
void TriangleMeshBuilder::buildInertiaTensorFromSDF()
{
if (MeshAnalyzer::checkMeshWatertightness(reinterpret_cast<const Gu::Triangle*>(mMeshData.mTriangles), mMeshData.mNbTriangles))
{
buildInertiaTensor();
if (mMeshData.mMass < 0.0f)
buildInertiaTensor(true); //The mesh can be watertight but all triangles might be oriented the wrong way round
return;
}
PxArray<PxVec3> points;
PxArray<PxU32> triangleIndices;
extractIsosurfaceFromSDF(mMeshData.mSdfData, points, triangleIndices);
PxTriangleMeshDesc simpleMesh;
simpleMesh.points.count = points.size();
simpleMesh.points.stride = sizeof(PxVec3);
simpleMesh.points.data = points.begin();
simpleMesh.triangles.count = triangleIndices.size() / 3;
simpleMesh.triangles.stride = sizeof(PxU32) * 3;
simpleMesh.triangles.data = triangleIndices.begin();
simpleMesh.flags &= (~PxMeshFlag::e16_BIT_INDICES);
PxIntegrals integrals;
computeVolumeIntegrals(simpleMesh, 1, integrals);
integrals.getOriginInertia(mMeshData.mInertia);
mMeshData.mMass = PxReal(integrals.mass);
mMeshData.mLocalCenterOfMass = integrals.COM;
}
//
// When suppressTriangleMeshRemapTable is true, the face remap table is not created. This saves a significant amount of memory,
// but the SDK will not be able to provide information about which mesh triangle is hit in collision, sweep or raycast hits.
//
// The cooking sequence is: copy/clean the input mesh, build the midphase structure, compute local bounds,
// build the shared edge data, and finally build the GPU (GRB) data when requested.
bool TriangleMeshBuilder::loadFromDesc(const PxTriangleMeshDesc& _desc, PxTriangleMeshCookingResult::Enum* condition, bool validateMesh)
{
PxTriangleMeshDesc desc = _desc;
return loadFromDescInternal(desc, condition, validateMesh);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Serializes the cooked mesh to a stream. The write order here is the binary
// format contract: header, midphase ID, serial flags, vertices, indices
// (narrowed to 8/16 bits when possible), optional materials/remap/adjacency,
// midphase, bounds, extra-trig data, optional GRB/SDF/inertia payloads.
// \param platformMismatch  endian-swap scalar writes for the target platform.
bool TriangleMeshBuilder::save(PxOutputStream& stream, bool platformMismatch, const PxCookingParams& params) const
{
// Export header
if(!writeHeader('M', 'E', 'S', 'H', PX_MESH_VERSION, platformMismatch, stream))
return false;
// Export midphase ID
writeDword(getMidphaseID(), platformMismatch, stream);
// Export serialization flags
PxU32 serialFlags = 0;
if(mMeshData.mMaterialIndices) serialFlags |= IMSF_MATERIALS;
if(mMeshData.mFaceRemap) serialFlags |= IMSF_FACE_REMAP;
if(mMeshData.mAdjacencies) serialFlags |= IMSF_ADJACENCIES;
if (params.buildGPUData) serialFlags |= IMSF_GRB_DATA;
if (mMeshData.mGRB_faceRemapInverse) serialFlags |= IMSF_GRB_INV_REMAP;
// Compute serialization flags for indices
// (largest vertex index decides whether indices can be stored in 8/16 bits)
PxU32 maxIndex=0;
const IndexedTriangle32* tris = reinterpret_cast<const IndexedTriangle32*>(mMeshData.mTriangles);
for(PxU32 i=0;i<mMeshData.mNbTriangles;i++)
{
if(tris[i].mRef[0]>maxIndex) maxIndex = tris[i].mRef[0];
if(tris[i].mRef[1]>maxIndex) maxIndex = tris[i].mRef[1];
if(tris[i].mRef[2]>maxIndex) maxIndex = tris[i].mRef[2];
}
bool enableSdf = mMeshData.mSdfData.mSdf ? true : false;
if(enableSdf) serialFlags |= IMSF_SDF;
// SDF meshes always serialize inertia (see buildInertiaTensorFromSDF).
bool enableInertia = (params.meshPreprocessParams & PxMeshPreprocessingFlag::eENABLE_INERTIA) || enableSdf;
if(enableInertia)
serialFlags |= IMSF_INERTIA;
bool force32 = (params.meshPreprocessParams & PxMeshPreprocessingFlag::eFORCE_32BIT_INDICES);
if (maxIndex <= 0xFFFF && !force32)
serialFlags |= (maxIndex <= 0xFF ? IMSF_8BIT_INDICES : IMSF_16BIT_INDICES);
bool enableVertexMapping = (params.buildGPUData || (params.meshPreprocessParams & PxMeshPreprocessingFlag::eENABLE_VERT_MAPPING));
if (enableVertexMapping)
serialFlags |= IMSF_VERT_MAPPING;
writeDword(serialFlags, platformMismatch, stream);
// Export mesh
writeDword(mMeshData.mNbVertices, platformMismatch, stream);
writeDword(mMeshData.mNbTriangles, platformMismatch, stream);
writeFloatBuffer(&mMeshData.mVertices->x, mMeshData.mNbVertices*3, platformMismatch, stream);
// Indices are written at the narrowest width the flags allow.
if(serialFlags & IMSF_8BIT_INDICES)
{
const PxU32* indices = tris->mRef;
for(PxU32 i=0;i<mMeshData.mNbTriangles*3;i++)
{
PxI8 data = PxI8(indices[i]);
stream.write(&data, sizeof(PxU8));
}
}
else if(serialFlags & IMSF_16BIT_INDICES)
{
const PxU32* indices = tris->mRef;
for(PxU32 i=0;i<mMeshData.mNbTriangles*3;i++)
writeWord(PxTo16(indices[i]), platformMismatch, stream);
}
else
writeIntBuffer(tris->mRef, mMeshData.mNbTriangles*3, platformMismatch, stream);
if(mMeshData.mMaterialIndices)
writeWordBuffer(mMeshData.mMaterialIndices, mMeshData.mNbTriangles, platformMismatch, stream);
if(mMeshData.mFaceRemap)
{
// Remap table is stored width-reduced: max value first, then packed indices.
PxU32 maxId = computeMaxIndex(mMeshData.mFaceRemap, mMeshData.mNbTriangles);
writeDword(maxId, platformMismatch, stream);
storeIndices(maxId, mMeshData.mNbTriangles, mMeshData.mFaceRemap, stream, platformMismatch);
//		writeIntBuffer(mMeshData.mFaceRemap, mMeshData.mNbTriangles, platformMismatch, stream);
}
if(mMeshData.mAdjacencies)
writeIntBuffer(mMeshData.mAdjacencies, mMeshData.mNbTriangles*3, platformMismatch, stream);
// Export midphase structure
saveMidPhaseStructure(stream, platformMismatch);
// Export local bounds
writeFloat(mMeshData.mGeomEpsilon, platformMismatch, stream);
writeFloat(mMeshData.mAABB.minimum.x, platformMismatch, stream);
writeFloat(mMeshData.mAABB.minimum.y, platformMismatch, stream);
writeFloat(mMeshData.mAABB.minimum.z, platformMismatch, stream);
writeFloat(mMeshData.mAABB.maximum.x, platformMismatch, stream);
writeFloat(mMeshData.mAABB.maximum.y, platformMismatch, stream);
writeFloat(mMeshData.mAABB.maximum.z, platformMismatch, stream);
if(mMeshData.mExtraTrigData)
{
writeDword(mMeshData.mNbTriangles, platformMismatch, stream);
// No need to convert those bytes
stream.write(mMeshData.mExtraTrigData, mMeshData.mNbTriangles*sizeof(PxU8));
}
else
writeDword(0, platformMismatch, stream);
// GRB write -----------------------------------------------------------------
if (params.buildGPUData)
{
// GPU indices use the same 8/16/32-bit narrowing decided above.
const PxU32* indices = reinterpret_cast<PxU32*>(mMeshData.mGRB_primIndices);
if (serialFlags & IMSF_8BIT_INDICES)
{
for (PxU32 i = 0; i<mMeshData.mNbTriangles * 3; i++)
{
PxI8 data = PxI8(indices[i]);
stream.write(&data, sizeof(PxU8));
}
}
else if (serialFlags & IMSF_16BIT_INDICES)
{
for (PxU32 i = 0; i<mMeshData.mNbTriangles * 3; i++)
writeWord(PxTo16(indices[i]), platformMismatch, stream);
}
else
writeIntBuffer(indices, mMeshData.mNbTriangles * 3, platformMismatch, stream);
//writeIntBuffer(reinterpret_cast<PxU32*>(mMeshData.mGRB_triIndices), , mMeshData.mNbTriangles*3, platformMismatch, stream);
//writeIntBuffer(reinterpret_cast<PxU32 *>(mMeshData.mGRB_triIndices), mMeshData.mNbTriangles*4, platformMismatch, stream);
writeIntBuffer(reinterpret_cast<PxU32 *>(mMeshData.mGRB_primAdjacencies), mMeshData.mNbTriangles*4, platformMismatch, stream);
writeIntBuffer(mMeshData.mGRB_faceRemap, mMeshData.mNbTriangles, platformMismatch, stream);
if(mMeshData.mGRB_faceRemapInverse)
writeIntBuffer(mMeshData.mGRB_faceRemapInverse, mMeshData.mNbTriangles, platformMismatch, stream);
//Export GPU midphase structure
BV32Tree* bv32Tree = mMeshData.mGRB_BV32Tree;
BV32TriangleMeshBuilder::saveMidPhaseStructure(bv32Tree, stream, platformMismatch);
//Export vertex mapping
if (enableVertexMapping)
{
writeDword(mMeshData.mNbTrianglesReferences, platformMismatch, stream);
stream.write(mMeshData.mAccumulatedTrianglesRef, mMeshData.mNbVertices * sizeof(PxU32));
stream.write(mMeshData.mTrianglesReferences, mMeshData.mNbTrianglesReferences * sizeof(PxU32));
}
}
// End of GRB write ----------------------------------------------------------
// Export sdf values
if (enableSdf)
{
writeFloat(mMeshData.mSdfData.mMeshLower.x, platformMismatch, stream);
writeFloat(mMeshData.mSdfData.mMeshLower.y, platformMismatch, stream);
writeFloat(mMeshData.mSdfData.mMeshLower.z, platformMismatch, stream);
writeFloat(mMeshData.mSdfData.mSpacing, platformMismatch, stream);
writeDword(mMeshData.mSdfData.mDims.x, platformMismatch, stream);
writeDword(mMeshData.mSdfData.mDims.y, platformMismatch, stream);
writeDword(mMeshData.mSdfData.mDims.z, platformMismatch, stream);
writeDword(mMeshData.mSdfData.mNumSdfs, platformMismatch, stream);
writeDword(mMeshData.mSdfData.mNumSubgridSdfs, platformMismatch, stream);
writeDword(mMeshData.mSdfData.mNumStartSlots, platformMismatch, stream);
writeDword(mMeshData.mSdfData.mSubgridSize, platformMismatch, stream);
writeDword(mMeshData.mSdfData.mSdfSubgrids3DTexBlockDim.x, platformMismatch, stream);
writeDword(mMeshData.mSdfData.mSdfSubgrids3DTexBlockDim.y, platformMismatch, stream);
writeDword(mMeshData.mSdfData.mSdfSubgrids3DTexBlockDim.z, platformMismatch, stream);
writeFloat(mMeshData.mSdfData.mSubgridsMinSdfValue, platformMismatch, stream);
writeFloat(mMeshData.mSdfData.mSubgridsMaxSdfValue, platformMismatch, stream);
writeDword(mMeshData.mSdfData.mBytesPerSparsePixel, platformMismatch, stream);
writeFloatBuffer(mMeshData.mSdfData.mSdf, mMeshData.mSdfData.mNumSdfs, platformMismatch, stream);
writeByteBuffer(mMeshData.mSdfData.mSubgridSdf, mMeshData.mSdfData.mNumSubgridSdfs, stream);
writeIntBuffer(mMeshData.mSdfData.mSubgridStartSlots, mMeshData.mSdfData.mNumStartSlots, platformMismatch, stream);
}
//Export Inertia tensor
if(enableInertia)
{
writeFloat(mMeshData.mMass, platformMismatch, stream);
writeFloatBuffer(reinterpret_cast<const PxF32*>(&mMeshData.mInertia), 9, platformMismatch, stream);
writeFloatBuffer(&mMeshData.mLocalCenterOfMass.x, 3, platformMismatch, stream);
}
return true;
}
///////////////////////////////////////////////////////////////////////////////
#if PX_VC
#pragma warning(push)
#pragma warning(disable:4996) // permitting use of gatherStrided until we have a replacement.
#endif
#if PX_CHECKED
// Validates that every float in the buffer is finite (no NaN/inf).
// Reports an internal error (with the caller's file/line) and returns false
// on the first corrupted value; returns true when all values are finite.
bool checkInputFloats(PxU32 nb, const float* values, const char* file, PxU32 line, const char* errorMsg)
{
	for(PxU32 i = 0; i < nb; i++)
	{
		if(!PxIsFinite(values[i]))
			return PxGetFoundation().error(PxErrorCode::eINTERNAL_ERROR, file, line, errorMsg);
	}
	return true;
}
#endif
// Copies the user-supplied mesh descriptor into internal storage (compacting
// strides, widening 16-bit indices, honoring eFLIPNORMALS), optionally cleans
// the mesh, computes inertia when requested, and cooks SDF data if present.
// \param condition  optional out: cooking result code (e.g. eLARGE_TRIANGLE).
// \param validate   force the mesh-cleaning path even if cleaning is disabled.
bool TriangleMeshBuilder::importMesh(const PxTriangleMeshDesc& desc, PxTriangleMeshCookingResult::Enum* condition, bool validate)
{
//convert and clean the input mesh
//this is where the mesh data gets copied from user mem to our mem
PxVec3* verts = mMeshData.allocateVertices(desc.points.count);
// Triangles are always stored 32-bit internally at this stage; GRB buffers
// are allocated too when GPU data cooking is on.
IndexedTriangle32* tris = reinterpret_cast<IndexedTriangle32*>(mMeshData.allocateTriangles(desc.triangles.count, true, PxU32(mParams.buildGPUData)));
//copy, and compact to get rid of strides:
immediateCooking::gatherStrided(desc.points.data, verts, mMeshData.mNbVertices, sizeof(PxVec3), desc.points.stride);
#if PX_CHECKED
// PT: check all input vertices are valid
if(!checkInputFloats(desc.points.count*3, &verts->x, PX_FL, "input mesh contains corrupted vertex data"))
return false;
#endif
//for trigs index stride conversion and eventual reordering is also needed, I don't think flexicopy can do that for us.
IndexedTriangle32* dest = tris;
const IndexedTriangle32* pastLastDest = tris + mMeshData.mNbTriangles;
const PxU8* source = reinterpret_cast<const PxU8*>(desc.triangles.data);
//4 combos of 16 vs 32 and flip vs no flip
// c==1 swaps indices 1 and 2 (via [1+c] / [2-c]), flipping the winding.
PxU32 c = (desc.flags & PxMeshFlag::eFLIPNORMALS)?PxU32(1):0;
if (desc.flags & PxMeshFlag::e16_BIT_INDICES)
{
//index stride conversion is also needed, I don't think flexicopy can do that for us.
while (dest < pastLastDest)
{
const PxU16 * trig16 = reinterpret_cast<const PxU16*>(source);
dest->mRef[0] = trig16[0];
dest->mRef[1] = trig16[1+c];
dest->mRef[2] = trig16[2-c];
dest ++;
source += desc.triangles.stride;
}
}
else
{
while (dest < pastLastDest)
{
const PxU32 * trig32 = reinterpret_cast<const PxU32*>(source);
dest->mRef[0] = trig32[0];
dest->mRef[1] = trig32[1+c];
dest->mRef[2] = trig32[2-c];
dest ++;
source += desc.triangles.stride;
}
}
//copy the material index list if any:
if(desc.materialIndices.data)
{
PxMaterialTableIndex* materials = mMeshData.allocateMaterials();
immediateCooking::gatherStrided(desc.materialIndices.data, materials, mMeshData.mNbTriangles, sizeof(PxMaterialTableIndex), desc.materialIndices.stride);
// Check material indices
for(PxU32 i=0;i<mMeshData.mNbTriangles;i++)
PX_ASSERT(materials[i]!=0xffff);
}
// Clean the mesh using ICE's MeshBuilder
// This fixes the bug in ConvexTest06 where the inertia tensor computation fails for a mesh => it works with a clean mesh
if (!(mParams.meshPreprocessParams & PxMeshPreprocessingFlag::eDISABLE_CLEAN_MESH) || validate)
{
if(!cleanMesh(validate, condition))
return false;
}
else
{
// we need to fill the remap table if no cleaning was done
// (identity mapping: triangle i maps to input triangle i)
if(mParams.suppressTriangleMeshRemapTable == false)
{
PX_ASSERT(mMeshData.mFaceRemap == NULL);
mMeshData.mFaceRemap = PX_ALLOCATE(PxU32, mMeshData.mNbTriangles, "mFaceRemap");
for (PxU32 i = 0; i < mMeshData.mNbTriangles; i++)
mMeshData.mFaceRemap[i] = i;
}
}
if (mParams.meshPreprocessParams & PxMeshPreprocessingFlag::eENABLE_INERTIA)
{
buildInertiaTensor();
}
// Copy sdf data if enabled
if (desc.sdfDesc)
{
PxArray<PxReal> sdfData;
PxArray<PxU8> sdfDataSubgrids;
PxArray<PxU32> sdfSubgridsStartSlots;
// Build the SDF from the (possibly cleaned) internal mesh, not the raw input.
PxTriangleMeshDesc newDesc;
newDesc.points.count = mMeshData.mNbVertices;
newDesc.points.stride = sizeof(PxVec3);
newDesc.points.data = mMeshData.mVertices;
newDesc.triangles.count = mMeshData.mNbTriangles;
newDesc.triangles.stride = sizeof(PxU32) * 3;
newDesc.triangles.data = mMeshData.mTriangles;
newDesc.flags &= (~PxMeshFlag::e16_BIT_INDICES);
newDesc.sdfDesc = desc.sdfDesc;
buildSDF(newDesc, sdfData, sdfDataSubgrids, sdfSubgridsStartSlots);
PxSDFDesc& sdfDesc = *desc.sdfDesc;
PxReal* sdf = mMeshData.mSdfData.allocateSdfs(sdfDesc.meshLower, sdfDesc.spacing, sdfDesc.dims.x, sdfDesc.dims.y, sdfDesc.dims.z,
sdfDesc.subgridSize, sdfDesc.sdfSubgrids3DTexBlockDim.x, sdfDesc.sdfSubgrids3DTexBlockDim.y, sdfDesc.sdfSubgrids3DTexBlockDim.z,
sdfDesc.subgridsMinSdfValue, sdfDesc.subgridsMaxSdfValue, sdfDesc.bitsPerSubgridPixel);
if (sdfDesc.subgridSize > 0)
{
//Sparse sdf
immediateCooking::gatherStrided(sdfDesc.sdf.data, sdf, sdfDesc.sdf.count, sizeof(PxReal), sdfDesc.sdf.stride);
immediateCooking::gatherStrided(sdfDesc.sdfSubgrids.data, mMeshData.mSdfData.mSubgridSdf,
sdfDesc.sdfSubgrids.count,
sizeof(PxU8), sdfDesc.sdfSubgrids.stride);
immediateCooking::gatherStrided(sdfDesc.sdfStartSlots.data, mMeshData.mSdfData.mSubgridStartSlots, sdfDesc.sdfStartSlots.count, sizeof(PxU32), sdfDesc.sdfStartSlots.stride);
}
else
{
//copy, and compact to get rid of strides:
immediateCooking::gatherStrided(sdfDesc.sdf.data, sdf, sdfDesc.dims.x*sdfDesc.dims.y*sdfDesc.dims.z, sizeof(PxReal), sdfDesc.sdf.stride);
}
//Make sure there is always a valid inertia tensor for meshes with an SDF
buildInertiaTensorFromSDF();
#if PX_CHECKED
// SN: check all input sdf values are valid
if(!checkInputFloats(sdfDesc.sdf.count, sdf, PX_FL, "input sdf contains corrupted data"))
return false;
#endif
}
return true;
}
#if PX_VC
#pragma warning(pop)
#endif
///////////////////////////////////////////////////////////////////////////////
// Narrows triangle indices from 32-bit to 16-bit storage (CPU and, if present,
// GRB buffers) when every vertex index fits in 16 bits. No-op otherwise.
void TriangleMeshBuilder::checkMeshIndicesSize()
{
TriangleMeshData& m = mMeshData;
// check if we can change indices from 32bits to 16bits
if(m.mNbVertices <= 0xffff && !m.has16BitIndices())
{
const PxU32 numTriangles = m.mNbTriangles;
// Keep the old 32-bit buffers alive as the copy source while reallocating.
PxU32* PX_RESTRICT indices32 = reinterpret_cast<PxU32*> (m.mTriangles);
PxU32* PX_RESTRICT grbIndices32 = reinterpret_cast<PxU32*>(m.mGRB_primIndices);
m.mTriangles = 0; // force a realloc
m.allocateTriangles(numTriangles, false, grbIndices32 != NULL ? 1u : 0u);
PX_ASSERT(m.has16BitIndices()); // realloc'ing without the force32bit flag changed it.
PxU16* PX_RESTRICT indices16 = reinterpret_cast<PxU16*> (m.mTriangles);
for (PxU32 i = 0; i < numTriangles * 3; i++)
indices16[i] = PxTo16(indices32[i]);
PX_FREE(indices32);
if (grbIndices32)
{
// allocateTriangles above also reallocated mGRB_primIndices as 16-bit.
PxU16* PX_RESTRICT grbIndices16 = reinterpret_cast<PxU16*> (m.mGRB_primIndices);
for (PxU32 i = 0; i < numTriangles * 3; i++)
grbIndices16[i] = PxTo16(grbIndices32[i]);
}
PX_FREE(grbIndices32);
// Let the concrete builder re-point its midphase interface at the new buffers.
onMeshIndexFormatChange();
}
}
///////////////////////////////////////////////////////////////////////////////
// The BV4 builder owns its midphase data (mData) and passes a reference to the
// base class; binding a not-yet-initialized member by reference here is safe.
BV4TriangleMeshBuilder::BV4TriangleMeshBuilder(const PxCookingParams& params) : TriangleMeshBuilder(mData, params)
{
}
BV4TriangleMeshBuilder::~BV4TriangleMeshBuilder()
{
}
void BV4TriangleMeshBuilder::onMeshIndexFormatChange()
{
IndTri32* triangles32 = NULL;
IndTri16* triangles16 = NULL;
if(mMeshData.mFlags & PxTriangleMeshFlag::e16_BIT_INDICES)
triangles16 = reinterpret_cast<IndTri16*>(mMeshData.mTriangles);
else
triangles32 = reinterpret_cast<IndTri32*>(mMeshData.mTriangles);
mData.mMeshInterface.setPointers(triangles32, triangles16, mMeshData.mVertices);
}
// Builds the BV4 midphase tree over the cooked mesh, then applies the tree's
// triangle reordering ("remap") to the material and face-remap tables so they
// stay in sync with the reordered triangle buffer.
bool BV4TriangleMeshBuilder::createMidPhaseStructure()
{
GU_PROFILE_ZONE("createMidPhaseStructure_BV4")
const float gBoxEpsilon = 2e-4f;
//	const float gBoxEpsilon = 0.1f;
mData.mMeshInterface.initRemap();
mData.mMeshInterface.setNbVertices(mMeshData.mNbVertices);
mData.mMeshInterface.setNbTriangles(mMeshData.mNbTriangles);
// Feed the interface whichever index width the mesh currently uses.
IndTri32* triangles32 = NULL;
IndTri16* triangles16 = NULL;
if (mMeshData.mFlags & PxTriangleMeshFlag::e16_BIT_INDICES)
triangles16 = reinterpret_cast<IndTri16*>(mMeshData.mTriangles);
else
triangles32 = reinterpret_cast<IndTri32*>(mMeshData.mTriangles);
mData.mMeshInterface.setPointers(triangles32, triangles16, mMeshData.mVertices);
PX_ASSERT(mParams.midphaseDesc.getType() == PxMeshMidPhase::eBVH34);
// Map the public build-strategy enum to the internal BV4 one.
const PxU32 nbTrisPerLeaf = mParams.midphaseDesc.mBVH34Desc.numPrimsPerLeaf;
const bool quantized = mParams.midphaseDesc.mBVH34Desc.quantized;
const PxBVH34BuildStrategy::Enum strategy = mParams.midphaseDesc.mBVH34Desc.buildStrategy;
BV4_BuildStrategy gubs = BV4_SPLATTER_POINTS_SPLIT_GEOM_CENTER;	// Default
if(strategy==PxBVH34BuildStrategy::eSAH)
gubs = BV4_SAH;
else if(strategy==PxBVH34BuildStrategy::eFAST)
gubs = BV4_SPLATTER_POINTS;
if(!BuildBV4Ex(mData.mBV4Tree, mData.mMeshInterface, gBoxEpsilon, nbTrisPerLeaf, quantized, gubs))
return outputError<PxErrorCode::eINTERNAL_ERROR>(__LINE__, "BV4 tree failed to build.");
{
GU_PROFILE_ZONE("..BV4 remap")
//		remapTopology(mData.mMeshInterface);
// order[i] = original index of the triangle now stored at slot i.
const PxU32* order = mData.mMeshInterface.getRemap();
if(mMeshData.mMaterialIndices)
{
PxMaterialTableIndex* newMat = PX_ALLOCATE(PxMaterialTableIndex, mMeshData.mNbTriangles, "mMaterialIndices");
for(PxU32 i=0;i<mMeshData.mNbTriangles;i++)
newMat[i] = mMeshData.mMaterialIndices[order[i]];
PX_FREE(mMeshData.mMaterialIndices);
mMeshData.mMaterialIndices = newMat;
}
// GPU data needs the remap table even when the user suppressed it.
if (!mParams.suppressTriangleMeshRemapTable || mParams.buildGPUData)
{
PxU32* newMap = PX_ALLOCATE(PxU32, mMeshData.mNbTriangles, "mFaceRemap");
for(PxU32 i=0;i<mMeshData.mNbTriangles;i++)
newMap[i] = mMeshData.mFaceRemap ? mMeshData.mFaceRemap[order[i]] : order[i];
PX_FREE(mMeshData.mFaceRemap);
mMeshData.mFaceRemap = newMap;
}
mData.mMeshInterface.releaseRemap();
}
return true;
}
// Serializes the BV4 midphase tree ('BV4 ' chunk, structure version 3):
// chunk id, version, local bounds, init data, quantization coeffs, then the
// raw node array. \param mismatch  endian-swap scalar fields for the target.
void BV4TriangleMeshBuilder::saveMidPhaseStructure(PxOutputStream& stream, bool mismatch) const
{
// PT: in version 1 we defined "mismatch" as:
// const bool mismatch = (littleEndian() == 1);
// i.e. the data was *always* saved to file in big-endian format no matter what.
// In version>1 we now do the same as for other structures in the SDK: the data is
// exported either as little or big-endian depending on the passed parameter.
const PxU32 bv4StructureVersion = 3;
writeChunk('B', 'V', '4', ' ', stream);
writeDword(bv4StructureVersion, mismatch, stream);
writeFloat(mData.mBV4Tree.mLocalBounds.mCenter.x, mismatch, stream);
writeFloat(mData.mBV4Tree.mLocalBounds.mCenter.y, mismatch, stream);
writeFloat(mData.mBV4Tree.mLocalBounds.mCenter.z, mismatch, stream);
writeFloat(mData.mBV4Tree.mLocalBounds.mExtentsMagnitude, mismatch, stream);
writeDword(mData.mBV4Tree.mInitData, mismatch, stream);
// Quantization coefficients (center/extents or min/max form).
writeFloat(mData.mBV4Tree.mCenterOrMinCoeff.x, mismatch, stream);
writeFloat(mData.mBV4Tree.mCenterOrMinCoeff.y, mismatch, stream);
writeFloat(mData.mBV4Tree.mCenterOrMinCoeff.z, mismatch, stream);
writeFloat(mData.mBV4Tree.mExtentsOrMaxCoeff.x, mismatch, stream);
writeFloat(mData.mBV4Tree.mExtentsOrMaxCoeff.y, mismatch, stream);
writeFloat(mData.mBV4Tree.mExtentsOrMaxCoeff.z, mismatch, stream);
// PT: version 3
writeDword(PxU32(mData.mBV4Tree.mQuantized), mismatch, stream);
writeDword(mData.mBV4Tree.mNbNodes, mismatch, stream);
#ifdef GU_BV4_USE_SLABS
// PT: we use BVDataPacked to get the size computation right, but we're dealing with BVDataSwizzled here!
const PxU32 NodeSize = mData.mBV4Tree.mQuantized ? sizeof(BVDataPackedQ) : sizeof(BVDataPackedNQ);
stream.write(mData.mBV4Tree.mNodes, NodeSize*mData.mBV4Tree.mNbNodes);
PX_ASSERT(!mismatch);
#else
#error	Not implemented
#endif
}
///////////////////////////////////////////////////////////////////////////////
// Builds the BV32 (GPU) midphase tree over the GRB triangle buffer and updates
// the GPU face remap (and its inverse) to account for the tree's reordering.
bool BV32TriangleMeshBuilder::createMidPhaseStructure(const PxCookingParams& params, TriangleMeshData& meshData, BV32Tree& bv32Tree)
{
GU_PROFILE_ZONE("createMidPhaseStructure_BV32")
const float gBoxEpsilon = 2e-4f;
SourceMesh meshInterface;
//	const float gBoxEpsilon = 0.1f;
meshInterface.initRemap();
meshInterface.setNbVertices(meshData.mNbVertices);
meshInterface.setNbTriangles(meshData.mNbTriangles);
// The GPU path always uses 32-bit indices at this point.
PX_ASSERT(!(meshData.mFlags & PxTriangleMeshFlag::e16_BIT_INDICES));
IndTri32* triangles32 = reinterpret_cast<IndTri32*>(meshData.mGRB_primIndices);
meshInterface.setPointers(triangles32, NULL, meshData.mVertices);
const PxU32 nbTrisPerLeaf = 32;
if (!BuildBV32Ex(bv32Tree, meshInterface, gBoxEpsilon, nbTrisPerLeaf))
return outputError<PxErrorCode::eINTERNAL_ERROR>(__LINE__, "BV32 tree failed to build.");
{
GU_PROFILE_ZONE("..BV32 remap")
// order[i] = original index of the triangle now stored at slot i.
const PxU32* order = meshInterface.getRemap();
if (!params.suppressTriangleMeshRemapTable || params.buildGPUData)
{
PxU32* newMap = PX_ALLOCATE(PxU32, meshData.mNbTriangles, "mGRB_faceRemap");
for (PxU32 i = 0; i<meshData.mNbTriangles; i++)
newMap[i] = meshData.mGRB_faceRemap ? meshData.mGRB_faceRemap[order[i]] : order[i];
PX_FREE(meshData.mGRB_faceRemap);
meshData.mGRB_faceRemap = newMap;
// The inverse remap is only kept when the user did not suppress remap tables.
if (!params.suppressTriangleMeshRemapTable)
{
PxU32* newMapInverse = PX_ALLOCATE(PxU32, meshData.mNbTriangles, "mGRB_faceRemapInverse");
for (PxU32 i = 0; i < meshData.mNbTriangles; ++i)
newMapInverse[meshData.mGRB_faceRemap[i]] = i;
PX_FREE(meshData.mGRB_faceRemapInverse);
meshData.mGRB_faceRemapInverse = newMapInverse;
}
}
meshInterface.releaseRemap();
}
return true;
}
// Serializes the BV32 tree to the output stream. The write order below defines the
// on-disk format (chunk id, version, local bounds, packed nodes, depth info) and
// must stay in sync with the corresponding load code.
void BV32TriangleMeshBuilder::saveMidPhaseStructure(BV32Tree* bv32Tree, PxOutputStream& stream, bool mismatch)
{
// PT: in version 1 we defined "mismatch" as:
// const bool mismatch = (littleEndian() == 1);
// i.e. the data was *always* saved to file in big-endian format no matter what.
// In version>1 we now do the same as for other structures in the SDK: the data is
// exported either as little or big-endian depending on the passed parameter.
const PxU32 bv32StructureVersion = 2;
writeChunk('B', 'V', '3', '2', stream);
writeDword(bv32StructureVersion, mismatch, stream);
writeFloat(bv32Tree->mLocalBounds.mCenter.x, mismatch, stream);
writeFloat(bv32Tree->mLocalBounds.mCenter.y, mismatch, stream);
writeFloat(bv32Tree->mLocalBounds.mCenter.z, mismatch, stream);
writeFloat(bv32Tree->mLocalBounds.mExtentsMagnitude, mismatch, stream);
writeDword(bv32Tree->mInitData, mismatch, stream);
writeDword(bv32Tree->mNbPackedNodes, mismatch, stream);
PX_ASSERT(bv32Tree->mNbPackedNodes > 0);
// each packed node stores up to 32 children: mData plus min/max bounds (4 floats each)
for (PxU32 i = 0; i < bv32Tree->mNbPackedNodes; ++i)
{
BV32DataPacked& node = bv32Tree->mPackedNodes[i];
const PxU32 nbElements = node.mNbNodes * 4;
writeDword(node.mNbNodes, mismatch, stream);
writeDword(node.mDepth, mismatch, stream);
WriteDwordBuffer(node.mData, node.mNbNodes, mismatch, stream);
writeFloatBuffer(&node.mMin[0].x, nbElements, mismatch, stream);
writeFloatBuffer(&node.mMax[0].x, nbElements, mismatch, stream);
}
// per-depth offset/count table, then the depth-ordered node index remap
writeDword(bv32Tree->mMaxTreeDepth, mismatch, stream);
PX_ASSERT(bv32Tree->mMaxTreeDepth > 0);
for (PxU32 i = 0; i < bv32Tree->mMaxTreeDepth; ++i)
{
BV32DataDepthInfo& info = bv32Tree->mTreeDepthInfo[i];
writeDword(info.offset, mismatch, stream);
writeDword(info.count, mismatch, stream);
}
WriteDwordBuffer(bv32Tree->mRemapPackedNodeIndexWithDepth, bv32Tree->mNbPackedNodes, mismatch, stream);
}
///////////////////////////////////////////////////////////////////////////////
// Forwards the builder's own RTree-backed mesh data to the common TriangleMeshBuilder base.
RTreeTriangleMeshBuilder::RTreeTriangleMeshBuilder(const PxCookingParams& params) : TriangleMeshBuilder(mData, params)
{
}
// Nothing to release here; mData cleans itself up.
RTreeTriangleMeshBuilder::~RTreeTriangleMeshBuilder()
{
}
// Remap callback invoked by the RTree cooker for every leaf: encodes the leaf's
// triangle range (count + start index) into the packed LeafTriangles word stored
// in the tree. "start" refers to triangle order after the post-cooking remap that
// is applied later.
struct RTreeCookerRemap : RTreeCooker::RemapCallback
{
	PxU32 mNbTris;

	RTreeCookerRemap(PxU32 numTris) : mNbTris(numTris)
	{
	}

	virtual void remap(PxU32* val, PxU32 start, PxU32 leafCount)
	{
		PX_ASSERT(val);
		PX_ASSERT(leafCount > 0);
		PX_ASSERT(leafCount <= 16); // sanity check
		PX_ASSERT(start < mNbTris);
		PX_ASSERT(start + leafCount <= mNbTris);

		// pack (count, start) into the leaf word expected by the runtime midphase
		LeafTriangles packed;
		packed.SetData(leafCount, start);
		*val = packed.Data;
	}
};
// Builds the BVH33 (RTree) midphase over the mesh triangles, then remaps the
// mesh topology to the triangle order chosen by the cooker. Always returns true.
bool RTreeTriangleMeshBuilder::createMidPhaseStructure()
{
GU_PROFILE_ZONE("createMidPhaseStructure_RTREE")
const PxReal meshSizePerformanceTradeOff = mParams.midphaseDesc.mBVH33Desc.meshSizePerformanceTradeOff;
const PxMeshCookingHint::Enum meshCookingHint = mParams.midphaseDesc.mBVH33Desc.meshCookingHint;
// resultPermute receives the cooker's triangle permutation (new index -> old index)
PxArray<PxU32> resultPermute;
RTreeCookerRemap rc(mMeshData.mNbTriangles);
// exactly one of the 16-bit / 32-bit index pointers is passed, depending on the mesh flags
RTreeCooker::buildFromTriangles(
mData.mRTree,
mMeshData.mVertices, mMeshData.mNbVertices,
(mMeshData.mFlags & PxTriangleMeshFlag::e16_BIT_INDICES) ? reinterpret_cast<PxU16*>(mMeshData.mTriangles) : NULL,
!(mMeshData.mFlags & PxTriangleMeshFlag::e16_BIT_INDICES) ? reinterpret_cast<PxU32*>(mMeshData.mTriangles) : NULL,
mMeshData.mNbTriangles, resultPermute, &rc, meshSizePerformanceTradeOff, meshCookingHint);
PX_ASSERT(resultPermute.size() == mMeshData.mNbTriangles);
// reorder vertices/triangles/per-face data to match the tree's leaf layout
remapTopology(resultPermute.begin());
return true;
}
// Serializes the RTree midphase to the output stream: 'RTRE' chunk, version,
// the root structure fields, then every page. The write order defines the
// on-disk format and must match the loader.
void RTreeTriangleMeshBuilder::saveMidPhaseStructure(PxOutputStream& stream, bool mismatch) const
{
// PT: in version 1 we defined "mismatch" as:
// const bool mismatch = (littleEndian() == 1);
// i.e. the data was *always* saved to file in big-endian format no matter what.
// In version>1 we now do the same as for other structures in the SDK: the data is
// exported either as little or big-endian depending on the passed parameter.
const PxU32 rtreeStructureVersion = 2;
// save the RTree root structure followed immediately by RTreePage pages to an output stream
writeChunk('R', 'T', 'R', 'E', stream);
writeDword(rtreeStructureVersion, mismatch, stream);
const RTree& d = mData.mRTree;
writeFloatBuffer(&d.mBoundsMin.x, 4, mismatch, stream);
writeFloatBuffer(&d.mBoundsMax.x, 4, mismatch, stream);
writeFloatBuffer(&d.mInvDiagonal.x, 4, mismatch, stream);
writeFloatBuffer(&d.mDiagonalScaler.x, 4, mismatch, stream);
writeDword(d.mPageSize, mismatch, stream);
writeDword(d.mNumRootPages, mismatch, stream);
writeDword(d.mNumLevels, mismatch, stream);
writeDword(d.mTotalNodes, mismatch, stream);
writeDword(d.mTotalPages, mismatch, stream);
PxU32 unused = 0; writeDword(unused, mismatch, stream); // backwards compatibility
// each page stores RTREE_N nodes in SoA layout: min/max per axis, then child pointers
for (PxU32 j = 0; j < d.mTotalPages; j++)
{
writeFloatBuffer(d.mPages[j].minx, RTREE_N, mismatch, stream);
writeFloatBuffer(d.mPages[j].miny, RTREE_N, mismatch, stream);
writeFloatBuffer(d.mPages[j].minz, RTREE_N, mismatch, stream);
writeFloatBuffer(d.mPages[j].maxx, RTREE_N, mismatch, stream);
writeFloatBuffer(d.mPages[j].maxy, RTREE_N, mismatch, stream);
writeFloatBuffer(d.mPages[j].maxz, RTREE_N, mismatch, stream);
WriteDwordBuffer(d.mPages[j].ptrs, RTREE_N, mismatch, stream);
}
}
///////////////////////////////////////////////////////////////////////////////
// Validates a user-provided triangle mesh descriptor by running the cooking
// pipeline of the selected midphase in validate-only mode.
// Returns false for an invalid descriptor or an unknown midphase type.
bool immediateCooking::validateTriangleMesh(const PxCookingParams& params, const PxTriangleMeshDesc& desc)
{
	// cooking code does lots of float bitwise reinterpretation that generates exceptions
	PX_FPU_GUARD;

	if(!desc.isValid())
		return outputError<PxErrorCode::eINVALID_PARAMETER>(__LINE__, "Cooking::validateTriangleMesh: user-provided triangle mesh descriptor is invalid!");

	// PT: validation code doesn't look at midphase data, so ideally we wouldn't build the midphase structure at all here.
	switch(params.midphaseDesc.getType())
	{
		case PxMeshMidPhase::eBVH33:
		{
			RTreeTriangleMeshBuilder builder(params);
			return builder.loadFromDesc(desc, NULL, true /*doValidate*/);
		}
		case PxMeshMidPhase::eBVH34:
		{
			BV4TriangleMeshBuilder builder(params);
			return builder.loadFromDesc(desc, NULL, true /*doValidate*/);
		}
		default:
			return false;
	}
}
///////////////////////////////////////////////////////////////////////////////
// Cooks a triangle mesh and inserts it directly into the SDK through the
// insertion callback (no intermediate stream). Returns NULL on cooking failure;
// "condition" (optional) receives the detailed cooking result.
PxTriangleMesh* immediateCooking::createTriangleMesh(const PxCookingParams& params, const PxTriangleMeshDesc& desc, PxInsertionCallback& insertionCallback, PxTriangleMeshCookingResult::Enum* condition)
{
	// shared cooking path, parameterized on the midphase-specific builder
	auto cookWith = [&](TriangleMeshBuilder& builder) -> PxTriangleMesh*
	{
		// cooking code does lots of float bitwise reinterpretation that generates exceptions
		PX_FPU_GUARD;

		if(condition)
			*condition = PxTriangleMeshCookingResult::eSUCCESS;

		if(!builder.loadFromDesc(desc, condition, false))
			return NULL;

		// check if the indices can be moved from 32bits to 16bits
		if(!(params.meshPreprocessParams & PxMeshPreprocessingFlag::eFORCE_32BIT_INDICES))
			builder.checkMeshIndicesSize();

		const PxConcreteType::Enum type = (builder.getMidphaseID() == PxMeshMidPhase::eBVH33) ? PxConcreteType::eTRIANGLE_MESH_BVH33 : PxConcreteType::eTRIANGLE_MESH_BVH34;
		return static_cast<PxTriangleMesh*>(insertionCallback.buildObjectFromData(type, &builder.getMeshData()));
	};

	if(params.midphaseDesc.getType() == PxMeshMidPhase::eBVH33)
	{
		RTreeTriangleMeshBuilder builder(params);
		return cookWith(builder);
	}

	BV4TriangleMeshBuilder builder(params);
	return cookWith(builder);
}
///////////////////////////////////////////////////////////////////////////////
// Cooks a triangle mesh and serializes the result to the given stream.
// Returns false on cooking failure; "condition" (optional) receives the
// detailed cooking result.
bool immediateCooking::cookTriangleMesh(const PxCookingParams& params, const PxTriangleMeshDesc& desc, PxOutputStream& stream, PxTriangleMeshCookingResult::Enum* condition)
{
	// shared cooking path, parameterized on the midphase-specific builder
	auto cookWith = [&](TriangleMeshBuilder& builder) -> bool
	{
		// cooking code does lots of float bitwise reinterpretation that generates exceptions
		PX_FPU_GUARD;

		if(condition)
			*condition = PxTriangleMeshCookingResult::eSUCCESS;

		if(!builder.loadFromDesc(desc, condition, false))
			return false;

		builder.save(stream, immediateCooking::platformMismatch(), params);
		return true;
	};

	if(params.midphaseDesc.getType() == PxMeshMidPhase::eBVH33)
	{
		RTreeTriangleMeshBuilder builder(params);
		return cookWith(builder);
	}

	BV4TriangleMeshBuilder builder(params);
	return cookWith(builder);
}
///////////////////////////////////////////////////////////////////////////////
| 51,144 | C++ | 34.966948 | 233 | 0.736822 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/cooking/GuCookingConvexHullLib.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuCookingConvexHullLib.h"
#include "GuQuantizer.h"
#include "foundation/PxAllocator.h"
#include "foundation/PxBounds3.h"
#include "foundation/PxMemory.h"
using namespace physx;
using namespace Gu;
namespace local
{
//////////////////////////////////////////////////////////////////////////
// constants
static const float DISTANCE_EPSILON = 0.000001f; // close enough to consider two floating point numbers to be 'the same'.
static const float RESIZE_VALUE = 0.01f; // if the provided points AABB is very thin resize it to this size
//////////////////////////////////////////////////////////////////////////
// checks if points form a valid AABB cube, if not construct a default CUBE
// Copies the input points (strided) into "vertices" while computing their AABB.
// If the AABB is degenerate (any dimension below distanceEpsilon) or there are
// fewer than 3 points, the output is replaced by the 8 corners of a fabricated
// box around the AABB center and the function returns true; otherwise vcount is
// set to numPoints and the function returns false.
// When fCheck is true, vcount is reset to 0 before the cube corners are appended;
// otherwise they are appended starting at the caller-provided vcount.
static bool checkPointsAABBValidity(PxU32 numPoints, const PxVec3* points, PxU32 stride , float distanceEpsilon,
float resizeValue, PxU32& vcount, PxVec3* vertices, bool fCheck = false)
{
const char* vtx = reinterpret_cast<const char *> (points);
PxBounds3 bounds;
bounds.setEmpty();
// get the bounding box
for (PxU32 i = 0; i < numPoints; i++)
{
const PxVec3& p = *reinterpret_cast<const PxVec3 *> (vtx);
vtx += stride;
bounds.include(p);
vertices[i] = p;
}
PxVec3 dim = bounds.getDimensions();
PxVec3 center = bounds.getCenter();
// special case, the AABB is very thin or user provided us with only input 2 points
// we construct an AABB cube and return it
if ( dim.x < distanceEpsilon || dim.y < distanceEpsilon || dim.z < distanceEpsilon || numPoints < 3 )
{
float len = FLT_MAX;
// pick the shortest size bigger than the distance epsilon
if ( dim.x > distanceEpsilon && dim.x < len )
len = dim.x;
if ( dim.y > distanceEpsilon && dim.y < len )
len = dim.y;
if ( dim.z > distanceEpsilon && dim.z < len )
len = dim.z;
// if the AABB is small in all dimensions, resize it
if ( len == FLT_MAX )
{
dim = PxVec3(resizeValue);
}
// if one edge is small, set to 1/5th the shortest non-zero edge.
else
{
if ( dim.x < distanceEpsilon )
dim.x = PxMin(len * 0.05f, resizeValue);
else
dim.x *= 0.5f;
if ( dim.y < distanceEpsilon )
dim.y = PxMin(len * 0.05f, resizeValue);
else
dim.y *= 0.5f;
if ( dim.z < distanceEpsilon )
dim.z = PxMin(len * 0.05f, resizeValue);
else
dim.z *= 0.5f;
}
// construct the AABB
// note: dim now holds half-extents, so the box spans center +/- dim
const PxVec3 extPos = center + dim;
const PxVec3 extNeg = center - dim;
if(fCheck)
vcount = 0;
vertices[vcount++] = extNeg;
vertices[vcount++] = PxVec3(extPos.x,extNeg.y,extNeg.z);
vertices[vcount++] = PxVec3(extPos.x,extPos.y,extNeg.z);
vertices[vcount++] = PxVec3(extNeg.x,extPos.y,extNeg.z);
vertices[vcount++] = PxVec3(extNeg.x,extNeg.y,extPos.z);
vertices[vcount++] = PxVec3(extPos.x,extNeg.y,extPos.z);
vertices[vcount++] = extPos;
vertices[vcount++] = PxVec3(extNeg.x,extPos.y,extPos.z);
return true; // return cube
}
vcount = numPoints;
return false;
}
}
//////////////////////////////////////////////////////////////////////////
// shift vertices around origin and normalize point cloud, remove duplicates!
// Shifts the input point cloud so its AABB center sits at the origin (the shift
// is stored in mOriginShift and undone later by shiftConvexMeshDesc), then runs
// the regular vertex cleanup on the shifted copy.
bool ConvexHullLib::shiftAndcleanupVertices(PxU32 svcount, const PxVec3* svertices, PxU32 stride,
	PxU32& vcount, PxVec3* vertices)
{
	mShiftedVerts = PX_ALLOCATE(PxVec3, svcount, "PxVec3");

	// first pass: gather the AABB of the input cloud
	PxBounds3 cloudBounds;
	cloudBounds.setEmpty();
	const char* rawVerts = reinterpret_cast<const char*>(svertices);
	for (PxU32 i = 0; i < svcount; i++, rawVerts += stride)
		cloudBounds.include(*reinterpret_cast<const PxVec3*>(rawVerts));

	mOriginShift = cloudBounds.getCenter();

	// second pass: store the shifted copy (tightly packed)
	rawVerts = reinterpret_cast<const char*>(svertices);
	for (PxU32 i = 0; i < svcount; i++, rawVerts += stride)
		mShiftedVerts[i] = *reinterpret_cast<const PxVec3*>(rawVerts) - mOriginShift;

	return cleanupVertices(svcount, mShiftedVerts, sizeof(PxVec3), vcount, vertices);
}
//////////////////////////////////////////////////////////////////////////
// Shift verts/planes in the desc back
// Undoes the origin shift applied by shiftAndcleanupVertices on the cooked
// output: translates the hull vertices back and adjusts each polygon plane's
// d term accordingly (shifting a plane (n, d) by t yields (n, d - n.dot(t))).
void ConvexHullLib::shiftConvexMeshDesc(PxConvexMeshDesc& desc)
{
	PX_ASSERT(mConvexMeshDesc.flags & PxConvexFlag::eSHIFT_VERTICES);

	PxVec3* verts = reinterpret_cast<PxVec3*>(const_cast<void*>(desc.points.data));
	for (PxU32 i = 0; i < desc.points.count; i++)
		verts[i] += mOriginShift;

	PxHullPolygon* polys = reinterpret_cast<PxHullPolygon*>(const_cast<void*>(desc.polygons.data));
	for (PxU32 i = 0; i < desc.polygons.count; i++)
	{
		const PxVec3 n(polys[i].mPlane[0], polys[i].mPlane[1], polys[i].mPlane[2]);
		polys[i].mPlane[3] -= n.dot(mOriginShift);
	}
}
//////////////////////////////////////////////////////////////////////////
// normalize point cloud, remove duplicates!
// Normalizes the input point cloud before hull computation: optionally quantizes
// it (PxConvexFlag::eQUANTIZE_INPUT), copies it into "vertices" and, if the cloud
// is degenerate, replaces it with a small fabricated box (see checkPointsAABBValidity).
// Returns false for empty input, reports an error if fewer than 4 usable vertices remain.
// Fix: the original leaked the Quantizer on the "vcount < 4" error path below;
// the quantizer is now released on every exit path.
bool ConvexHullLib::cleanupVertices(PxU32 svcount, const PxVec3* svertices, PxU32 stride,
	PxU32& vcount, PxVec3* vertices)
{
	if (svcount == 0)
		return false;

	const PxVec3* verticesToClean = svertices;
	PxU32 numVerticesToClean = svcount;
	Quantizer* quantizer = NULL;

	// if quantization is enabled, parse the input vertices and produce new quantized vertices,
	// that will be then cleaned the same way
	if (mConvexMeshDesc.flags & PxConvexFlag::eQUANTIZE_INPUT)
	{
		quantizer = createQuantizer();
		PxU32 vertsOutCount;
		const PxVec3* vertsOut = quantizer->kmeansQuantize3D(svcount, svertices, stride, true, mConvexMeshDesc.quantizedCount, vertsOutCount);
		if (vertsOut)
		{
			// NOTE(review): the quantized buffer is iterated below with the caller-provided
			// stride; presumably kmeansQuantize3D returns tightly packed PxVec3s - confirm.
			numVerticesToClean = vertsOutCount;
			verticesToClean = vertsOut;
		}
	}

	const float distanceEpsilon = local::DISTANCE_EPSILON * mCookingParams.scale.length;
	const float resizeValue = local::RESIZE_VALUE * mCookingParams.scale.length;

	vcount = 0;
	// check for the AABB from points, if its very tiny return a resized CUBE
	const bool replacedByCube = local::checkPointsAABBValidity(numVerticesToClean, verticesToClean, stride, distanceEpsilon, resizeValue, vcount, vertices, false);

	// the quantizer owns the quantized vertex buffer; release it on every exit path
	// (the buffer is no longer referenced past this point)
	if (quantizer)
		quantizer->release();

	if (replacedByCube)
		return true;

	if (vcount < 4)
		return PxGetFoundation().error(PxErrorCode::eINTERNAL_ERROR, PX_FL, "ConvexHullLib::cleanupVertices: Less than four valid vertices were found. Provide at least four valid (e.g. each at a different position) vertices.");

	return true;
}
// Moves the hull polygon with the most vertices into slot 0 (swapping it with the
// current first polygon) and rebuilds the flat index buffer so every polygon's
// mIndexBase points at its relocated index run. The rebuilt buffer is owned by
// mSwappedIndices (freed in the destructor) and installed into desc.indices.data.
void ConvexHullLib::swapLargestFace(PxConvexMeshDesc& desc)
{
const PxHullPolygon* polygons = reinterpret_cast<const PxHullPolygon*>(desc.polygons.data);
PxHullPolygon* polygonsOut = const_cast<PxHullPolygon*>(polygons);
// find the polygon with the highest vertex count
PxU32 largestFace = 0;
for (PxU32 i = 1; i < desc.polygons.count; i++)
{
if(polygons[largestFace].mNbVerts < polygons[i].mNbVerts)
largestFace = i;
}
// early exit if no swap needs to be done
if(largestFace == 0)
return;
const PxU32* indices = reinterpret_cast<const PxU32*>(desc.indices.data);
mSwappedIndices = PX_ALLOCATE(PxU32, desc.indices.count, "PxU32");
// keep copies: the in-place polygon swap below invalidates polygons[0]/[largestFace]
PxHullPolygon replacedPolygon = polygons[0];
PxHullPolygon largestPolygon = polygons[largestFace];
polygonsOut[0] = polygons[largestFace];
polygonsOut[largestFace] = replacedPolygon;
// relocate indices
// walk the polygons in output order, copying each one's index run from its OLD
// location (largest polygon's run goes first, the former first polygon's run
// lands where the largest one used to be)
PxU16 indexBase = 0;
for (PxU32 i = 0; i < desc.polygons.count; i++)
{
if(i == 0)
{
PxMemCopy(mSwappedIndices, &indices[largestPolygon.mIndexBase],sizeof(PxU32)*largestPolygon.mNbVerts);
polygonsOut[0].mIndexBase = indexBase;
indexBase += largestPolygon.mNbVerts;
}
else
{
if(i == largestFace)
{
PxMemCopy(&mSwappedIndices[indexBase], &indices[replacedPolygon.mIndexBase], sizeof(PxU32)*replacedPolygon.mNbVerts);
polygonsOut[i].mIndexBase = indexBase;
indexBase += replacedPolygon.mNbVerts;
}
else
{
PxMemCopy(&mSwappedIndices[indexBase], &indices[polygons[i].mIndexBase], sizeof(PxU32)*polygons[i].mNbVerts);
polygonsOut[i].mIndexBase = indexBase;
indexBase += polygons[i].mNbVerts;
}
}
}
PX_ASSERT(indexBase == desc.indices.count);
desc.indices.data = mSwappedIndices;
}
// Releases the scratch buffers owned by the hull library.
ConvexHullLib::~ConvexHullLib()
{
	PX_FREE(mShiftedVerts);
	PX_FREE(mSwappedIndices);
}
| 9,751 | C++ | 33.097902 | 221 | 0.682699 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/cooking/GuCookingConvexMeshBuilder.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_COOKING_CONVEX_MESH_BUILDER_H
#define GU_COOKING_CONVEX_MESH_BUILDER_H
#include "cooking/PxCooking.h"
#include "GuConvexMeshData.h"
#include "GuCookingConvexPolygonsBuilder.h"
#include "GuSDF.h"
namespace physx
{
class BigConvexData;
namespace Gu
{
struct ConvexHullInitData;
}
//////////////////////////////////////////////////////////////////////////
// Convex mesh builder, creates the convex mesh from given polygons and creates internal data
class ConvexMeshBuilder
{
public:
// buildGRBData: also build the GPU (GRB) data for the hull
ConvexMeshBuilder(const bool buildGRBData);
~ConvexMeshBuilder();
// loads the computed or given convex hull from descriptor.
// the descriptor does contain polygons directly, triangles are not allowed
bool build(const PxConvexMeshDesc&, PxU32 gaussMapVertexLimit, bool validateOnly = false, ConvexHullLib* hullLib = NULL);
// save the convex mesh into stream
bool save(PxOutputStream& stream, bool platformMismatch) const;
// copy the convex mesh into internal convex mesh, which can be directly used then
bool copy(Gu::ConvexHullInitData& convexData);
// loads the convex mesh from given polygons
bool loadConvexHull(const PxConvexMeshDesc&, ConvexHullLib* hullLib);
// computed hull polygons from given triangles
// outputs are allocated with inAllocator; ownership passes to the caller
bool computeHullPolygons(const PxU32& nbVerts,const PxVec3* verts, const PxU32& nbTriangles, const PxU32* triangles, PxAllocatorCallback& inAllocator,
PxU32& outNbVerts, PxVec3*& outVertices, PxU32& nbIndices, PxU32*& indices, PxU32& nbPolygons, PxHullPolygon*& polygons);
// compute big convex data
bool computeGaussMaps();
// compute mass, inertia tensor
void computeMassInfo(bool lowerPrecision);
// TEST_INTERNAL_OBJECTS
// internal objects
void computeInternalObjects();
bool checkExtentRadiusRatio();
//~TEST_INTERNAL_OBJECTS
void computeSDF(const PxConvexMeshDesc& desc);
// set big convex data
// note: takes ownership of the pointer (stored in mBigConvexData)
void setBigConvexData(BigConvexData* data) { mBigConvexData = data; }
mutable ConvexPolygonsBuilder hullBuilder;
protected:
Gu::ConvexHullData mHullData;
Gu::SDF* mSdfData;
BigConvexData* mBigConvexData; //!< optional, only for large meshes! PT: redundant with ptr in chull data? Could also be end of other buffer
PxReal mMass; //this is mass assuming a unit density that can be scaled by instances!
PxMat33 mInertia; //in local space of mesh!
};
}
#endif
| 4,183 | C | 40.019607 | 157 | 0.729381 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/cooking/GuCookingConvexHullUtils.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "foundation/PxBounds3.h"
#include "foundation/PxMathUtils.h"
#include "foundation/PxSIMDHelpers.h"
#include "GuCookingConvexHullUtils.h"
#include "GuCookingVolumeIntegration.h"
#include "foundation/PxUtilities.h"
#include "foundation/PxVecMath.h"
#include "GuBox.h"
#include "GuConvexMeshData.h"
using namespace physx;
using namespace aos;
using namespace Gu;
namespace local
{
//////////////////////////////////////////////////////////////////////////
// helper class for ConvexHullCrop
// per-vertex scratch: plane classification plus remap slots for the cropped hull
class VertFlag
{
public:
PxU8 planetest;
PxU8 undermap;
PxU8 overmap;
};
//////////////////////////////////////////////////////////////////////////|
// helper class for ConvexHullCrop
// per-edge scratch: remap slot for the cropped hull
class EdgeFlag
{
public:
PxI16 undermap;
};
//////////////////////////////////////////////////////////////////////////|
// helper class for ConvexHullCrop
class Coplanar
{
public:
PxU16 ea;
PxU8 v0;
PxU8 v1;
};
//////////////////////////////////////////////////////////////////////////
// plane test
// classification of a vertex against a plane, usable as a bitmask
enum PlaneTestResult
{
eCOPLANAR = 0,
eUNDER = 1 << 0,
eOVER = 1 << 1
};
//////////////////////////////////////////////////////////////////////////
// test where vertex lies in respect to the plane
// eOVER if the signed distance exceeds +epsilon, eUNDER if below -epsilon,
// eCOPLANAR within the epsilon band
static PlaneTestResult planeTest(const PxPlane& p, const PxVec3& v, float epsilon)
{
const float a = v.dot(p.n) + p.d;
PlaneTestResult flag = (a > epsilon) ? eOVER : ((a < -epsilon) ? eUNDER : eCOPLANAR);
return flag;
}
// computes the OBB for this set of points relative to this transform matrix. SIMD version
// On output: "sides" holds the box dimensions measured in the frame of "rot",
// and "trans" is moved to the center of the points' bounds in that frame
// (the orientation itself is left unchanged).
void computeOBBSIMD(PxU32 vcount, const Vec4V* points, Vec4V& sides, const QuatV& rot, Vec4V& trans)
{
PX_ASSERT(vcount);
Vec4V minV = V4Load(PX_MAX_F32);
Vec4V maxV = V4Load(-PX_MAX_F32);
// accumulate min/max of the points expressed in the (rot, trans) local frame
for (PxU32 i = 0; i < vcount; i++)
{
const Vec4V& vertexV = points[i];
const Vec4V t = V4Sub(vertexV, trans);
const Vec4V v = Vec4V_From_Vec3V(QuatRotateInv(rot, Vec3V_From_Vec4V(t)));
minV = V4Min(minV, v);
maxV = V4Max(maxV, v);
}
sides = V4Sub(maxV, minV);
// deltaVec = max - 0.5*sides = local-space center of the bounds;
// shift trans by that offset rotated back into world space (column-wise)
Mat33V tmpMat;
QuatGetMat33V(rot, tmpMat.col0, tmpMat.col1, tmpMat.col2);
const FloatV coe = FLoad(0.5f);
const Vec4V deltaVec = V4Sub(maxV, V4Scale(sides, coe));
const Vec4V t0 = V4Scale(Vec4V_From_Vec3V(tmpMat.col0), V4GetX(deltaVec));
trans = V4Add(trans, t0);
const Vec4V t1 = V4Scale(Vec4V_From_Vec3V(tmpMat.col1), V4GetY(deltaVec));
trans = V4Add(trans, t1);
const Vec4V t2 = V4Scale(Vec4V_From_Vec3V(tmpMat.col2), V4GetZ(deltaVec));
trans = V4Add(trans, t2);
}
}
//////////////////////////////////////////////////////////////////////////
// construct the base cube from given min/max
// Seeds the hull with the axis-aligned box [bmin, bmax]: 8 vertices, 6 facet
// planes and 24 half-edges. The hull is later cropped against mInputPlanes.
// HalfEdge(ea, v, p): v indexes mVertices and p indexes mFacets (see their use
// in findCandidatePlane); ea is presumably the paired/adjacent half-edge index
// - confirm against the HalfEdge declaration.
ConvexHull::ConvexHull(const PxVec3& bmin, const PxVec3& bmax, const PxArray<PxPlane>& inPlanes)
: mInputPlanes(inPlanes)
{
// min max verts of the cube - 8 verts
mVertices.pushBack(PxVec3(bmin.x, bmin.y, bmin.z)); // ---
mVertices.pushBack(PxVec3(bmin.x, bmin.y, bmax.z)); // --+
mVertices.pushBack(PxVec3(bmin.x, bmax.y, bmin.z)); // -+-
mVertices.pushBack(PxVec3(bmin.x, bmax.y, bmax.z)); // -++
mVertices.pushBack(PxVec3(bmax.x, bmin.y, bmin.z)); // +--
mVertices.pushBack(PxVec3(bmax.x, bmin.y, bmax.z)); // +-+
mVertices.pushBack(PxVec3(bmax.x, bmax.y, bmin.z)); // ++-
mVertices.pushBack(PxVec3(bmax.x, bmax.y, bmax.z)); // +++
// cube planes - 6 planes
mFacets.pushBack(PxPlane(PxVec3(-1.f, 0, 0), bmin.x)); // 0,1,3,2
mFacets.pushBack(PxPlane(PxVec3(1.f, 0, 0), -bmax.x)); // 6,7,5,4
mFacets.pushBack(PxPlane(PxVec3(0, -1.f, 0), bmin.y)); // 0,4,5,1
mFacets.pushBack(PxPlane(PxVec3(0, 1.f, 0), -bmax.y)); // 3,7,6,2
mFacets.pushBack(PxPlane(PxVec3(0, 0, -1.f), bmin.z)); // 0,2,6,4
mFacets.pushBack(PxPlane(PxVec3(0, 0, 1.f), -bmax.z)); // 1,5,7,3
// cube edges - 24 edges
mEdges.pushBack(HalfEdge(11, 0, 0));
mEdges.pushBack(HalfEdge(23, 1, 0));
mEdges.pushBack(HalfEdge(15, 3, 0));
mEdges.pushBack(HalfEdge(16, 2, 0));
mEdges.pushBack(HalfEdge(13, 6, 1));
mEdges.pushBack(HalfEdge(21, 7, 1));
mEdges.pushBack(HalfEdge(9, 5, 1));
mEdges.pushBack(HalfEdge(18, 4, 1));
mEdges.pushBack(HalfEdge(19, 0, 2));
mEdges.pushBack(HalfEdge(6, 4, 2));
mEdges.pushBack(HalfEdge(20, 5, 2));
mEdges.pushBack(HalfEdge(0, 1, 2));
mEdges.pushBack(HalfEdge(22, 3, 3));
mEdges.pushBack(HalfEdge(4, 7, 3));
mEdges.pushBack(HalfEdge(17, 6, 3));
mEdges.pushBack(HalfEdge(2, 2, 3));
mEdges.pushBack(HalfEdge(3, 0, 4));
mEdges.pushBack(HalfEdge(14, 2, 4));
mEdges.pushBack(HalfEdge(7, 6, 4));
mEdges.pushBack(HalfEdge(8, 4, 4));
mEdges.pushBack(HalfEdge(10, 1, 5));
mEdges.pushBack(HalfEdge(5, 5, 5));
mEdges.pushBack(HalfEdge(12, 7, 5));
mEdges.pushBack(HalfEdge(1, 3, 5));
}
//////////////////////////////////////////////////////////////////////////
// create the initial convex hull from given OBB
// Seeds the hull with an oriented box (extent + transform): 8 corner vertices,
// 6 facet planes built from corner triples, and the same 24-half-edge topology
// as the axis-aligned constructor (the vertex pushBack order reproduces that
// constructor's ---/--+/... corner ordering from the OBB corner array).
ConvexHull::ConvexHull(const PxVec3& extent, const PxTransform& transform, const PxArray<PxPlane>& inPlanes)
: mInputPlanes(inPlanes)
{
// get the OBB corner points
PxVec3 extentPoints[8];
const PxMat33Padded rot(transform.q);
Gu::computeOBBPoints(extentPoints, transform.p, extent, rot.column0, rot.column1, rot.column2);
mVertices.pushBack(PxVec3(extentPoints[0].x, extentPoints[0].y, extentPoints[0].z)); // ---
mVertices.pushBack(PxVec3(extentPoints[4].x, extentPoints[4].y, extentPoints[4].z)); // --+
mVertices.pushBack(PxVec3(extentPoints[3].x, extentPoints[3].y, extentPoints[3].z)); // -+-
mVertices.pushBack(PxVec3(extentPoints[7].x, extentPoints[7].y, extentPoints[7].z)); // -++
mVertices.pushBack(PxVec3(extentPoints[1].x, extentPoints[1].y, extentPoints[1].z)); // +--
mVertices.pushBack(PxVec3(extentPoints[5].x, extentPoints[5].y, extentPoints[5].z)); // +-+
mVertices.pushBack(PxVec3(extentPoints[2].x, extentPoints[2].y, extentPoints[2].z)); // ++-
mVertices.pushBack(PxVec3(extentPoints[6].x, extentPoints[6].y, extentPoints[6].z)); // +++
// cube planes - 6 planes
// each plane is constructed from three corners of the corresponding face
PxPlane plane0(extentPoints[0], extentPoints[4], extentPoints[7]); // 0,1,3,2
mFacets.pushBack(PxPlane(plane0.n, plane0.d));
PxPlane plane1(extentPoints[2], extentPoints[6], extentPoints[5]); // 6,7,5,4
mFacets.pushBack(PxPlane(plane1.n, plane1.d));
PxPlane plane2(extentPoints[0], extentPoints[1], extentPoints[5]); // 0,4,5,1
mFacets.pushBack(PxPlane(plane2.n, plane2.d));
PxPlane plane3(extentPoints[7], extentPoints[6], extentPoints[2]); // 3,7,6,2
mFacets.pushBack(PxPlane(plane3.n, plane3.d));
PxPlane plane4(extentPoints[0], extentPoints[3], extentPoints[2]); // 0,2,6,4
mFacets.pushBack(PxPlane(plane4.n, plane4.d));
PxPlane plane5(extentPoints[4], extentPoints[5], extentPoints[6]); // 1,5,7,3
mFacets.pushBack(PxPlane(plane5.n, plane5.d));
// cube edges - 24 edges
// same HalfEdge(ea, v, p) topology table as the AABB constructor above
mEdges.pushBack(HalfEdge(11, 0, 0));
mEdges.pushBack(HalfEdge(23, 1, 0));
mEdges.pushBack(HalfEdge(15, 3, 0));
mEdges.pushBack(HalfEdge(16, 2, 0));
mEdges.pushBack(HalfEdge(13, 6, 1));
mEdges.pushBack(HalfEdge(21, 7, 1));
mEdges.pushBack(HalfEdge(9, 5, 1));
mEdges.pushBack(HalfEdge(18, 4, 1));
mEdges.pushBack(HalfEdge(19, 0, 2));
mEdges.pushBack(HalfEdge(6, 4, 2));
mEdges.pushBack(HalfEdge(20, 5, 2));
mEdges.pushBack(HalfEdge(0, 1, 2));
mEdges.pushBack(HalfEdge(22, 3, 3));
mEdges.pushBack(HalfEdge(4, 7, 3));
mEdges.pushBack(HalfEdge(17, 6, 3));
mEdges.pushBack(HalfEdge(2, 2, 3));
mEdges.pushBack(HalfEdge(3, 0, 4));
mEdges.pushBack(HalfEdge(14, 2, 4));
mEdges.pushBack(HalfEdge(7, 6, 4));
mEdges.pushBack(HalfEdge(8, 4, 4));
mEdges.pushBack(HalfEdge(10, 1, 5));
mEdges.pushBack(HalfEdge(5, 5, 5));
mEdges.pushBack(HalfEdge(12, 7, 5));
mEdges.pushBack(HalfEdge(1, 3, 5));
}
//////////////////////////////////////////////////////////////////////////
// finds the candidate plane, returns -1 otherwise
// Finds the best input plane to crop the hull with next, or -1 if no plane
// scores above 'epsilon'. A plane's score is how far the hull sticks out on
// its positive side, normalized by the hull's extent along the plane normal.
// Planes that duplicate an existing facet, or that are nearly parallel to a
// facet that has vertices below the candidate plane, are disqualified.
PxI32 ConvexHull::findCandidatePlane(float planeTestEpsilon, float epsilon) const
{
	PxI32 best = -1;
	float bestScore = 0.0f;
	for (PxU32 i = 0; i < mInputPlanes.size(); i++)
	{
		const PxPlane& candidate = mInputPlanes[i];
		// Signed-distance range of all hull vertices, clamped to include 0.
		float dmax = 0.0f;
		float dmin = 0.0f;
		for (PxU32 j = 0; j < mVertices.size(); j++)
		{
			const float dist = mVertices[j].dot(candidate.n) + candidate.d;
			dmax = PxMax(dmax, dist);
			dmin = PxMin(dmin, dist);
		}
		float range = dmax - dmin;
		if (range < planeTestEpsilon)
			range = 1.0f; // shouldn't happen.
		float score = dmax / range;
		// The current best already beats this candidate - try the next one.
		if (score <= bestScore)
			continue;
		// Disqualify the candidate (score = 0) if we already have that exact
		// plane, or if it is nearly parallel to an existing facet that has a
		// vertex below the candidate plane.
		for (PxU32 j = 0; j < mFacets.size(); j++)
		{
			if (candidate == mFacets[j])
			{
				score = 0.0f;
				continue;
			}
			if (candidate.n.dot(mFacets[j].n) > local::MAXDOT_MINANG)
			{
				for (PxU32 k = 0; k < mEdges.size(); k++)
				{
					if (mEdges[k].p != j)
						continue;
					if (mVertices[mEdges[k].v].dot(candidate.n) + candidate.d < 0)
					{
						score = 0.0f; // so this plane wont get selected.
						break;
					}
				}
			}
		}
		if (score > bestScore)
		{
			best = PxI32(i);
			bestScore = score;
		}
	}
	return (bestScore > epsilon) ? best : -1;
}
//////////////////////////////////////////////////////////////////////////
// internal hull check
// Verifies topological and geometric consistency of the hull:
//  - pass 1: every half-edge has a valid adjacent (sibling) edge, the
//    adjacency is reciprocal, and the sibling's vertex matches the next
//    edge's vertex (otherwise the mesh has a hole),
//  - pass 2: every edge vertex lies on the plane of its face, and the face
//    winding agrees with the stored plane normal.
// Returns false on the first violation found.
bool ConvexHull::assertIntact(float epsilon) const
{
	PxU32 i;
	PxU32 estart = 0;	// first edge of the face currently being walked
	for (i = 0; i < mEdges.size(); i++)
	{
		// Edges of a face are contiguous: a change of face index marks a new face.
		if (mEdges[estart].p != mEdges[i].p)
		{
			estart = i;
		}
		// Next edge within the same face, wrapping around to the face start.
		PxU32 inext = i + 1;
		if (inext >= mEdges.size() || mEdges[inext].p != mEdges[i].p)
		{
			inext = estart;
		}
		PX_ASSERT(mEdges[inext].p == mEdges[i].p);
		PxI16 nb = mEdges[i].ea;
		// 255 / -1 are the "unset" encodings for the adjacency index.
		if (nb == 255 || nb == -1)
			return false;
		PX_ASSERT(nb != -1);
		// Adjacency must be reciprocal: my sibling's sibling is me.
		PX_ASSERT(i == PxU32(mEdges[PxU32(nb)].ea));
		// Check that the vertex of the next edge is the vertex of the adjacent half edge.
		// Otherwise the two half edges are not really adjacent and we have a hole.
		PX_ASSERT(mEdges[PxU32(nb)].v == mEdges[inext].v);
		if (!(mEdges[PxU32(nb)].v == mEdges[inext].v))
			return false;
	}
	for (i = 0; i < mEdges.size(); i++)
	{
		// Every edge vertex must lie on the plane of its face.
		PX_ASSERT(local::eCOPLANAR == local::planeTest(mFacets[mEdges[i].p], mVertices[mEdges[i].v], epsilon));
		if (local::eCOPLANAR != local::planeTest(mFacets[mEdges[i].p], mVertices[mEdges[i].v], epsilon))
			return false;
		if (mEdges[estart].p != mEdges[i].p)
		{
			estart = i;
		}
		PxU32 i1 = i + 1;
		if (i1 >= mEdges.size() || mEdges[i1].p != mEdges[i].p) {
			i1 = estart;
		}
		PxU32 i2 = i1 + 1;
		if (i2 >= mEdges.size() || mEdges[i2].p != mEdges[i].p) {
			i2 = estart;
		}
		if (i == i2)
			continue; // i sliced tangent to an edge and created 2 meaningless edges
		// check the face normal against the triangle from edges
		PxVec3 localNormal = (mVertices[mEdges[i1].v] - mVertices[mEdges[i].v]).cross(mVertices[mEdges[i2].v] - mVertices[mEdges[i1].v]);
		const float m = localNormal.magnitude();
		// BUG FIX: the original code substituted a dummy normal when m == 0 but
		// then still multiplied by 1/m, dividing by zero and producing inf/NaN.
		// A zero-length cross product means the three vertices are collinear,
		// so the winding test is meaningless here - skip it. (This matches the
		// old de-facto behavior: the NaN comparison below never returned false.)
		if (m == 0.0f)
			continue;
		localNormal *= (1.0f / m);
		if (localNormal.dot(mFacets[mEdges[i].p].n) <= 0.0f)
			return false;
	}
	return true;
}
// returns the maximum number of vertices on a face
// Walks the half-edge list (edges of a face are stored contiguously) and
// returns the number of half-edges - which equals the number of vertices -
// of the largest face.
// BUG FIX: the original version only committed a face's count when it saw the
// NEXT face boundary, so the last face was never counted at all; it also did
// not count the boundary edge itself (compensating with a "+1" that made the
// first face over-count by one). Both are fixed by counting the boundary edge
// as the first edge of the new face and committing once more after the loop.
PxU32 ConvexHull::maxNumVertsPerFace() const
{
	PxU32 maxVerts = 0;
	PxU32 currentVerts = 0;	// edge count of the face being scanned
	PxU32 estart = 0;		// first edge of the face being scanned
	for (PxU32 i = 0; i < mEdges.size(); i++)
	{
		if (mEdges[estart].p != mEdges[i].p)
		{
			// Face boundary: commit the finished face's count.
			if (currentVerts > maxVerts)
			{
				maxVerts = currentVerts;
			}
			currentVerts = 1;	// the current edge already belongs to the new face
			estart = i;
		}
		else
		{
			currentVerts++;
		}
	}
	// Commit the last face, which has no trailing boundary.
	if (currentVerts > maxVerts)
	{
		maxVerts = currentVerts;
	}
	return maxVerts;
}
//////////////////////////////////////////////////////////////////////////
// slice the input convexHull with the slice plane
// Slices 'convex' with 'slice', returning a newly allocated hull containing
// the part under (inside) the plane, a copy of the input if nothing is over
// the plane, or NULL on degenerate results (nothing under, or a coplanar
// edge loop that cannot be closed).
// BUG FIX (see the sort loop below): the coplanar-edge sort used
// "i < coplanarEdgesNum - 1" with an unsigned count; when coplanarEdgesNum
// was 0 (reachable when no vertex ends up UNDER) the subtraction wrapped to
// 0xFFFFFFFF and the loop walked uninitialized coplanarEdges entries before
// the "!vertCountUnder" guard further down could bail out.
ConvexHull* physx::convexHullCrop(const ConvexHull& convex, const PxPlane& slice, float planeTestEpsilon)
{
	static const PxU8 invalidIndex = PxU8(-1);
	PxU32 i;
	PxU32 vertCountUnder = 0; // Running count of the vertices UNDER the slicing plane.
	PX_ASSERT(convex.getEdges().size() < 480);
	// Arrays of mapping information associated with features in the input convex.
	// edgeflag[i].undermap - output index of input edge convex->edges[i]
	// vertflag[i].undermap - output index of input vertex convex->vertices[i]
	// vertflag[i].planetest - the side-of-plane classification of convex->vertices[i]
	// (There are other members but they are unused.)
	local::EdgeFlag edgeFlag[512];
	local::VertFlag vertFlag[256];
	// Lists of output features. Populated during clipping.
	// Coplanar edges have one sibling in tmpunderedges and one in coplanaredges.
	// coplanaredges holds the sibling that belong to the new polygon created from slicing.
	ConvexHull::HalfEdge tmpUnderEdges[512]; // The output edge list.
	PxPlane tmpUnderPlanes[128]; // The output plane list.
	local::Coplanar coplanarEdges[512]; // The coplanar edge list.
	PxU32 coplanarEdgesNum = 0; // Running count of coplanar edges.
	// Created vertices on the slicing plane (stored for output after clipping).
	PxArray<PxVec3> createdVerts;
	// Logical OR of individual vertex flags.
	PxU32 convexClipFlags = 0;
	// Classify each vertex against the slicing plane as OVER | COPLANAR | UNDER.
	// OVER - Vertex is over (outside) the slicing plane. Will not be output.
	// COPLANAR - Vertex is on the slicing plane. A copy will be output.
	// UNDER - Vertex is under (inside) the slicing plane. Will be output.
	// We keep an array of information structures for each vertex in the input convex.
	// vertflag[i].undermap - The (computed) index of convex->vertices[i] in the output.
	// invalidIndex for OVER vertices - they are not output.
	// initially invalidIndex for COPLANAR vertices - set later.
	// vertflag[i].overmap - Unused - we don't care about the over part.
	// vertflag[i].planetest - The classification (clip flag) of convex->vertices[i].
	for (i = 0; i < convex.getVertices().size(); i++)
	{
		local::PlaneTestResult vertexClipFlag = local::planeTest(slice, convex.getVertices()[i], planeTestEpsilon);
		switch (vertexClipFlag)
		{
		case local::eOVER:
		case local::eCOPLANAR:
			vertFlag[i].undermap = invalidIndex; // Initially invalid for COPLANAR
			vertFlag[i].overmap = invalidIndex;
			break;
		case local::eUNDER:
			vertFlag[i].undermap = PxTo8(vertCountUnder++);
			vertFlag[i].overmap = invalidIndex;
			break;
		}
		vertFlag[i].planetest = PxU8(vertexClipFlag);
		convexClipFlags |= vertexClipFlag;
	}
	// Check special case: everything UNDER or COPLANAR.
	// This way we know we wont end up with silly faces / edges later on.
	if ((convexClipFlags & local::eOVER) == 0)
	{
		// Just return a copy of the same convex.
		ConvexHull* dst = PX_NEW(ConvexHull)(convex);
		return dst;
	}
	PxU16 underEdgeCount = 0; // Running count of output edges.
	PxU16 underPlanesCount = 0; // Running count of output planes.
	// Clipping Loop
	// =============
	//
	// for each plane
	//
	// for each edge
	//
	// if first UNDER & second !UNDER
	// output current edge -> tmpunderedges
	// if we have done the sibling
	// connect current edge to its sibling
	// set vout = first vertex of sibling
	// else if second is COPLANAR
	// if we havent already copied it
	// copy second -> createdverts
	// set vout = index of created vertex
	// else
	// generate a new vertex -> createdverts
	// set vout = index of created vertex
	// if vin is already set and vin != vout (non-trivial edge)
	// output coplanar edge -> tmpunderedges (one sibling)
	// set coplanaredge to new edge index (for connecting the other sibling)
	//
	// else if first !UNDER & second UNDER
	// if we have done the sibling
	// connect current edge to its sibling
	// set vin = second vertex of sibling (this is a bit of a pain)
	// else if first is COPLANAR
	// if we havent already copied it
	// copy first -> createdverts
	// set vin = index of created vertex
	// else
	// generate a new vertex -> createdverts
	// set vin = index of created vertex
	// if vout is already set and vin != vout (non-trivial edge)
	// output coplanar edge -> tmpunderedges (one sibling)
	// set coplanaredge to new edge index (for connecting the other sibling)
	// output current edge -> tmpunderedges
	//
	// else if first UNDER & second UNDER
	// output current edge -> tmpunderedges
	//
	// next edge
	//
	// if part of current plane was UNDER
	// output current plane -> tmpunderplanes
	//
	// if coplanaredge is set
	// output coplanar edge -> coplanaredges
	//
	// next plane
	//
	// Indexing is a bit tricky here:
	//
	// e0 - index of the current edge
	// e1 - index of the next edge
	// estart - index of the first edge in the current plane
	// currentplane - index of the current plane
	// enextface - first edge of next plane
	PxU32 e0 = 0;
	for (PxU32 currentplane = 0; currentplane < convex.getFacets().size(); currentplane++)
	{
		PxU32 eStart = e0;
		PxU32 eNextFace = 0xffffffff;
		PxU32 e1 = e0 + 1;
		PxU8 vout = invalidIndex;
		PxU8 vin = invalidIndex;
		PxU32 coplanarEdge = invalidIndex;
		// Logical OR of individual vertex flags in the current plane.
		PxU32 planeSide = 0;
		do{
			// Next edge modulo logic
			if (e1 >= convex.getEdges().size() || convex.getEdges()[e1].p != currentplane)
			{
				eNextFace = e1;
				e1 = eStart;
			}
			const ConvexHull::HalfEdge& edge0 = convex.getEdges()[e0];
			const ConvexHull::HalfEdge& edge1 = convex.getEdges()[e1];
			const ConvexHull::HalfEdge& edgea = convex.getEdges()[PxU32(edge0.ea)];
			planeSide |= vertFlag[edge0.v].planetest;
			if (vertFlag[edge0.v].planetest == local::eUNDER && vertFlag[edge1.v].planetest != local::eUNDER)
			{
				// first is UNDER, second is COPLANAR or OVER
				// Output current edge.
				edgeFlag[e0].undermap = short(underEdgeCount);
				tmpUnderEdges[underEdgeCount].v = vertFlag[edge0.v].undermap;
				tmpUnderEdges[underEdgeCount].p = PxU8(underPlanesCount);
				PX_ASSERT(tmpUnderEdges[underEdgeCount].v != invalidIndex);
				if (PxU32(edge0.ea) < e0)
				{
					// We have already done the sibling.
					// Connect current edge to its sibling.
					PX_ASSERT(edgeFlag[edge0.ea].undermap != invalidIndex);
					tmpUnderEdges[underEdgeCount].ea = edgeFlag[edge0.ea].undermap;
					tmpUnderEdges[edgeFlag[edge0.ea].undermap].ea = short(underEdgeCount);
					// Set vout = first vertex of (output, clipped) sibling.
					vout = tmpUnderEdges[edgeFlag[edge0.ea].undermap].v;
				}
				else if (vertFlag[edge1.v].planetest == local::eCOPLANAR)
				{
					// Boundary case.
					// We output coplanar vertices once.
					if (vertFlag[edge1.v].undermap == invalidIndex)
					{
						createdVerts.pushBack(convex.getVertices()[edge1.v]);
						// Remember the index so we don't output it again.
						vertFlag[edge1.v].undermap = PxTo8(vertCountUnder++);
					}
					vout = vertFlag[edge1.v].undermap;
				}
				else
				{
					// Add new vertex.
					const PxPlane& p0 = convex.getFacets()[edge0.p];
					const PxPlane& pa = convex.getFacets()[edgea.p];
					createdVerts.pushBack(threePlaneIntersection(p0, pa, slice));
					vout = PxTo8(vertCountUnder++);
				}
				// We added an edge, increment the counter
				underEdgeCount++;
				if (vin != invalidIndex && vin != vout)
				{
					// We already have vin and a non-trivial edge
					// Output coplanar edge
					PX_ASSERT(vout != invalidIndex);
					coplanarEdge = underEdgeCount;
					tmpUnderEdges[underEdgeCount].v = vout;
					tmpUnderEdges[underEdgeCount].p = PxU8(underPlanesCount);
					tmpUnderEdges[underEdgeCount].ea = invalidIndex;
					underEdgeCount++;
				}
			}
			else if (vertFlag[edge0.v].planetest != local::eUNDER && vertFlag[edge1.v].planetest == local::eUNDER)
			{
				// First is OVER or COPLANAR, second is UNDER.
				if (PxU32(edge0.ea) < e0)
				{
					// We have already done the sibling.
					// We need the second vertex of the sibling.
					// Which is the vertex of the next edge in the adjacent poly.
					int nea = edgeFlag[edge0.ea].undermap + 1;
					int p = tmpUnderEdges[edgeFlag[edge0.ea].undermap].p;
					if (nea >= underEdgeCount || tmpUnderEdges[nea].p != p)
					{
						// End of polygon, next edge is first edge
						nea -= 2;
						while (nea > 0 && tmpUnderEdges[nea - 1].p == p)
							nea--;
					}
					vin = tmpUnderEdges[nea].v;
					PX_ASSERT(vin < vertCountUnder);
				}
				else if (vertFlag[edge0.v].planetest == local::eCOPLANAR)
				{
					// Boundary case.
					// We output coplanar vertices once.
					if (vertFlag[edge0.v].undermap == invalidIndex)
					{
						createdVerts.pushBack(convex.getVertices()[edge0.v]);
						// Remember the index so we don't output it again.
						vertFlag[edge0.v].undermap = PxTo8(vertCountUnder++);
					}
					vin = vertFlag[edge0.v].undermap;
				}
				else
				{
					// Add new vertex.
					const PxPlane& p0 = convex.getFacets()[edge0.p];
					const PxPlane& pa = convex.getFacets()[edgea.p];
					createdVerts.pushBack(threePlaneIntersection(p0, pa, slice));
					vin = PxTo8(vertCountUnder++);
				}
				if (vout != invalidIndex && vin != vout)
				{
					// We have been in and out, Add the coplanar edge
					coplanarEdge = underEdgeCount;
					tmpUnderEdges[underEdgeCount].v = vout;
					tmpUnderEdges[underEdgeCount].p = PxTo8(underPlanesCount);
					tmpUnderEdges[underEdgeCount].ea = invalidIndex;
					underEdgeCount++;
				}
				// Output current edge.
				tmpUnderEdges[underEdgeCount].v = vin;
				tmpUnderEdges[underEdgeCount].p = PxTo8(underPlanesCount);
				edgeFlag[e0].undermap = short(underEdgeCount);
				if (PxU32(edge0.ea) < e0)
				{
					// We have already done the sibling.
					// Connect current edge to its sibling.
					PX_ASSERT(edgeFlag[edge0.ea].undermap != invalidIndex);
					tmpUnderEdges[underEdgeCount].ea = edgeFlag[edge0.ea].undermap;
					tmpUnderEdges[edgeFlag[edge0.ea].undermap].ea = short(underEdgeCount);
				}
				PX_ASSERT(edgeFlag[e0].undermap == underEdgeCount);
				underEdgeCount++;
			}
			else if (vertFlag[edge0.v].planetest == local::eUNDER && vertFlag[edge1.v].planetest == local::eUNDER)
			{
				// Both UNDER
				// Output current edge.
				edgeFlag[e0].undermap = short(underEdgeCount);
				tmpUnderEdges[underEdgeCount].v = vertFlag[edge0.v].undermap;
				tmpUnderEdges[underEdgeCount].p = PxTo8(underPlanesCount);
				if (PxU32(edge0.ea) < e0)
				{
					// We have already done the sibling.
					// Connect current edge to its sibling.
					PX_ASSERT(edgeFlag[edge0.ea].undermap != invalidIndex);
					tmpUnderEdges[underEdgeCount].ea = edgeFlag[edge0.ea].undermap;
					tmpUnderEdges[edgeFlag[edge0.ea].undermap].ea = short(underEdgeCount);
				}
				underEdgeCount++;
			}
			e0 = e1;
			e1++; // do the modulo at the beginning of the loop
		} while (e0 != eStart);
		e0 = eNextFace;
		if (planeSide & local::eUNDER)
		{
			// At least part of current plane is UNDER.
			// Output current plane.
			tmpUnderPlanes[underPlanesCount] = convex.getFacets()[currentplane];
			underPlanesCount++;
		}
		if (coplanarEdge != invalidIndex)
		{
			// We have a coplanar edge.
			// Add to coplanaredges for later processing.
			// (One sibling is in place but one is missing)
			PX_ASSERT(vin != invalidIndex);
			PX_ASSERT(vout != invalidIndex);
			PX_ASSERT(coplanarEdge != 511);
			coplanarEdges[coplanarEdgesNum].ea = PxU8(coplanarEdge);
			coplanarEdges[coplanarEdgesNum].v0 = vin;
			coplanarEdges[coplanarEdgesNum].v1 = vout;
			coplanarEdgesNum++;
		}
		// Reset coplanar edge infos for next poly
		vin = invalidIndex;
		vout = invalidIndex;
		coplanarEdge = invalidIndex;
	}
	// Add the new plane to the mix:
	if (coplanarEdgesNum > 0)
	{
		tmpUnderPlanes[underPlanesCount++] = slice;
	}
	// Sort the coplanar edges in winding order.
	// BUG FIX: "i + 1 < coplanarEdgesNum" instead of "i < coplanarEdgesNum - 1".
	// coplanarEdgesNum is unsigned; when it is 0 the old subtraction wrapped
	// around and the loop read uninitialized coplanarEdges entries.
	for (i = 0; i + 1 < coplanarEdgesNum; i++)
	{
		if (coplanarEdges[i].v1 != coplanarEdges[i + 1].v0)
		{
			PxU32 j = 0;
			for (j = i + 2; j < coplanarEdgesNum; j++)
			{
				if (coplanarEdges[i].v1 == coplanarEdges[j].v0)
				{
					local::Coplanar tmp = coplanarEdges[i + 1];
					coplanarEdges[i + 1] = coplanarEdges[j];
					coplanarEdges[j] = tmp;
					break;
				}
			}
			if (j >= coplanarEdgesNum)
			{
				// PX_ASSERT(j<coplanaredges_num);
				return NULL;
			}
		}
	}
	// PT: added this line to fix DE2904
	if (!vertCountUnder)
		return NULL;
	// Create the output convex.
	ConvexHull* punder = PX_NEW(ConvexHull)(convex.getInputPlanes());
	ConvexHull& under = *punder;
	// Copy UNDER vertices
	PxU32 k = 0;
	for (i = 0; i < convex.getVertices().size(); i++)
	{
		if (vertFlag[i].planetest == local::eUNDER)
		{
			under.getVertices().pushBack(convex.getVertices()[i]);
			k++;
		}
	}
	// Copy created vertices
	i = 0;
	while (k < vertCountUnder)
	{
		under.getVertices().pushBack(createdVerts[i++]);
		k++;
	}
	PX_ASSERT(i == createdVerts.size());
	// Copy the output edges and output planes.
	under.getEdges().resize(underEdgeCount + coplanarEdgesNum);
	under.getFacets().resize(underPlanesCount);
	// Add the coplanar edge siblings that belong to the new polygon (coplanaredges).
	for (i = 0; i < coplanarEdgesNum; i++)
	{
		under.getEdges()[underEdgeCount + i].p = PxU8(underPlanesCount - 1);
		under.getEdges()[underEdgeCount + i].ea = short(coplanarEdges[i].ea);
		tmpUnderEdges[coplanarEdges[i].ea].ea = PxI16(underEdgeCount + i);
		under.getEdges()[underEdgeCount + i].v = coplanarEdges[i].v0;
	}
	PxMemCopy(under.getEdges().begin(), tmpUnderEdges, sizeof(ConvexHull::HalfEdge)*underEdgeCount);
	PxMemCopy(under.getFacets().begin(), tmpUnderPlanes, sizeof(PxPlane)*underPlanesCount);
	return punder;
}
bool physx::computeOBBFromConvex(const PxConvexMeshDesc& desc, PxVec3& sides, PxTransform& matrix)
{
PxIntegrals integrals;
// using the centroid of the convex for the volume integration solved accuracy issues in cases where the inertia tensor
// ended up close to not being positive definite and after a few further transforms the diagonalized inertia tensor ended
// up with negative values.
const PxVec3* verts = (reinterpret_cast<const PxVec3*>(desc.points.data));
const PxU32* ind = (reinterpret_cast<const PxU32*>(desc.indices.data));
const PxHullPolygon* polygons = (reinterpret_cast<const PxHullPolygon*>(desc.polygons.data));
PxVec3 mean(0.0f);
for (PxU32 i = 0; i < desc.points.count; i++)
mean += verts[i];
mean *= (1.0f / desc.points.count);
PxU8* indices = PX_ALLOCATE(PxU8, desc.indices.count, "PxU8");
for (PxU32 i = 0; i < desc.indices.count; i++)
{
indices[i] = PxTo8(ind[i]);
}
// we need to move the polygon data to internal format
Gu::HullPolygonData* polygonData = PX_ALLOCATE(Gu::HullPolygonData, desc.polygons.count, "Gu::HullPolygonData");
for (PxU32 i = 0; i < desc.polygons.count; i++)
{
polygonData[i].mPlane = PxPlane(polygons[i].mPlane[0], polygons[i].mPlane[1], polygons[i].mPlane[2], polygons[i].mPlane[3]);
polygonData[i].mNbVerts = PxTo8(polygons[i].mNbVerts);
polygonData[i].mVRef8 = polygons[i].mIndexBase;
}
PxConvexMeshDesc inDesc;
inDesc.points.data = desc.points.data;
inDesc.points.count = desc.points.count;
inDesc.polygons.data = polygonData;
inDesc.polygons.count = desc.polygons.count;
inDesc.indices.data = indices;
inDesc.indices.count = desc.indices.count;
// compute volume integrals to get basis axis
if(computeVolumeIntegralsEberly(inDesc, 1.0f, integrals, mean, desc.flags & PxConvexFlag::eFAST_INERTIA_COMPUTATION))
{
Vec4V* pointsV = PX_ALLOCATE(Vec4V, desc.points.count, "Vec4V");
for (PxU32 i = 0; i < desc.points.count; i++)
{
// safe to V4 load, same as volume integration - we allocate one more vector
pointsV[i] = V4LoadU(&verts[i].x);
}
PxMat33 inertia;
integrals.getOriginInertia(inertia);
PxQuat inertiaQuat;
PxDiagonalize(inertia, inertiaQuat);
const PxMat33Padded baseAxis(inertiaQuat);
Vec4V center = V4LoadU(&integrals.COM.x);
const PxU32 numSteps = 20;
const float subStep = PxDegToRad(float(360/numSteps));
float bestVolume = FLT_MAX;
for (PxU32 axis = 0; axis < 3; axis++)
{
for (PxU32 iStep = 0; iStep < numSteps; iStep++)
{
PxQuat quat(iStep*subStep, baseAxis[axis]);
Vec4V transV = center;
Vec4V psidesV;
const QuatV rotV = QuatVLoadU(&quat.x);
local::computeOBBSIMD(desc.points.count, pointsV, psidesV, rotV, transV);
PxVec3 psides;
V3StoreU(Vec3V_From_Vec4V(psidesV), psides);
const float volume = psides[0] * psides[1] * psides[2]; // the volume of the cube
if (volume <= bestVolume)
{
bestVolume = volume;
sides = psides;
V4StoreU(rotV, &matrix.q.x);
V3StoreU(Vec3V_From_Vec4V(transV), matrix.p);
}
}
}
PX_FREE(pointsV);
}
else
{
PX_FREE(indices);
PX_FREE(polygonData);
return false;
}
PX_FREE(indices);
PX_FREE(polygonData);
return true;
}
| 30,807 | C++ | 32.305946 | 134 | 0.650761 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/cooking/GuCookingConvexHullBuilder.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_COOKING_CONVEX_HULL_BUILDER_H
#define GU_COOKING_CONVEX_HULL_BUILDER_H
#include "cooking/PxCooking.h"
#include "GuConvexMeshData.h"
#include "foundation/PxUserAllocated.h"
namespace physx
{
struct PxHullPolygon;
class ConvexHullLib;
namespace Gu
{
struct EdgeDescData;
struct ConvexHullData;
} // namespace Gu
// Builds the runtime convex hull representation (Gu::ConvexHullData) during
// cooking from raw vertex/index/polygon input: fills the hull's vertex,
// polygon, edge and vertex-map buffers, and can serialize or copy the result.
class ConvexHullBuilder : public PxUserAllocated
{
	public:
		// hull       - destination hull data to fill
		// buildGRBData - also build the GPU (GRB) data
		ConvexHullBuilder(Gu::ConvexHullData* hull, const bool buildGRBData);
		~ConvexHullBuilder();
		// Fills the hull from user-provided vertices/indices/polygons.
		// Returns false on invalid input (when doValidation is set).
		bool init(PxU32 nbVerts, const PxVec3* verts, const PxU32* indices, const PxU32 nbIndices, const PxU32 nbPolygons,
					const PxHullPolygon* hullPolygons, bool doValidation = true, ConvexHullLib* hullLib = NULL);
		// Serializes the built hull to a stream (endian-swapping on platform mismatch).
		bool save(PxOutputStream& stream, bool platformMismatch) const;
		// Copies the built hull into 'hullData'; 'nb' receives a count (see .cpp).
		bool copy(Gu::ConvexHullData& hullData, PxU32& nb);
		// Builds the edge list from the polygon topology.
		bool createEdgeList(bool doValidation, PxU32 nbEdges);
		// Validates polygon data consistency.
		bool checkHullPolygons() const;
		// Builds the per-polygon vertex map table.
		bool calculateVertexMapTable(PxU32 nbPolygons, bool userPolygons = false);
		// Number of polygons of the built hull (must be non-zero by now).
		PX_INLINE PxU32 computeNbPolygons() const
		{
			PX_ASSERT(mHull->mNbPolygons);
			return mHull->mNbPolygons;
		}
		// Raw pointers into the hull's data blocks (set up during init/copy).
		PxVec3* mHullDataHullVertices;
		Gu::HullPolygonData* mHullDataPolygons;
		PxU8* mHullDataVertexData8;
		PxU8* mHullDataFacesByEdges8;
		PxU8* mHullDataFacesByVertices8;
		PxU16* mEdgeData16; //!< Edge indices indexed by hull polygons
		PxU16* mEdges; //!< Edge to vertex mapping
		Gu::ConvexHullData* mHull; //!< Destination hull being built
		bool mBuildGRBData; //!< Whether GPU (GRB) data is also built
};
}
#endif
| 3,378 | C | 37.397727 | 125 | 0.715808 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/cooking/GuCookingTriangleMesh.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_COOKING_TRIANGLE_MESH_H
#define GU_COOKING_TRIANGLE_MESH_H
#include "GuMeshData.h"
#include "cooking/PxCooking.h"
namespace physx
{
namespace Gu
{
class EdgeList;
}
// Base class for cooking a triangle mesh: loads a PxTriangleMeshDesc,
// cleans/welds it, builds edge/adjacency and (optionally) GPU (GRB) data,
// and serializes the result. Derived classes supply the midphase
// acceleration structure (RTree/BVH33 or BV4/BVH34).
class TriangleMeshBuilder
{
public:
								TriangleMeshBuilder(Gu::TriangleMeshData& mesh, const PxCookingParams& params);
	virtual						~TriangleMeshBuilder();
	// Midphase identifier of the concrete builder.
	virtual PxMeshMidPhase::Enum getMidphaseID() const = 0;
	// Called by base code when midphase structure should be built
	virtual bool				createMidPhaseStructure() = 0;
	// Called by base code when midphase structure should be saved
	virtual void				saveMidPhaseStructure(PxOutputStream& stream, bool mismatch) const = 0;
	// Called by base code when mesh index format has changed and the change should be reflected in midphase structure
	virtual void				onMeshIndexFormatChange() {}
				bool			cleanMesh(bool validate, PxTriangleMeshCookingResult::Enum* condition);
				void			remapTopology(const PxU32* order);
				void			createVertMapping();
				void			createSharedEdgeData(bool buildAdjacencies, bool buildActiveEdges);
				void			recordTriangleIndices();
				bool			createGRBMidPhaseAndData(const PxU32 originalTriangleCount);
				void			createGRBData();
				// Main entry point: builds the mesh from a descriptor.
				bool			loadFromDesc(const PxTriangleMeshDesc&, PxTriangleMeshCookingResult::Enum* condition, bool validate = false);
				// Serializes the cooked mesh (endian-swapping on platform mismatch).
				bool			save(PxOutputStream& stream, bool platformMismatch, const PxCookingParams& params) const;
				void			checkMeshIndicesSize();
	PX_FORCE_INLINE	Gu::TriangleMeshData& getMeshData() { return mMeshData; }
protected:
				bool			importMesh(const PxTriangleMeshDesc& desc, PxTriangleMeshCookingResult::Enum* condition, bool validate = false);
				bool			loadFromDescInternal(PxTriangleMeshDesc&, PxTriangleMeshCookingResult::Enum* condition, bool validate = false);
				void			buildInertiaTensor(bool flipNormals = false);
				void			buildInertiaTensorFromSDF();
	TriangleMeshBuilder&		operator=(const TriangleMeshBuilder&);	// non-assignable (reference members)
	Gu::EdgeList*				mEdgeList;		// shared edge/adjacency data, built on demand
	const PxCookingParams&		mParams;		// cooking parameters (not owned)
	Gu::TriangleMeshData&		mMeshData;		// destination mesh data (not owned)
};
// Triangle-mesh builder producing the RTree (BVH33) midphase structure.
class RTreeTriangleMeshBuilder : public TriangleMeshBuilder
{
public:
							RTreeTriangleMeshBuilder(const PxCookingParams& params);
	virtual					~RTreeTriangleMeshBuilder();
	virtual PxMeshMidPhase::Enum getMidphaseID() const PX_OVERRIDE { return PxMeshMidPhase::eBVH33; }
	virtual bool			createMidPhaseStructure()	PX_OVERRIDE;
	virtual void			saveMidPhaseStructure(PxOutputStream& stream, bool mismatch)	const	PX_OVERRIDE;
	Gu::RTreeTriangleData	mData;	// mesh data + RTree owned by this builder
};
// Triangle-mesh builder producing the BV4 (BVH34) midphase structure.
class BV4TriangleMeshBuilder : public TriangleMeshBuilder
{
public:
							BV4TriangleMeshBuilder(const PxCookingParams& params);
	virtual					~BV4TriangleMeshBuilder();
	virtual PxMeshMidPhase::Enum getMidphaseID() const PX_OVERRIDE { return PxMeshMidPhase::eBVH34; }
	virtual bool			createMidPhaseStructure()	PX_OVERRIDE;
	virtual void			saveMidPhaseStructure(PxOutputStream& stream, bool mismatch)	const	PX_OVERRIDE;
	// BV4 trees index triangles, so an index-format change must be propagated.
	virtual void			onMeshIndexFormatChange();
	Gu::BV4TriangleData		mData;	// mesh data + BV4 tree owned by this builder
};
// Stateless helpers for building/saving the BV32 (GPU) midphase structure.
class BV32TriangleMeshBuilder
{
public:
	static bool	createMidPhaseStructure(const PxCookingParams& params, Gu::TriangleMeshData& meshData, Gu::BV32Tree& bv32Tree);
	static void	saveMidPhaseStructure(Gu::BV32Tree* tree, PxOutputStream& stream, bool mismatch);
};
}
#endif
| 5,182 | C | 41.138211 | 130 | 0.743921 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/cooking/GuCookingTetrahedronMesh.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
#define USE_GJK_VIRTUAL
#include "GuCookingTetrahedronMesh.h"
#include "GuTetrahedron.h"
#include "GuInternal.h"
#include "foundation/PxHashMap.h"
#include "GuCookingTriangleMesh.h"
#include "GuBV4Build.h"
#include "GuBV32Build.h"
#include "GuDistancePointTetrahedron.h"
#ifdef USE_GJK_VIRTUAL
#include "GuGJKTest.h"
#else
#include "GuGJKUtil.h"
#include "GuGJK.h"
#endif
#include "GuVecTetrahedron.h"
#include "GuGJKType.h"
#include "GuCooking.h"
#include "GuBounds.h"
#include "CmSerialize.h"
#include "foundation/PxFPU.h"
#include "common/PxInsertionCallback.h"
using namespace physx;
using namespace Gu;
using namespace Cm;
using namespace physx;
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/*TetrahedronMeshBuilder::TetrahedronMeshBuilder(const PxCookingParams& params) : mParams(params)
{
}*/
// Mirrors the collision mesh's tetrahedron indices into the GPU (GRB) index
// buffer. No-op unless GPU data was requested at cooking time.
void TetrahedronMeshBuilder::recordTetrahedronIndices(const TetrahedronMeshData& collisionMesh, SoftBodyCollisionData& collisionData, bool buildGPUData)
{
	if (!buildGPUData)
		return;

	// GPU path only supports 32-bit indices and requires the buffer to exist.
	PX_ASSERT(!(collisionMesh.mFlags & PxTriangleMeshFlag::e16_BIT_INDICES));
	PX_ASSERT(collisionData.mGRB_primIndices);

	// copy the BV4 tetrahedron indices to mGRB_primIndices
	PxMemCopy(collisionData.mGRB_primIndices, collisionMesh.mTetrahedrons, sizeof(IndTetrahedron32) * collisionMesh.mNbTetrahedrons);
}
// Hash-map key identifying a tetrahedron face by its three vertex indices.
// The indices are stored in canonical ascending order (mRef0 <= mRef1 <= mRef2) so
// that the same face reached from two adjacent tetrahedra compares equal, while the
// original winding is preserved in mOrigRef for later lookups.
class SortedTriangleInds
{
public:
	SortedTriangleInds() {}

	SortedTriangleInds(const PxU32 ref0, const PxU32 ref1, const PxU32 ref2)
	{
		initialize(ref0, ref1, ref2);
	}

	SortedTriangleInds(const PxU16 ref0, const PxU16 ref1, const PxU16 ref2)
	{
		initialize(PxU32(ref0), PxU32(ref1), PxU32(ref2));
	}

	// Remember the original triple and derive the sorted triple.
	void initialize(const PxU32 ref0, const PxU32 ref1, const PxU32 ref2)
	{
		mOrigRef[0] = ref0;
		mOrigRef[1] = ref1;
		mOrigRef[2] = ref2;

		mRef0 = PxMin(PxMin(ref0, ref1), ref2);
		mRef2 = PxMax(PxMax(ref0, ref1), ref2);
		// The middle element is whatever remains once min and max are removed.
		// Unsigned wrap-around keeps this exact even if the sum exceeds 32 bits.
		mRef1 = ref0 + ref1 + ref2 - mRef0 - mRef2;
	}

	bool operator == (const SortedTriangleInds& other) const
	{
		return (mRef0 == other.mRef0) && (mRef1 == other.mRef1) && (mRef2 == other.mRef2);
	}

	// Packs the low 16 bits of each sorted index into a 48-bit key, then runs a
	// 64-bit integer mix and folds the result down to 32 bits.
	static uint32_t hash(const SortedTriangleInds key)
	{
		const uint64_t k0 = key.mRef0 & 0xffff;
		const uint64_t k1 = key.mRef1 & 0xffff;
		const uint64_t k2 = key.mRef2 & 0xffff;

		uint64_t k = (k2 << 32) | (k1 << 16) | k0;
		k += ~(k << 32);
		k ^= (k >> 22);
		k += ~(k << 13);
		k ^= (k >> 8);
		k += (k << 3);
		k ^= (k >> 15);
		k += ~(k << 27);
		k ^= (k >> 31);
		return uint32_t(UINT32_MAX & k);
	}

	void setTetIndex(const PxU32 tetIndex)	{ mTetIndex = tetIndex; }
	PxU32 getTetIndex()						{ return mTetIndex; }

	PxU32 mOrigRef[3];
	PxU32 mRef0;
	PxU32 mRef1;
	PxU32 mRef2;
	PxU32 mTetIndex;
};
// Hash/equality policy adapter allowing SortedTriangleInds to be used as a PxHashMap key.
struct SortedTriangleIndsHash
{
	uint32_t operator()(const SortedTriangleInds& key) const
	{
		return SortedTriangleInds::hash(key);
	}

	bool equal(const SortedTriangleInds& lhs, const SortedTriangleInds& rhs) const
	{
		return lhs == rhs;
	}
};
#if PX_CHECKED
bool checkInputFloats(PxU32 nb, const float* values, const char* file, PxU32 line, const char* errorMsg);
#endif
// Copies the user-supplied tetrahedron mesh description into internal (compact,
// stride-free) buffers. Handles 16- vs 32-bit input indices, optional per-tet
// material indices, and builds an identity face-remap table when remapping is
// not suppressed. Returns false only if input vertex validation fails (checked builds).
bool TetrahedronMeshBuilder::importMesh(const PxTetrahedronMeshDesc& collisionMeshDesc, const PxCookingParams& params,
TetrahedronMeshData& collisionMesh, SoftBodyCollisionData& collisionData, bool validateMesh)
{
PX_UNUSED(validateMesh);
//convert and clean the input mesh
//this is where the mesh data gets copied from user mem to our mem
PxVec3* verts = collisionMesh.allocateVertices(collisionMeshDesc.points.count);
collisionMesh.allocateTetrahedrons(collisionMeshDesc.tetrahedrons.count, 1);
if (params.buildGPUData)
collisionData.allocateCollisionData(collisionMeshDesc.tetrahedrons.count);
TetrahedronT<PxU32>* tets = reinterpret_cast<TetrahedronT<PxU32>*>(collisionMesh.mTetrahedrons);
//copy, and compact to get rid of strides:
immediateCooking::gatherStrided(collisionMeshDesc.points.data, verts, collisionMesh.mNbVertices, sizeof(PxVec3), collisionMeshDesc.points.stride);
#if PX_CHECKED
// PT: check all input vertices are valid
if(!checkInputFloats(collisionMeshDesc.points.count*3, &verts->x, PX_FL, "input mesh contains corrupted vertex data"))
return false;
#endif
// Widen/copy the index stream manually because the source stride is arbitrary.
TetrahedronT<PxU32>* dest = tets;
const TetrahedronT<PxU32>* pastLastDest = tets + collisionMesh.mNbTetrahedrons;
const PxU8* source = reinterpret_cast<const PxU8*>(collisionMeshDesc.tetrahedrons.data);
PX_ASSERT(source);
//4 combos of 16 vs 32, feed in collisionMesh.mTetrahedrons
if (collisionMeshDesc.flags & PxMeshFlag::e16_BIT_INDICES)
{
// 16-bit input indices: widen each of the 4 vertex refs to 32 bits.
while (dest < pastLastDest)
{
const PxU16 *tet16 = reinterpret_cast<const PxU16*>(source);
dest->v[0] = tet16[0];
dest->v[1] = tet16[1];
dest->v[2] = tet16[2];
dest->v[3] = tet16[3];
dest++;
source += collisionMeshDesc.tetrahedrons.stride;
}
}
else
{
// 32-bit input indices: straight strided copy.
while (dest < pastLastDest)
{
const PxU32 * tet32 = reinterpret_cast<const PxU32*>(source);
dest->v[0] = tet32[0];
dest->v[1] = tet32[1];
dest->v[2] = tet32[2];
dest->v[3] = tet32[3];
dest++;
source += collisionMeshDesc.tetrahedrons.stride;
}
}
//copy the material index list if any:
if (collisionMeshDesc.materialIndices.data)
{
// NOTE(review): gather uses sizeof(PxMaterialTableIndex) while the destination is
// PxFEMMaterialTableIndex — presumably both are 16-bit; confirm they stay in sync.
PxFEMMaterialTableIndex* materials = collisionMesh.allocateMaterials();
immediateCooking::gatherStrided(collisionMeshDesc.materialIndices.data, materials, collisionMesh.mNbTetrahedrons, sizeof(PxMaterialTableIndex), collisionMeshDesc.materialIndices.stride);
// Check material indices
for (PxU32 i = 0; i < collisionMesh.mNbTetrahedrons; i++) PX_ASSERT(materials[i] != 0xffff);
}
// we need to fill the remap table if no cleaning was done
if (params.suppressTriangleMeshRemapTable == false)
{
// No mesh cleaning happens for tetmeshes here, so the remap is the identity.
PX_ASSERT(collisionData.mFaceRemap == NULL);
collisionData.mFaceRemap = PX_ALLOCATE(PxU32, collisionMesh.mNbTetrahedrons, "mFaceRemap");
for (PxU32 i = 0; i < collisionMesh.mNbTetrahedrons; i++)
collisionData.mFaceRemap[i] = i;
}
return true;
}
// Builds the GPU (GRB) midphase structures for the collision mesh:
// - a BV32 tree over the tetrahedra, and
// - a per-tetrahedron "surface hint" byte whose low 4 bits mark which of the
//   tet's 4 faces lie on the mesh surface (i.e. are referenced by exactly one tet).
// Returns false if the BV32 midphase construction fails. No-op when GPU data
// is not requested.
bool TetrahedronMeshBuilder::createGRBMidPhaseAndData(const PxU32 originalTetrahedronCount, TetrahedronMeshData& collisionMesh, SoftBodyCollisionData& collisionData, const PxCookingParams& params)
{
	PX_UNUSED(originalTetrahedronCount);

	if (params.buildGPUData)
	{
		PX_ASSERT(!(collisionMesh.mFlags & PxTriangleMeshFlag::e16_BIT_INDICES));

		BV32Tree* bv32Tree = PX_NEW(BV32Tree);
		collisionData.mGRB_BV32Tree = bv32Tree;

		if(!BV32TetrahedronMeshBuilder::createMidPhaseStructure(params, collisionMesh, *bv32Tree, collisionData))
			return false;

		//create surface triangles, one tetrahedrons has 4 triangles
		// Count how many tetrahedra reference each canonicalized face; a face
		// referenced exactly once is an open (surface) face.
		PxHashMap<SortedTriangleInds, PxU32, SortedTriangleIndsHash> triIndsMap;

		//for trigs index stride conversion and eventual reordering is also needed, I don't think flexicopy can do that for us.
		IndTetrahedron32* dest = reinterpret_cast<IndTetrahedron32*>(collisionData.mGRB_primIndices);

		for(PxU32 i = 0; i < collisionMesh.mNbTetrahedrons; ++i)
		{
			IndTetrahedron32& tetInd = dest[i];

			// The 4 faces of tetrahedron i. The key remembers the tet index of the
			// first tet that inserted it; for surface faces (count == 1) that is
			// the unique owning tet.
			SortedTriangleInds t0(tetInd.mRef[0], tetInd.mRef[1], tetInd.mRef[2]);
			t0.setTetIndex(i);
			triIndsMap[t0] += 1;

			SortedTriangleInds t1(tetInd.mRef[1], tetInd.mRef[3], tetInd.mRef[2]);
			t1.setTetIndex(i);
			triIndsMap[t1] += 1;

			SortedTriangleInds t2(tetInd.mRef[0], tetInd.mRef[3], tetInd.mRef[1]);
			t2.setTetIndex(i);
			triIndsMap[t2] += 1;

			SortedTriangleInds t3(tetInd.mRef[0], tetInd.mRef[2], tetInd.mRef[3]);
			t3.setTetIndex(i);
			triIndsMap[t3] += 1;
		}

		PxMemZero(collisionData.mGRB_tetraSurfaceHint, collisionMesh.mNbTetrahedrons * sizeof(PxU8));
		PxU8* tetHint = reinterpret_cast<PxU8*>(collisionData.mGRB_tetraSurfaceHint);

		PxU32 triCount = 0;

		//compute the surface triangles for the tetrahedron mesh
		for (PxHashMap<SortedTriangleInds, PxU32, SortedTriangleIndsHash>::Iterator iter = triIndsMap.getIterator(); !iter.done(); ++iter)
		{
			SortedTriangleInds key = iter->first;

			// only output faces that are referenced by one tet (open faces)
			if (iter->second == 1)
			{
				// Reconstruct which 3 of the owning tet's 4 vertices form this face.
				PxU8 triHint = 0;
				IndTetrahedron32& localTetra = dest[key.mTetIndex];
				for (PxU32 i = 0; i < 3; ++i)
				{
					if (key.mOrigRef[i] == localTetra.mRef[0])
						triHint |= 1;
					else if (key.mOrigRef[i] == localTetra.mRef[1])
						triHint |= (1 << 1);
					else if (key.mOrigRef[i] == localTetra.mRef[2])
						triHint |= (1 << 2);
					else if (key.mOrigRef[i] == localTetra.mRef[3])
						triHint |= (1 << 3);
				}

				//if this tetrahedron isn't surface tetrahedron, hint will be zero
				//otherwise, the first 4 bits will indicate the indice of the
				//surface triangle
				// Map the vertex-membership bits to a face index: 0111 -> face 0,
				// 1011 -> face 1, 1101 -> face 2, 1110 -> face 3.
				PxU32 mask = 0;
				if (triHint == 7) //0111
				{
					mask = 1 << 0;
				}
				else if (triHint == 11)//1011
				{
					mask = 1 << 1;
				}
				else if (triHint == 13)//1101
				{
					mask = 1 << 2;
				}
				else //1110
				{
					// NOTE(review): this branch assumes triHint == 14; a degenerate tet
					// with repeated vertex indices could also land here — confirm inputs
					// are non-degenerate at this stage.
					mask = 1 << 3;
				}

				tetHint[key.mTetIndex] |= mask;
				triCount++;
			}
		}

#if BV32_VALIDATE
		IndTetrahedron32* grbTriIndices = reinterpret_cast<IndTetrahedron32*>(collisionData.mGRB_primIndices);
		IndTetrahedron32* cpuTriIndices = reinterpret_cast<IndTetrahedron32*>(collisionMesh.mTetrahedrons);
		//map CPU remap triangle index to GPU remap triangle index
		// Fixed: this loop previously read an undeclared 'nbTetrahedrons' and did not
		// compile with BV32_VALIDATE enabled.
		for (PxU32 i = 0; i < collisionMesh.mNbTetrahedrons; ++i)
		{
			PX_ASSERT(grbTriIndices[i].mRef[0] == cpuTriIndices[collisionData.mGRB_faceRemap[i]].mRef[0]);
			PX_ASSERT(grbTriIndices[i].mRef[1] == cpuTriIndices[collisionData.mGRB_faceRemap[i]].mRef[1]);
			PX_ASSERT(grbTriIndices[i].mRef[2] == cpuTriIndices[collisionData.mGRB_faceRemap[i]].mRef[2]);
			PX_ASSERT(grbTriIndices[i].mRef[3] == cpuTriIndices[collisionData.mGRB_faceRemap[i]].mRef[3]);
		}
#endif
	}

	return true;
}
// Computes the inverse rest-pose matrix of every tetrahedron and (optionally)
// accumulates a quarter of each tet's volume onto its four vertices.
// \param tetIndices  tetrahedron vertex indices, nbTets entries
// \param nbTets      number of tetrahedra
// \param verts       vertex positions referenced by tetIndices
// \param invMasses   optional per-vertex accumulator (volume fractions are ADDED); may be NULL
// \param restPoses   output array of nbTets inverse rest-pose matrices
void computeRestPoseAndPointMass(TetrahedronT<PxU32>* tetIndices, const PxU32 nbTets,
	const PxVec3* verts, PxReal* invMasses, PxMat33* restPoses)
{
	for (PxU32 tetIdx = 0; tetIdx < nbTets; ++tetIdx)
	{
		const TetrahedronT<PxU32>& tet = tetIndices[tetIdx];

		PxMat33 Q;
		const PxReal volume = computeTetrahedronVolume(verts[tet.v[0]], verts[tet.v[1]], verts[tet.v[2]], verts[tet.v[3]], Q);

		// Degenerate or inverted tets are tolerated (the Neo-hookean model copes with
		// them), so no error is raised here. Only an exactly-zero volume makes Q
		// non-invertible; map that case to the zero matrix.
		restPoses[tetIdx] = (volume == 0) ? PxMat33(PxZero) : Q.getInverse();

		// add volume fraction to particles
		if (invMasses != NULL)
		{
			const PxReal quarterVolume = volume * 0.25f;
			invMasses[tet.v[0]] += quarterVolume;
			invMasses[tet.v[1]] += quarterVolume;
			invMasses[tet.v[2]] += quarterVolume;
			invMasses[tet.v[3]] += quarterVolume;
		}
	}
}
#define MAX_NUM_PARTITIONS 32
// Tries to assign an element (numTetsPerElement consecutive tets) to the lowest
// partition (0..MAX_NUM_PARTITIONS-1) not yet used by any of its vertices.
// On success, marks that partition bit on every vertex of the element and returns
// partitionStartIndex + slot; returns 0xFFFFFFFF when all 32 slots are taken.
PxU32 computeTetrahedronPartition(const TetrahedronT<PxU32>* tets, const PxU32 partitionStartIndex, PxU32* partitionProgresses,
	const PxU32 numTetsPerElement)
{
	// OR together the partition bits already claimed by every vertex of the element.
	// (Equivalent to the complement-AND formulation by De Morgan.)
	PxU32 usedMask = 0;
	for (PxU32 t = 0; t < numTetsPerElement; ++t)
	{
		for (PxU32 v = 0; v < 4; ++v)
			usedMask |= partitionProgresses[tets[t].v[v]];
	}

	const PxU32 freeMask = ~usedMask;
	if (freeMask == 0)
		return 0xFFFFFFFF;	// no free partition in this window of 32

	const PxU32 slot = PxLowestSetBit(freeMask);
	const PxU32 slotBit = (1u << slot);

	// Claim the slot on every vertex so later elements sharing a vertex avoid it.
	for (PxU32 t = 0; t < numTetsPerElement; ++t)
	{
		for (PxU32 v = 0; v < 4; ++v)
			partitionProgresses[tets[t].v[v]] |= slotBit;
	}

	return slot + partitionStartIndex;
}
// First pass of grid-model partitioning: counts how many elements fall into each
// partition (tetrahedronsPerPartition) without writing them out. Elements that
// cannot fit in the current window of MAX_NUM_PARTITIONS partitions are deferred
// into tempTetrahedrons and retried with a fresh window until all are placed.
// Must stay in lockstep with writeTetrahedrons, which replays the same decisions.
void classifyTetrahedrons(const TetrahedronT<PxU32>* tets, const PxU32 numTets, const PxU32 numVerts, const PxU32 numTetsPerElement,
PxU32* partitionProgresses, PxU32* tempTetrahedrons, PxArray<PxU32>& tetrahedronsPerPartition)
{
//initialize the partition progress counter to be zero
PxMemZero(partitionProgresses, sizeof(PxU32) * numVerts);
PxU32 numUnpartitionedTetrahedrons = 0;
//compute partitions for each tetrahedron in the grid model
// Elements are numTetsPerElement consecutive tets, hence the stride.
for (PxU32 i = 0; i < numTets; i += numTetsPerElement)
{
const TetrahedronT<PxU32>* tet = &tets[i];
const PxU32 availablePartition = computeTetrahedronPartition(tet, 0, partitionProgresses, numTetsPerElement);
if (availablePartition == 0xFFFFFFFF)
{
// No free partition in this window; retry the element in a later pass.
tempTetrahedrons[numUnpartitionedTetrahedrons++] = i;
continue;
}
tetrahedronsPerPartition[availablePartition]++;
}
PxU32 partitionStartIndex = 0;
// Keep opening new windows of MAX_NUM_PARTITIONS partitions until every
// deferred element has been placed.
while (numUnpartitionedTetrahedrons > 0)
{
//initialize the partition progress counter to be zero
PxMemZero(partitionProgresses, sizeof(PxU32) * numVerts);
partitionStartIndex += MAX_NUM_PARTITIONS;
//Keep partitioning the un-partitioned constraints and blat the whole thing to 0!
tetrahedronsPerPartition.resize(MAX_NUM_PARTITIONS + tetrahedronsPerPartition.size());
PxMemZero(tetrahedronsPerPartition.begin() + partitionStartIndex, sizeof(PxU32) * MAX_NUM_PARTITIONS);
PxU32 newNumUnpartitionedConstraints = 0;
for (PxU32 i = 0; i < numUnpartitionedTetrahedrons; ++i)
{
const PxU32 tetInd = tempTetrahedrons[i];
const TetrahedronT<PxU32>* tet = &tets[tetInd];
const PxU32 availablePartition = computeTetrahedronPartition(tet, partitionStartIndex, partitionProgresses, numTetsPerElement);
if (availablePartition == 0xFFFFFFFF)
{
// Still no slot; compact the survivor back into tempTetrahedrons in place.
tempTetrahedrons[newNumUnpartitionedConstraints++] = tetInd;
continue;
}
tetrahedronsPerPartition[availablePartition]++;
}
numUnpartitionedTetrahedrons = newNumUnpartitionedConstraints;
}
}
// Second pass of grid-model partitioning: repeats exactly the same placement
// decisions as classifyTetrahedrons (same traversal order, same progress-counter
// resets) but this time writes each element index into orderedTetrahedrons at the
// slot given by accumulatedTetrahedronPerPartition (advanced as a write cursor).
void writeTetrahedrons(const TetrahedronT<PxU32>* tets, const PxU32 numTets, const PxU32 numVerts, const PxU32 numTetsPerElement,
PxU32* partitionProgresses, PxU32* tempTetrahedrons, PxU32* orderedTetrahedrons,
PxU32* accumulatedTetrahedronPerPartition)
{
//initialize the partition progress counter to be zero
PxMemZero(partitionProgresses, sizeof(PxU32) * numVerts);
PxU32 numUnpartitionedTetrahedrons = 0;
for (PxU32 i = 0; i < numTets; i += numTetsPerElement)
{
const TetrahedronT<PxU32>* tet = &tets[i];
const PxU32 availablePartition = computeTetrahedronPartition(tet, 0, partitionProgresses, numTetsPerElement);
if (availablePartition == 0xFFFFFFFF)
{
tempTetrahedrons[numUnpartitionedTetrahedrons++] = i;
continue;
}
//output tetrahedron
// Note: accumulatedTetrahedronPerPartition is mutated here — it doubles as
// the per-partition write cursor.
orderedTetrahedrons[accumulatedTetrahedronPerPartition[availablePartition]++] = i;
}
PxU32 partitionStartIndex = 0;
while (numUnpartitionedTetrahedrons > 0)
{
//initialize the partition progress counter to be zero
PxMemZero(partitionProgresses, sizeof(PxU32) * numVerts);
partitionStartIndex += MAX_NUM_PARTITIONS;
PxU32 newNumUnpartitionedConstraints = 0;
for (PxU32 i = 0; i < numUnpartitionedTetrahedrons; ++i)
{
const PxU32 tetInd = tempTetrahedrons[i];
const TetrahedronT<PxU32>* tet = &tets[tetInd];
const PxU32 availablePartition = computeTetrahedronPartition(tet, partitionStartIndex, partitionProgresses, numTetsPerElement);
if (availablePartition == 0xFFFFFFFF)
{
tempTetrahedrons[newNumUnpartitionedConstraints++] = tetInd;
continue;
}
//output tetrahedrons
orderedTetrahedrons[accumulatedTetrahedronPerPartition[availablePartition]++] = tetInd;
}
numUnpartitionedTetrahedrons = newNumUnpartitionedConstraints;
}
}
// Partitions the simulation (grid model) tetrahedra so that no two elements in the
// same partition share a vertex (enables parallel solves). Fills
// simulationData.mGridModelOrderedTetrahedrons / mGridModelNbPartitions and returns
// the exclusive prefix sum of elements per partition — ownership of that buffer
// transfers to the caller, who must PX_FREE it.
PxU32* computeGridModelTetrahedronPartitions(const TetrahedronMeshData& simulationMesh, SoftBodySimulationData& simulationData)
{
const PxU32 numTets = simulationMesh.mNbTetrahedrons;
const PxU32 numVerts = simulationMesh.mNbVertices;
//each grid model verts has a partition progress counter
// (a bitmask of the partitions already used by elements touching that vertex)
PxU32* partitionProgresses = PX_ALLOCATE(PxU32, numVerts, "partitionProgress");
//this store the tetrahedron index for the unpartitioned tetrahedrons
PxU32* tempTetrahedrons = PX_ALLOCATE(PxU32, numTets, "tempTetrahedrons");
PxArray<PxU32> tetrahedronsPerPartition;
tetrahedronsPerPartition.reserve(MAX_NUM_PARTITIONS);
tetrahedronsPerPartition.forceSize_Unsafe(MAX_NUM_PARTITIONS);
PxMemZero(tetrahedronsPerPartition.begin(), sizeof(PxU32) * MAX_NUM_PARTITIONS);
const TetrahedronT<PxU32>* tetGM = reinterpret_cast<TetrahedronT<PxU32>*>(simulationMesh.mTetrahedrons);
// Pass 1: count elements per partition.
classifyTetrahedrons(tetGM, numTets, numVerts, simulationData.mNumTetsPerElement, partitionProgresses,
tempTetrahedrons, tetrahedronsPerPartition);
//compute number of partitions
// Partitions are filled front-to-back, so the first empty one ends the range.
PxU32 maxPartition = 0;
for (PxU32 a = 0; a < tetrahedronsPerPartition.size(); ++a, maxPartition++)
{
if (tetrahedronsPerPartition[a] == 0)
break;
}
PxU32* accumulatedTetrahedronPerPartition = PX_ALLOCATE(PxU32, maxPartition, "accumulatedTetrahedronPerPartition");
//compute run sum
PxU32 accumulation = 0;
for (PxU32 a = 0; a < maxPartition; ++a)
{
PxU32 count = tetrahedronsPerPartition[a];
accumulatedTetrahedronPerPartition[a] = accumulation;
accumulation += count;
}
PX_ASSERT(accumulation*simulationData.mNumTetsPerElement == numTets);
simulationData.mGridModelOrderedTetrahedrons = PX_ALLOCATE(PxU32, numTets, "mGridModelPartitionTetrahedrons");
simulationData.mGridModelNbPartitions = maxPartition;
PxU32* orderedTetrahedrons = simulationData.mGridModelOrderedTetrahedrons;
// Pass 2: write element indices grouped by partition, using the prefix sums as cursors.
writeTetrahedrons(tetGM, numTets, numVerts, simulationData.mNumTetsPerElement, partitionProgresses, tempTetrahedrons,
orderedTetrahedrons, accumulatedTetrahedronPerPartition);
PX_FREE(partitionProgresses);
PX_FREE(tempTetrahedrons);
return accumulatedTetrahedronPerPartition;
}
bool findSlot(const TetrahedronT<PxU32>* tetraIndices, bool* occupied, const PxU32 tetrahedronIdx,
const PxU32 offset, const PxU32 sVertInd, const PxU32 workIndex)
{
const TetrahedronT<PxU32>& tetraInd = tetraIndices[tetrahedronIdx];
for (PxU32 i = 0; i < 4; ++i)
{
const PxU32 dVertInd = i * offset + workIndex;
if (sVertInd == tetraInd.v[i] && (!occupied[dVertInd]))
{
occupied[dVertInd] = true;
return true;
}
}
return false;
}
//output to remapOutput
bool findSlot(const TetrahedronT<PxU32>* tetraIndices, bool* occupied, const PxU32 tetrahedronIdx,
const PxU32 offset, const PxU32 sVertInd, const PxU32 sVertIndOffset, PxU32* remapOutput,
PxU32* accumulatedWriteBackIndex, const PxU32 workIndex)
{
const TetrahedronT<PxU32>& tetraInd = tetraIndices[tetrahedronIdx];
for (PxU32 i = 0; i < 4; ++i)
{
const PxU32 dVertIndOffset = i * offset + workIndex;
if (sVertInd == tetraInd.v[i] && (!occupied[dVertIndOffset]))
{
remapOutput[sVertIndOffset] = dVertIndOffset;
accumulatedWriteBackIndex[dVertIndOffset] = sVertIndOffset;
occupied[dVertIndOffset] = true;
return true;
}
}
return false;
}
// For every vertex referenced in every partition, tries to find a slot for it in a
// LATER partition (via findSlot, which also marks the slot occupied). A vertex that
// finds no later slot needs its value copied out to an accumulation buffer, so
// numCopiesEachVerts[vertInd] is incremented. 'occupied' must be pre-cleared by the
// caller; 'offset' is the per-corner stride of the slot table (slot = corner*offset + tetSlot).
void computeNumberOfCopiesPerVerts(const PxU32 maximumPartitions, PxU32* combineAccumulatedTetraPerPartitions,
const TetrahedronT<PxU32>* tetraIndices, const PxU32* orderedTetrahedrons, const PxU32 offset, bool* occupied, PxU32* numCopiesEachVerts)
{
//compute numCopiesEachVerts
PxU32 startId = 0;
for (PxU32 i = 0; i < maximumPartitions; ++i)
{
// [startId, endId) is the range of ordered-tetrahedron slots in partition i.
PxU32 endId = combineAccumulatedTetraPerPartitions[i];
for (PxU32 j = startId; j < endId; ++j)
{
const PxU32 tetrahedronInd = orderedTetrahedrons[j];
const TetrahedronT<PxU32>& tetraInd = tetraIndices[tetrahedronInd];
for (PxU32 b = 0; b < 4; ++b)
{
const PxU32 vertInd = tetraInd.v[b];
bool found = false;
// Search all subsequent partitions for a tetrahedron that also uses this
// vertex and still has a free slot for it.
for (PxU32 k = i + 1; k < maximumPartitions; ++k)
{
const PxU32 tStartId = combineAccumulatedTetraPerPartitions[k - 1];
const PxU32 tEndId = combineAccumulatedTetraPerPartitions[k];
bool foundSlotInThisPartition = false;
for (PxU32 a = tStartId; a < tEndId; ++a)
{
const PxU32 otherTetrahedronInd = orderedTetrahedrons[a];
if (findSlot(tetraIndices, occupied, otherTetrahedronInd, offset, vertInd, a))
{
foundSlotInThisPartition = true;
break;
}
}
if (foundSlotInThisPartition)
{
found = true;
break;
}
}
if (!found)
{
// Last appearance of this vertex in the partition chain: it must be
// copied to the accumulation buffer.
numCopiesEachVerts[vertInd]++;
}
}
}
startId = endId;
}
}
//compute remapOutput
// Builds the remap table that chains each vertex slot to the slot holding the same
// vertex in a later partition, or — when no later slot exists — to an entry in the
// accumulation buffer (indices >= totalNumVerts). Afterwards, each accumulation
// entry is collapsed via accumulatedWriteBackIndex so it points at the FIRST slot
// in the chain. Mirrors the slot-claiming order of computeNumberOfCopiesPerVerts.
void computeRemapOutputForVertsAndAccumulatedBuffer(const PxU32 maximumPartitions, PxU32* combineAccumulatedTetraPerPartitions,
const TetrahedronT<PxU32>* tetraIndices, const PxU32* orderedTetrahedrons, const PxU32 offset, bool* occupied, PxU32* tempNumCopiesEachVerts, const PxU32* accumulatedCopies,
const PxU32 numVerts, PxU32* remapOutput,
PxU32* accumulatedWriteBackIndex, const PxU32 totalNumCopies)
{
PxMemZero(tempNumCopiesEachVerts, sizeof(PxU32) * numVerts);
// 4 slots per tetrahedron (one per corner).
const PxU32 totalNumVerts = offset * 4;
PxMemZero(occupied, sizeof(bool) * totalNumVerts);
//initialize accumulatedWriteBackIndex to itself
for (PxU32 i = 0; i < totalNumVerts; ++i)
{
accumulatedWriteBackIndex[i] = i;
}
//compute remap output
PxU32 startId = 0;
for (PxU32 i = 0; i < maximumPartitions; ++i)
{
const PxU32 endId = combineAccumulatedTetraPerPartitions[i];
for (PxU32 j = startId; j < endId; ++j)
{
const PxU32 tetrahedronsIdx = orderedTetrahedrons[j];
const TetrahedronT<PxU32>& tetraInd = tetraIndices[tetrahedronsIdx];
for (PxU32 b = 0; b < 4; ++b)
{
const PxU32 vertInd = tetraInd.v[b];
// Flat slot index of corner b of ordered tetrahedron j.
const PxU32 vertOffset = j + offset * b;
bool found = false;
// Link this slot to the next occurrence of the vertex in a later partition.
for (PxU32 k = i + 1; k < maximumPartitions; ++k)
{
const PxU32 tStartId = combineAccumulatedTetraPerPartitions[k-1];
const PxU32 tEndId = combineAccumulatedTetraPerPartitions[k];
bool foundSlotInThisPartition = false;
for (PxU32 a = tStartId; a < tEndId; ++a)
{
const PxU32 otherTetrahedronInd = orderedTetrahedrons[a];
if (findSlot(tetraIndices, occupied, otherTetrahedronInd, offset, vertInd,
vertOffset, remapOutput, accumulatedWriteBackIndex, a))
{
foundSlotInThisPartition = true;
break;
}
}
if (foundSlotInThisPartition)
{
found = true;
break;
}
}
if (!found)
{
// End of this vertex's chain: route it into the accumulation buffer.
// accumulatedCopies holds the inclusive prefix sum of copies per vertex.
const PxU32 abVertStartInd = vertInd == 0 ? 0 : accumulatedCopies[vertInd - 1];
const PxU32 index = totalNumVerts + abVertStartInd + tempNumCopiesEachVerts[vertInd];
//remapOutput for the current vert index
remapOutput[vertOffset] = index;
//const PxU32 writebackIndex = abVertStartInd + tempNumCopiesEachVerts[vertInd];
//accumulatedWriteBackIndex[writebackIndex] = vertOffset;
// Temporarily store the back-pointer in the accumulation part of remapOutput;
// it is resolved to the chain head below.
remapOutput[index] = vertOffset;
tempNumCopiesEachVerts[vertInd]++;
}
}
}
startId = endId;
}
//PxU32* writeBackBuffer = &accumulatedWriteBackIndex[totalNumVerts];
PxU32* accumulatedBufferRemap = &remapOutput[totalNumVerts];
// Follow each write-back chain until it reaches a fixed point (the first slot
// of the vertex), and store that head as the final accumulation remap entry.
for (PxU32 i = 0; i < totalNumCopies; ++i)
{
PxU32 originalIndex = accumulatedBufferRemap[i];
PxU32 wbIndex0, wbIndex1;
do
{
wbIndex0 = originalIndex;
wbIndex1 = accumulatedWriteBackIndex[wbIndex0];
originalIndex = wbIndex1;
} while (wbIndex0 != wbIndex1);
accumulatedBufferRemap[i] = wbIndex1;
}
}
//void combineGridModelPartitions(const TetrahedronMeshData& simulationMesh, SoftBodySimulationData& simulationData, PxU32** accumulatedTetrahedronPerPartitions)
//{
// const PxU32 numTets = simulationMesh.mNbTetrahedrons;
// const PxU32 numVerts = simulationMesh.mNbVertices;
// const PxU32 nbPartitions = simulationData.mGridModelNbPartitions;
// PxU32* accumulatedTetrahedronPerPartition = *accumulatedTetrahedronPerPartitions;
//
// const PxU32 maximumPartitions = 8;
// PxU32* combineAccumulatedTetraPerPartitions = reinterpret_cast<PxU32*>(PX_ALLOC(sizeof(PxU32) * maximumPartitions, "combineAccumulatedTetraPerPartitions"));
// simulationData.mGMAccumulatedPartitionsCP = combineAccumulatedTetraPerPartitions;
//
// PxMemZero(combineAccumulatedTetraPerPartitions, sizeof(PxU32) * maximumPartitions);
// const PxU32 maxAccumulatedPartitionsPerPartitions = (nbPartitions + maximumPartitions - 1) / maximumPartitions;
// PxU32* orderedTetrahedrons = simulationData.mGridModelOrderedTetrahedrons;
// PxU32* tempOrderedTetrahedrons = reinterpret_cast<PxU32*>(PX_ALLOC(sizeof(PxU32) * numTets, "tempOrderedTetrahedrons"));
// const TetrahedronT<PxU32>* tetrahedrons = reinterpret_cast<TetrahedronT<PxU32>*>( simulationMesh.mTetrahedrons);
//
// const PxU32 maxAccumulatedCP = (nbPartitions + maximumPartitions - 1) / maximumPartitions;
// const PxU32 partitionArraySize = maxAccumulatedCP * maximumPartitions;
// const PxU32 nbPartitionTables = partitionArraySize * numVerts;
// PxU32* tempPartitionTablePerVert = reinterpret_cast<PxU32*>(PX_ALLOC(sizeof(PxU32) * nbPartitionTables, "tempPartitionTablePerVert"));
// PxU32* tempRemapTablePerVert = reinterpret_cast<PxU32*>(PX_ALLOC(sizeof(PxU32) * nbPartitionTables, "tempRemapTablePerVert"));
// PxU32* pullIndices = reinterpret_cast<PxU32*>(PX_ALLOC(sizeof(PxU32) * numTets*4, "tempRemapTablePerVert"));
// PxU32* lastRef = reinterpret_cast<PxU32*>(PX_ALLOC(sizeof(PxU32)* maxAccumulatedCP*numVerts, "refCounts"));
// PxU32* accumulatedCopiesEachVerts = reinterpret_cast<PxU32*>(PX_ALLOC(sizeof(PxU32) * numVerts, "accumulatedCopiesEachVerts"));
// simulationData.mGMAccumulatedCopiesCP = accumulatedCopiesEachVerts;
// PxU32* tempNumCopiesEachVerts = reinterpret_cast<PxU32*>(PX_ALLOC(sizeof(PxU32) * numVerts, "numCopiesEachVerts"));
// PxMemZero(tempNumCopiesEachVerts, sizeof(PxU32) * numVerts);
// PxMemSet(pullIndices, 0xffffffff, sizeof(PxU32)*numTets*4);
// PxMemSet(lastRef, 0xffffffff, sizeof(PxU32)*maxAccumulatedCP*numVerts);
// //initialize partitionTablePerVert
// for (PxU32 i = 0; i < nbPartitionTables; ++i)
// {
// tempPartitionTablePerVert[i] = 0xffffffff;
// tempRemapTablePerVert[i] = 0xffffffff;
//
// }
// PxU32 maxTetPerPartitions = 0;
// PxU32 count = 0;
// const PxU32 totalNumVerts = numTets * 4;
// PxU32 totalCopies = numVerts * maxAccumulatedCP;
// simulationData.mGridModelNbPartitions = maximumPartitions;
// simulationData.mGMRemapOutputSize = totalNumVerts + totalCopies;
// ////allocate enough memory for the verts and the accumulation buffer
// //PxVec4* orderedVertsInMassCP = reinterpret_cast<PxVec4*>(PX_ALLOC(sizeof(PxVec4) * totalNumVerts, "mGMOrderedVertInvMassCP"));
// //data.mGMOrderedVertInvMassCP = orderedVertsInMassCP;
// //compute remap table
// PxU32* remapOutput = reinterpret_cast<PxU32*>(PX_ALLOC(sizeof(PxU32) * simulationData.mGMRemapOutputSize, "remapOutput"));
// simulationData.mGMRemapOutputCP = remapOutput;
// for (PxU32 i = 0; i < maximumPartitions; ++i)
// {
// PxU32 totalTets = 0;
// for (PxU32 j = 0; j < maxAccumulatedPartitionsPerPartitions; ++j)
// {
// PxU32 partitionId = i + maximumPartitions * j;
// if (partitionId < nbPartitions)
// {
// const PxU32 startInd = partitionId == 0 ? 0 : accumulatedTetrahedronPerPartition[partitionId - 1];
// const PxU32 endInd = accumulatedTetrahedronPerPartition[partitionId];
// for (PxU32 k = startInd; k < endInd; ++k)
// {
// const PxU32 tetraInd = orderedTetrahedrons[k];
// tempOrderedTetrahedrons[count] = tetraInd;
// //tempCombinedTetraIndices[count] = tetGM[tetraInd];
// //tempTetRestPose[count] = tetRestPose[tetraInd];
// PxU32 index = i * maxAccumulatedCP + j;
// TetrahedronT<PxU32> tet = tetrahedrons[tetraInd];
// tempPartitionTablePerVert[tet.v[0] * partitionArraySize + index] = count;
// tempPartitionTablePerVert[tet.v[1] * partitionArraySize + index] = count + numTets;
// tempPartitionTablePerVert[tet.v[2] * partitionArraySize + index] = count + numTets * 2;
// tempPartitionTablePerVert[tet.v[3] * partitionArraySize + index] = count + numTets * 3;
// if (lastRef[tet.v[0] * maxAccumulatedCP + j] == 0xffffffff)
// {
// pullIndices[4 * count] = tet.v[0];
// tempNumCopiesEachVerts[tet.v[0]]++;
// }
// else
// {
// remapOutput[lastRef[tet.v[0] * maxAccumulatedCP + j]] = count;
// }
// lastRef[tet.v[0] * maxAccumulatedCP + j] = 4*count;
// if (lastRef[tet.v[1] * maxAccumulatedCP + j] == 0xffffffff)
// {
// pullIndices[4 * count + 1] = tet.v[1];
// tempNumCopiesEachVerts[tet.v[1]]++;
// }
// else
// {
// remapOutput[lastRef[tet.v[1] * maxAccumulatedCP + j]] = count + numTets;
// }
// lastRef[tet.v[1] * maxAccumulatedCP + j] = 4*count + 1;
// if (lastRef[tet.v[2] * maxAccumulatedCP + j] == 0xffffffff)
// {
// pullIndices[4 * count + 2] = tet.v[2];
// tempNumCopiesEachVerts[tet.v[2]]++;
//
// }
// else
// {
// remapOutput[lastRef[tet.v[2] * maxAccumulatedCP + j]] = count + 2*numTets;
// }
// lastRef[tet.v[2] * maxAccumulatedCP + j] = 4*count+2;
// if (lastRef[tet.v[3] * maxAccumulatedCP + j] == 0xffffffff)
// {
// pullIndices[4 * count + 3] = tet.v[3];
// tempNumCopiesEachVerts[tet.v[3]]++;
// }
// else
// {
// remapOutput[lastRef[tet.v[3] * maxAccumulatedCP + j]] = count + 3*numTets;
// }
// lastRef[tet.v[3] * maxAccumulatedCP + j] = 4*count+3;
// count++;
// }
// totalTets += (endInd - startInd);
// }
// }
// combineAccumulatedTetraPerPartitions[i] = count;
// maxTetPerPartitions = PxMax(maxTetPerPartitions, totalTets);
// }
// //Last bit - output accumulation buffer...
//
// PxU32 outIndex = 0;
// simulationData.mGridModelMaxTetsPerPartitions = maxTetPerPartitions;
// //If this commented out, we don't use combined partition anymore
// PxMemCopy(orderedTetrahedrons, tempOrderedTetrahedrons, sizeof(PxU32) * numTets);
// /*bool* tempOccupied = reinterpret_cast <bool*>( PX_ALLOC(sizeof(bool) * totalNumVerts, "tempOccupied"));
// PxMemZero(tempOccupied, sizeof(bool) * totalNumVerts);*/
//
// //data.mGridModelNbPartitions = maximumPartitions;
// //data.mGMRemapOutputSize = totalNumVerts + totalCopies;
// simulationData.mGridModelNbPartitions = maximumPartitions;
// simulationData.mGMRemapOutputSize = totalNumVerts + totalCopies;
// //data.mGMOrderedVertInvMassCP = orderedVertsInMassCP;
// //mGMOrderedVertInvMassCP = orderedVertsInMassCP;
// //Last bit - output accumulation buffer...
// outIndex = 0;
// for (PxU32 i = 0; i < numVerts; ++i)
// {
// for (PxU32 j = 0; j < maxAccumulatedCP; ++j)
// {
// if (lastRef[i * maxAccumulatedCP + j] != 0xffffffff)
// {
// remapOutput[lastRef[i * maxAccumulatedCP + j]] = totalNumVerts + outIndex++;
// }
// }
// accumulatedCopiesEachVerts[i] = outIndex;
// }
// PX_ASSERT(count == numTets);
// simulationData.mGridModelMaxTetsPerPartitions = maxTetPerPartitions;
// simulationData.mGMPullIndices = pullIndices;
// //If this commented out, we don't use combined partition anymore
// PxMemCopy(orderedTetrahedrons, tempOrderedTetrahedrons, sizeof(PxU32) * numTets);
// PX_FREE(tempNumCopiesEachVerts);
// PX_FREE(tempOrderedTetrahedrons);
// PX_FREE(tempPartitionTablePerVert);
// PX_FREE(tempRemapTablePerVert);
//
// PX_FREE(lastRef);
//}
// Returns 'value' with the bit at 'bitLocation' forced to 'bitState'.
// Fixed: use an unsigned literal for the shift — '1 << 31' shifts into the sign
// bit of a signed int, which is undefined behavior before C++20.
PxU32 setBit(PxU32 value, PxU32 bitLocation, bool bitState)
{
	if (bitState)
		return value | (1u << bitLocation);
	else
		return value & (~(1u << bitLocation));
}
void combineGridModelPartitions(const TetrahedronMeshData& simulationMesh, SoftBodySimulationData& simulationData, PxU32** accumulatedTetrahedronPerPartitions)
{
const PxU32 numTets = simulationMesh.mNbTetrahedrons;
const PxU32 numVerts = simulationMesh.mNbVertices;
const PxU32 nbPartitions = simulationData.mGridModelNbPartitions;
PxU32* accumulatedTetrahedronPerPartition = *accumulatedTetrahedronPerPartitions;
const PxU32 maximumPartitions = 8;
PxU32* combineAccumulatedTetraPerPartitions = PX_ALLOCATE(PxU32, maximumPartitions, "combineAccumulatedTetraPerPartitions");
simulationData.mGMAccumulatedPartitionsCP = combineAccumulatedTetraPerPartitions;
PxMemZero(combineAccumulatedTetraPerPartitions, sizeof(PxU32) * maximumPartitions);
const PxU32 maxAccumulatedPartitionsPerPartitions = (nbPartitions + maximumPartitions - 1) / maximumPartitions;
PxU32* orderedTetrahedrons = simulationData.mGridModelOrderedTetrahedrons;
PxU32* tempOrderedTetrahedrons = PX_ALLOCATE(PxU32, numTets, "tempOrderedTetrahedrons");
const TetrahedronT<PxU32>* tetrahedrons = reinterpret_cast<TetrahedronT<PxU32>*>(simulationMesh.mTetrahedrons);
const PxU32 maxAccumulatedCP = (nbPartitions + maximumPartitions - 1) / maximumPartitions;
const PxU32 partitionArraySize = maxAccumulatedCP * maximumPartitions;
const PxU32 nbPartitionTables = partitionArraySize * numVerts;
PxU32* tempPartitionTablePerVert = PX_ALLOCATE(PxU32, nbPartitionTables, "tempPartitionTablePerVert");
PxU32* tempRemapTablePerVert = PX_ALLOCATE(PxU32, nbPartitionTables, "tempRemapTablePerVert");
PxU32* pullIndices = PX_ALLOCATE(PxU32, (numTets * 4), "tempRemapTablePerVert");
PxU32* lastRef = PX_ALLOCATE(PxU32, (maxAccumulatedCP*numVerts), "refCounts");
PxU32* accumulatedCopiesEachVerts = PX_ALLOCATE(PxU32, numVerts, "accumulatedCopiesEachVerts");
simulationData.mGMAccumulatedCopiesCP = accumulatedCopiesEachVerts;
PxU32* tempNumCopiesEachVerts = PX_ALLOCATE(PxU32, numVerts, "numCopiesEachVerts");
PxMemZero(tempNumCopiesEachVerts, sizeof(PxU32) * numVerts);
PxMemSet(pullIndices, 0xffffffff, sizeof(PxU32)*numTets * 4);
PxMemSet(lastRef, 0xffffffff, sizeof(PxU32)*maxAccumulatedCP*numVerts);
//initialize partitionTablePerVert
for (PxU32 i = 0; i < nbPartitionTables; ++i)
{
tempPartitionTablePerVert[i] = 0xffffffff;
tempRemapTablePerVert[i] = 0xffffffff;
}
PxU32 maxTetPerPartitions = 0;
PxU32 count = 0;
const PxU32 totalNumVerts = numTets * 4;
PxU32 totalCopies = numVerts * maxAccumulatedCP;
simulationData.mGridModelNbPartitions = maximumPartitions;
simulationData.mGMRemapOutputSize = totalNumVerts + totalCopies;
////allocate enough memory for the verts and the accumulation buffer
//PxVec4* orderedVertsInMassCP = reinterpret_cast<PxVec4*>(PX_ALLOC(sizeof(PxVec4) * totalNumVerts, "mGMOrderedVertInvMassCP"));
//data.mGMOrderedVertInvMassCP = orderedVertsInMassCP;
//compute remap table
PxU32* remapOutput = PX_ALLOCATE(PxU32, simulationData.mGMRemapOutputSize, "remapOutput");
simulationData.mGMRemapOutputCP = remapOutput;
for (PxU32 i = 0; i < maximumPartitions; ++i)
{
PxU32 totalTets = 0;
for (PxU32 j = 0; j < maxAccumulatedPartitionsPerPartitions; ++j)
{
PxU32 partitionId = i + maximumPartitions * j;
if (partitionId < nbPartitions)
{
const PxU32 startInd = partitionId == 0 ? 0 : accumulatedTetrahedronPerPartition[partitionId - 1];
const PxU32 endInd = accumulatedTetrahedronPerPartition[partitionId];
for (PxU32 k = startInd; k < endInd; ++k)
{
const PxU32 tetraInd = orderedTetrahedrons[k];
tempOrderedTetrahedrons[count] = tetraInd;
//tempCombinedTetraIndices[count] = tetGM[tetraInd];
//tempTetRestPose[count] = tetRestPose[tetraInd];
PxU32 index = i * maxAccumulatedCP + j;
TetrahedronT<PxU32> tet = tetrahedrons[tetraInd];
tempPartitionTablePerVert[tet.v[0] * partitionArraySize + index] = count;
tempPartitionTablePerVert[tet.v[1] * partitionArraySize + index] = count + numTets;
tempPartitionTablePerVert[tet.v[2] * partitionArraySize + index] = count + numTets * 2;
tempPartitionTablePerVert[tet.v[3] * partitionArraySize + index] = count + numTets * 3;
if (lastRef[tet.v[0] * maxAccumulatedCP + j] == 0xffffffff)
{
pullIndices[4 * count] = tet.v[0];
tempNumCopiesEachVerts[tet.v[0]]++;
}
else
{
remapOutput[lastRef[tet.v[0] * maxAccumulatedCP + j]] = count;
}
lastRef[tet.v[0] * maxAccumulatedCP + j] = 4 * count;
if (lastRef[tet.v[1] * maxAccumulatedCP + j] == 0xffffffff)
{
pullIndices[4 * count + 1] = tet.v[1];
tempNumCopiesEachVerts[tet.v[1]]++;
}
else
{
remapOutput[lastRef[tet.v[1] * maxAccumulatedCP + j]] = count + numTets;
}
lastRef[tet.v[1] * maxAccumulatedCP + j] = 4 * count + 1;
if (lastRef[tet.v[2] * maxAccumulatedCP + j] == 0xffffffff)
{
pullIndices[4 * count + 2] = tet.v[2];
tempNumCopiesEachVerts[tet.v[2]]++;
}
else
{
remapOutput[lastRef[tet.v[2] * maxAccumulatedCP + j]] = count + 2 * numTets;
}
lastRef[tet.v[2] * maxAccumulatedCP + j] = 4 * count + 2;
if (lastRef[tet.v[3] * maxAccumulatedCP + j] == 0xffffffff)
{
pullIndices[4 * count + 3] = tet.v[3];
tempNumCopiesEachVerts[tet.v[3]]++;
}
else
{
remapOutput[lastRef[tet.v[3] * maxAccumulatedCP + j]] = count + 3 * numTets;
}
lastRef[tet.v[3] * maxAccumulatedCP + j] = 4 * count + 3;
count++;
}
totalTets += (endInd - startInd);
}
}
combineAccumulatedTetraPerPartitions[i] = count;
maxTetPerPartitions = PxMax(maxTetPerPartitions, totalTets);
}
//Last bit - output accumulation buffer...
PxU32 outIndex = 0;
for (PxU32 i = 0; i < numVerts; ++i)
{
for (PxU32 j = 0; j < maxAccumulatedCP; ++j)
{
if (lastRef[i * maxAccumulatedCP + j] != 0xffffffff)
{
remapOutput[lastRef[i * maxAccumulatedCP + j]] = totalNumVerts + outIndex++;
}
}
accumulatedCopiesEachVerts[i] = outIndex;
}
PX_ASSERT(count == numTets);
simulationData.mGridModelMaxTetsPerPartitions = maxTetPerPartitions;
simulationData.mGMPullIndices = pullIndices;
//If this commented out, we don't use combined partition anymore
PxMemCopy(orderedTetrahedrons, tempOrderedTetrahedrons, sizeof(PxU32) * numTets);
PX_FREE(tempNumCopiesEachVerts);
PX_FREE(tempOrderedTetrahedrons);
PX_FREE(tempPartitionTablePerVert);
PX_FREE(tempRemapTablePerVert);
PX_FREE(lastRef);
}
// Corner-extraction tables for voxel (hexahedral) elements. Each voxel is stored
// as numTetsPerElement tetrahedra, i.e. a flat run of 4*numTetsPerElement vertex
// indices; entry c of a table gives the offset into that flat run where hex
// corner c can be read back. (Presumably derived from the tessellation tables
// shown in the comments below - confirm against the tet maker if changed.)
// Table for the 6-tets-per-voxel tessellation:
const PxI32 tetIndicesFromVoxels[8] = { 0, 1, 3, 14, 6, 11, 2, 18 };
//const PxI32 tets6PerVoxel[24] = { 0,1,6,2, 0,1,4,6, 1,4,6,5, 1,2,3,6, 1,3,7,6, 1,5,6,7 };
// Tables for the 5-tets-per-voxel tessellation: A = default configuration,
// B = mirrored ("flipped") configuration (see combineGridModelPartitionsHexMesh):
const PxI32 tetIndicesFromVoxelsA[8] = { 0, 5, 16, 2, 12, 3, 1, 9 };
const PxI32 tetIndicesFromVoxelsB[8] = { 5, 0, 3, 16, 2, 12, 9, 1 };
//const PxU32 tets5PerVoxel[] = {
//	 0, 6, 3, 5, 0, 1, 5, 3, 6, 7, 3, 5, 4, 5, 6, 0, 2, 3, 0, 6,
//	 1, 7, 4, 2, 1, 0, 2, 4, 7, 6, 4, 2, 5, 4, 1, 7, 3, 2, 7, 1 };
//   0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19
// Re-partitions the voxel (hex element) simulation mesh into a fixed set of 8
// combined partitions for the GPU solver and builds the gather/accumulation
// tables that let per-corner results be summed back into shared vertices:
// - mGridModelOrderedTetrahedrons: elements reordered by combined partition
// - mGMPullIndices: for each element corner slot, the mesh vertex to gather from
//   (bit 31 of an element's slot 0 flags the "flipped" 5-tet voxel configuration)
// - mGMRemapOutputCP: chains successive copies of the same vertex so partial
//   results flow into an accumulation buffer appended after the corner slots
// - mGMAccumulatedCopiesCP: run-sum of the number of copies per vertex
// Elements are hexahedra tessellated into numTetsPerElement (5 or 6) tetrahedra.
void combineGridModelPartitionsHexMesh(const TetrahedronMeshData& simulationMesh, SoftBodySimulationData& simulationData,
	PxU32** accumulatedTetrahedronPerPartitions, PxU32 numTetsPerElement)
{
	//const PxU32 numTets = simulationMesh.mNbTetrahedrons;
	const PxU32 numElements = simulationMesh.mNbTetrahedrons/simulationData.mNumTetsPerElement;
	const PxU32 numVerts = simulationMesh.mNbVertices;
	const PxU32 NumVertsPerElement = 8;

	const PxU32 nbPartitions = simulationData.mGridModelNbPartitions;

	PxU32* accumulatedTetrahedronPerPartition = *accumulatedTetrahedronPerPartitions;

	// The solver runs a fixed number of combined partitions; the original
	// nbPartitions are folded into them round-robin (original partition p maps to
	// combined partition p % 8, fold j = p / 8).
	const PxU32 maximumPartitions = 8;

	PxU32* combineAccumulatedTetraPerPartitions = PX_ALLOCATE(PxU32, maximumPartitions, "combineAccumulatedTetraPerPartitions");
	simulationData.mGMAccumulatedPartitionsCP = combineAccumulatedTetraPerPartitions;

	PxMemZero(combineAccumulatedTetraPerPartitions, sizeof(PxU32) * maximumPartitions);

	const PxU32 maxAccumulatedPartitionsPerPartitions = (nbPartitions + maximumPartitions - 1) / maximumPartitions;

	PxU32* orderedTetrahedrons = simulationData.mGridModelOrderedTetrahedrons;

	PxU32* tempOrderedTetrahedrons = PX_ALLOCATE(PxU32, numElements, "tempOrderedTetrahedrons");

	const TetrahedronT<PxU32>* tetrahedrons = reinterpret_cast<TetrahedronT<PxU32>*>(simulationMesh.mTetrahedrons);

	// Number of folds, i.e. original partitions stacked onto one combined partition.
	const PxU32 maxAccumulatedCP = (nbPartitions + maximumPartitions - 1) / maximumPartitions;

	const PxU32 partitionArraySize = maxAccumulatedCP * maximumPartitions;
	const PxU32 nbPartitionTables = partitionArraySize * numVerts;

	PxU32* tempPartitionTablePerVert = PX_ALLOCATE(PxU32, nbPartitionTables, "tempPartitionTablePerVert");
	PxU32* tempRemapTablePerVert = PX_ALLOCATE(PxU32, nbPartitionTables, "tempRemapTablePerVert");
	// NOTE(review): allocation label below looks copy/pasted from the line above;
	// the buffer actually holds the pull indices (8 slots per element).
	PxU32* pullIndices = PX_ALLOCATE(PxU32, (numElements * NumVertsPerElement), "tempRemapTablePerVert");

	// lastRef[vert * maxAccumulatedCP + fold] = slot of the most recent reference
	// to 'vert' in that fold; used to chain remapOutput entries per vertex.
	PxU32* lastRef = PX_ALLOCATE(PxU32, (maxAccumulatedCP*numVerts), "refCounts");

	PxU32* accumulatedCopiesEachVerts = PX_ALLOCATE(PxU32, numVerts, "accumulatedCopiesEachVerts");
	simulationData.mGMAccumulatedCopiesCP = accumulatedCopiesEachVerts;

	PxU32* tempNumCopiesEachVerts = PX_ALLOCATE(PxU32, numVerts, "numCopiesEachVerts");
	PxMemZero(tempNumCopiesEachVerts, sizeof(PxU32) * numVerts);

	PxMemSet(pullIndices, 0xffffffff, sizeof(PxU32)*numElements * NumVertsPerElement);
	PxMemSet(lastRef, 0xffffffff, sizeof(PxU32)*maxAccumulatedCP*numVerts);

	//initialize partitionTablePerVert
	for (PxU32 i = 0; i < nbPartitionTables; ++i)
	{
		tempPartitionTablePerVert[i] = 0xffffffff;
		tempRemapTablePerVert[i] = 0xffffffff;
	}

	PxU32 maxTetPerPartitions = 0;

	PxU32 count = 0;

	const PxU32 totalNumVerts = numElements* NumVertsPerElement;
	PxU32 totalCopies = numVerts * maxAccumulatedCP;
	simulationData.mGridModelNbPartitions = maximumPartitions;
	simulationData.mGMRemapOutputSize = totalNumVerts + totalCopies;

	////allocate enough memory for the verts and the accumulation buffer
	//PxVec4* orderedVertsInMassCP = reinterpret_cast<PxVec4*>(PX_ALLOC(sizeof(PxVec4) * totalNumVerts, "mGMOrderedVertInvMassCP"));
	//data.mGMOrderedVertInvMassCP = orderedVertsInMassCP;

	//compute remap table
	PxU32* remapOutput = PX_ALLOCATE(PxU32, simulationData.mGMRemapOutputSize, "remapOutput");
	simulationData.mGMRemapOutputCP = remapOutput;

	for (PxU32 i = 0; i < maximumPartitions; ++i)
	{
		PxU32 totalTets = 0;
		for (PxU32 j = 0; j < maxAccumulatedPartitionsPerPartitions; ++j)
		{
			PxU32 partitionId = i + maximumPartitions * j;
			if (partitionId < nbPartitions)
			{
				// Element range of the original partition in the ordered list.
				const PxU32 startInd = partitionId == 0 ? 0 : accumulatedTetrahedronPerPartition[partitionId - 1];
				const PxU32 endInd = accumulatedTetrahedronPerPartition[partitionId];

				for (PxU32 k = startInd; k < endInd; ++k)
				{
					const PxU32 tetraInd = orderedTetrahedrons[k];
					tempOrderedTetrahedrons[count] = tetraInd;

					// Column index into the per-vertex partition table.
					PxU32 index = i * maxAccumulatedCP + j;

					const PxI32* map = NULL;
					// Flat run of the element's 4*numTetsPerElement vertex indices.
					const PxU32* tetInds = reinterpret_cast<const PxU32*>(&tetrahedrons[tetraInd]);

					//If 5 tets are used per voxel, some voxels have a flipped tetrahedron configuration
					//Tetmaker uses the following table to generate 5 tets per hex. The first row is the standard configuration, the second row the flipped config.
					//To distinguish the two, a pattern must be found that is only present in one of the two configurations
					//While 5 tets get created, this leads to 20 indices. The flipped configuration references the same vertex at indices[0] and indices[19] while
					//the default config references different tets at indices[0] and indices[19]. This means that this comparsion can reliably detect flipped configurations.
					//const PxU32 tets5PerVoxel[] = {
					//	0, 6, 3, 5, 0, 1, 5, 3, 6, 7, 3, 5, 4, 5, 6, 0, 2, 3, 0, 6,
					//	1, 7, 4, 2, 1, 0, 2, 4, 7, 6, 4, 2, 5, 4, 1, 7, 3, 2, 7, 1
					bool flipped = tetInds[0] == tetInds[19];

					if (numTetsPerElement == 6)
					{
						map = tetIndicesFromVoxels;
					}
					else
					{
						if (!flipped)
							map = tetIndicesFromVoxelsA;
						else
							map = tetIndicesFromVoxelsB;
					}

					// First 4 hex corners: slot layout is column-major, i.e. corner v
					// of element 'count' lives at slot count + numElements * v.
					for (PxU32 v = 0; v < 4; ++v)
					{
						PxU32 vertInd = tetInds[map[v]];

						tempPartitionTablePerVert[vertInd * partitionArraySize + index] = count + numElements * v;

						if (lastRef[vertInd * maxAccumulatedCP + j] == 0xffffffff)
						{
							// First reference in this fold: gather straight from the mesh vertex.
							pullIndices[4 * count + v] = vertInd;
							tempNumCopiesEachVerts[vertInd]++;
						}
						else
						{
							// Chain the previous reference to this slot.
							remapOutput[lastRef[vertInd * maxAccumulatedCP + j]] = count + v * numElements;
						}
						lastRef[vertInd * maxAccumulatedCP + j] = 4 * count + v;
					}

					// Remaining 4 hex corners: their pull indices are stored
					// numElements entries later (second half of the buffer).
					for (PxU32 v = 0; v < 4; ++v)
					{
						//vertex index
						PxU32 vertInd = tetInds[map[v+4]];

						//Where the vertex data will be written to/read from
						tempPartitionTablePerVert[vertInd * partitionArraySize + index] = count + numElements * (v+4);

						if (lastRef[vertInd * maxAccumulatedCP + j] == 0xffffffff)
						{
							pullIndices[4 * (count + numElements) + v] = vertInd;
							tempNumCopiesEachVerts[vertInd]++;
						}
						else
						{
							remapOutput[lastRef[vertInd * maxAccumulatedCP + j]] = count + (v+4) * numElements;
						}
						lastRef[vertInd * maxAccumulatedCP + j] = 4 * (numElements + count) + v;
					}

					if (numTetsPerElement == 5)
					{
						// Flag the flipped 5-tet configuration in bit 31 of slot 0
						// so the solver can pick the matching tessellation.
						PxU32 ind = pullIndices[4 * count];
						ind = setBit(ind, 31, flipped);
						pullIndices[4 * count /*+ v*/] = ind;
					}

					count++;
				}

				totalTets += (endInd - startInd);
			}
		}

		// Inclusive run-sum of elements per combined partition.
		combineAccumulatedTetraPerPartitions[i] = count;
		maxTetPerPartitions = PxMax(maxTetPerPartitions, totalTets);
	}

	//Last bit - output accumulation buffer...
	// Terminate every per-vertex chain into the accumulation area appended after
	// the corner slots, and record the per-vertex copy run-sum.
	PxU32 outIndex = 0;
	for (PxU32 i = 0; i < numVerts; ++i)
	{
		for (PxU32 j = 0; j < maxAccumulatedCP; ++j)
		{
			if (lastRef[i * maxAccumulatedCP + j] != 0xffffffff)
			{
				remapOutput[lastRef[i * maxAccumulatedCP + j]] = totalNumVerts + outIndex++;
			}
		}
		accumulatedCopiesEachVerts[i] = outIndex;
	}

	PX_ASSERT(count == numElements);

	simulationData.mGridModelMaxTetsPerPartitions = maxTetPerPartitions;

	simulationData.mGMPullIndices = pullIndices;

	//If this commented out, we don't use combined partition anymore
	PxMemCopy(orderedTetrahedrons, tempOrderedTetrahedrons, sizeof(PxU32) * numElements);

	PX_FREE(tempNumCopiesEachVerts);
	PX_FREE(tempOrderedTetrahedrons);
	PX_FREE(tempPartitionTablePerVert);
	PX_FREE(tempRemapTablePerVert);
	PX_FREE(lastRef);
}
// Query context for the closest-tetrahedron search over an AABB tree
// (see gDistanceCheckCallback / gDistanceNodeCheckCallback).
struct DistanceCheck
{
	//input
	PxVec3* mVerts;						// vertex buffer referenced by mTetrahedron32
	IndTetrahedron32* mTetrahedron32;	// tetrahedron index buffer
	PxVec3 mOriginalVert;				// query point
	//output
	PxU32 mTetInd;			// index of the closest tetrahedron found so far
	PxReal mDistanceSq;		// squared distance to that tetrahedron (0 if inside)
	PxVec3 mClosestPoint;	// closest point on that tetrahedron
	//these data are for validation only
	PxU32 mNbPrimsPerLeaf;
	PxU32 mNbPrims;
};
static bool gDistanceNodeCheckCallback(const AABBTreeNode* current, void* userData)
{
DistanceCheck* Data = reinterpret_cast<DistanceCheck*>(userData);
const PxVec3& p = Data->mOriginalVert;
const AABBTreeNode* posNode = current->getPos();
const AABBTreeNode* negNode = current->getNeg();
PxReal distanceSqP = PX_MAX_F32;
if (posNode)
{
const PxBounds3& posAABB = posNode->getAABB();
const PxVec3 posClosest = posAABB.minimum.maximum(p.minimum(posAABB.maximum));
distanceSqP = (posClosest - p).magnitudeSquared();
}
PxReal distanceSqN = PX_MAX_F32;
if (negNode)
{
const PxBounds3& negAABB = negNode->getAABB();
const PxVec3 negClosest = negAABB.minimum.maximum(p.minimum(negAABB.maximum));
distanceSqN = (negClosest - p).magnitudeSquared();
}
return distanceSqP <= distanceSqN ? true : false;
}
// Visit callback for the closest-tetrahedron query (userData is a DistanceCheck).
// Leaf nodes: test every contained tetrahedron and keep the best closest point.
// Internal nodes: prune the subtree when its AABB is already farther away than
// the best squared distance found so far. Returns false to stop descending.
static bool gDistanceCheckCallback(const AABBTreeNode* current, PxU32 /*depth*/, void* userData)
{
	DistanceCheck* Data = reinterpret_cast<DistanceCheck*>(userData);
	const PxVec3& p = Data->mOriginalVert;

	if (current->isLeaf())
	{
		const PxU32 n = current->getNbPrimitives();
		PX_ASSERT(n <= Data->mNbPrimsPerLeaf);
		PxU32* Prims = const_cast<PxU32*>(current->getPrimitives());
		PX_UNUSED(Prims);

		const PxVec3* verts = Data->mVerts;

		for (PxU32 i = 0; i < n; i++)
		{
			PX_ASSERT(Prims[i] < Data->mNbPrims);
			const PxU32 tetId = Prims[i];
			const IndTetrahedron32& tetrahedron = Data->mTetrahedron32[tetId];
			PX_UNUSED(tetrahedron);

			const PxVec3 a = verts[tetrahedron.mRef[0]];
			const PxVec3 b = verts[tetrahedron.mRef[1]];
			const PxVec3 c = verts[tetrahedron.mRef[2]];
			const PxVec3 d = verts[tetrahedron.mRef[3]];

			//compute distance between the vert and the tetrahedron
			const PxVec4 result = PointOutsideOfPlane4(p, a, b, c, d);

			// All four plane tests non-negative => p lies inside this tetrahedron.
			if (result.x >= 0.f && result.y >= 0.f && result.z >= 0.f && result.w >= 0.f)
			{
				//point is inside the tetrahedron
				Data->mClosestPoint = closestPtPointTetrahedron(p, a, b, c, d);
				Data->mDistanceSq = 0.f;
				Data->mTetInd = tetId;
			}
			else
			{
				//point is outside the tetrahedron
				const PxVec3 closestP = closestPtPointTetrahedron(p, a, b, c, d, result);
				const PxReal distanceSq = (closestP - p).magnitudeSquared();

				// Keep only the best candidate seen so far.
				if (distanceSq < Data->mDistanceSq)
				{
					Data->mClosestPoint = closestP;
					Data->mDistanceSq = distanceSq;
					Data->mTetInd = tetId;
				}
			}
		}
	}
	else
	{
		//compute distance from p to the node's AABB by clamping p to the box
		const PxBounds3& aabb = current->getAABB();

		const PxVec3& min = aabb.minimum;
		const PxVec3& max = aabb.maximum;

		const PxVec3 closest = min.maximum(p.minimum(max));
		PxReal distanceSq = (closest-p).magnitudeSquared();

		// Prune: this subtree cannot contain anything closer than the current best.
		if (distanceSq > Data->mDistanceSq)
			return false;
	}
	return true;
}
// Query context for collecting all simulation-mesh tetrahedra that overlap a
// given collision-mesh tetrahedron (see gOverlapCallback).
struct OverlapCheck
{
	//input
	IndTetrahedron32 mColTetrahedron32;	// collision tetrahedron being tested
	PxVec3* mColMeshVerts;				// collision mesh vertex buffer
	PxBounds3 mColTetBound;				// AABB of the collision tetrahedron
	PxVec3* mSimMeshVerts;				// simulation mesh vertex buffer
	IndTetrahedron32* mSimMeshTetra;	// simulation mesh tetrahedron buffer
	//output
	PxArray<PxU32> mSimTetraIndices;	// indices of overlapping simulation tetrahedra
	//these data are for validation only
	PxU32 mNbPrimsPerLeaf;
	PxU32 mNbPrims;
};
// Visit callback for the tet-vs-tree overlap query (userData is an OverlapCheck).
// Leaf nodes: run GJK between the collision tetrahedron and each contained
// simulation tetrahedron; contact/close results (within a small tolerance) are
// appended to mSimTetraIndices. Internal nodes: descend only when the node AABB
// intersects the collision tetrahedron's AABB.
static bool gOverlapCallback(const AABBTreeNode* current, PxU32 /*depth*/, void* userData)
{
	OverlapCheck* Data = reinterpret_cast<OverlapCheck*>(userData);
	const PxBounds3& bound = Data->mColTetBound;

	if (current->isLeaf())
	{
		const PxU32 n = current->getNbPrimitives();
		PX_ASSERT(n <= Data->mNbPrimsPerLeaf);
		PxU32* Prims = const_cast<PxU32*>(current->getPrimitives());
		PX_UNUSED(Prims);

		// Build the GJK convex for the collision tetrahedron once per leaf.
		const IndTetrahedron32& colTetInd = Data->mColTetrahedron32;
		const PxVec3 a0 = Data->mColMeshVerts[colTetInd.mRef[0]];
		const PxVec3 a1 = Data->mColMeshVerts[colTetInd.mRef[1]];
		const PxVec3 a2 = Data->mColMeshVerts[colTetInd.mRef[2]];
		const PxVec3 a3 = Data->mColMeshVerts[colTetInd.mRef[3]];
		const PxVec3 center0 = (a0 + a1 + a2 + a3) * 0.25f;

		TetrahedronV tetV(aos::V3LoadU(a0), aos::V3LoadU(a1), aos::V3LoadU(a2),
			aos::V3LoadU(a3));
		const LocalConvex<TetrahedronV> convexA(tetV);

		// Small tolerance so near-touching tetrahedra still count as overlapping.
		aos::FloatV contactDist = aos::FLoad(1e-4f);

		const PxVec3* verts = Data->mSimMeshVerts;

		for (PxU32 i = 0; i < n; i++)
		{
			PX_ASSERT(Prims[i] < Data->mNbPrims);
			const PxU32 tetId = Prims[i];
			const IndTetrahedron32& tetrahedron = Data->mSimMeshTetra[tetId];

			const PxVec3 b0 = verts[tetrahedron.mRef[0]];
			const PxVec3 b1 = verts[tetrahedron.mRef[1]];
			const PxVec3 b2 = verts[tetrahedron.mRef[2]];
			const PxVec3 b3 = verts[tetrahedron.mRef[3]];
			const PxVec3 center1 = (b0 + b1 + b2 + b3) * 0.25f;

			// Initial GJK search direction: between the two centroids.
			const PxVec3 dir = center1 - center0;

			TetrahedronV tetV2(aos::V3LoadU(b0), aos::V3LoadU(b1), aos::V3LoadU(b2),
				aos::V3LoadU(b3));
			tetV2.setMinMargin(aos::FEps());
			const LocalConvex<TetrahedronV> convexB(tetV2);

			GjkOutput output;

#ifdef USE_GJK_VIRTUAL
			GjkStatus status = testGjk(convexA, convexB, aos::V3LoadU(dir), contactDist, output.closestA,
				output.closestB, output.normal, output.penDep);
#else
			GjkStatus status = gjk(convexA, convexB, aos::V3LoadU(dir), contactDist, output.closestA,
				output.closestB, output.normal, output.penDep);
#endif

			if (status == GjkStatus::GJK_CLOSE || status == GjkStatus::GJK_CONTACT)
			{
				Data->mSimTetraIndices.pushBack(tetId);
			}
		}
	}
	else
	{
		// Descend only into subtrees whose AABB intersects the query bound.
		const PxBounds3& aabb = current->getAABB();
		return bound.intersects(aabb);
	}
	return true;
}
void TetrahedronMeshBuilder::createCollisionModelMapping(const TetrahedronMeshData& collisionMesh, const SoftBodyCollisionData& collisionData, CollisionMeshMappingData& mappingData)
{
const PxU32 nbVerts = collisionMesh.mNbVertices;
mappingData.mCollisionAccumulatedTetrahedronsRef = PX_ALLOCATE(PxU32, nbVerts, "tetCounts");
PxU32* tempCounts = PX_ALLOCATE(PxU32, nbVerts, "tempCounts");
PxU32* tetCounts = mappingData.mCollisionAccumulatedTetrahedronsRef;
PxMemZero(tetCounts, sizeof(PxU32) * nbVerts);
PxMemZero(tempCounts, sizeof(PxU32) * nbVerts);
const PxU32 nbTetrahedrons = collisionMesh.mNbTetrahedrons;
IndTetrahedron32* tetra = reinterpret_cast<IndTetrahedron32*>(collisionData.mGRB_primIndices);
for (PxU32 i = 0; i < nbTetrahedrons; i++)
{
IndTetrahedron32& tet = tetra[i];
tetCounts[tet.mRef[0]]++;
tetCounts[tet.mRef[1]]++;
tetCounts[tet.mRef[2]]++;
tetCounts[tet.mRef[3]]++;
}
//compute runsum
PxU32 totalReference = 0;
for (PxU32 i = 0; i < nbVerts; ++i)
{
PxU32 originalReference = tetCounts[i];
tetCounts[i] = totalReference;
totalReference += originalReference;
}
mappingData.mCollisionTetrahedronsReferences = PX_ALLOCATE(PxU32, totalReference, "mGMMappedTetrahedrons");
mappingData.mCollisionNbTetrahedronsReferences = totalReference;
PxU32* tetrahedronRefs = mappingData.mCollisionTetrahedronsReferences;
for (PxU32 i = 0; i < nbTetrahedrons; i++)
{
IndTetrahedron32& tet = tetra[i];
const PxU32 ind0 = tet.mRef[0];
const PxU32 ind1 = tet.mRef[1];
const PxU32 ind2 = tet.mRef[2];
const PxU32 ind3 = tet.mRef[3];
tetrahedronRefs[tetCounts[ind0] + tempCounts[ind0]] = i;
tempCounts[ind0]++;
tetrahedronRefs[tetCounts[ind1] + tempCounts[ind1]] = i;
tempCounts[ind1]++;
tetrahedronRefs[tetCounts[ind2] + tempCounts[ind2]] = i;
tempCounts[ind2]++;
tetrahedronRefs[tetCounts[ind3] + tempCounts[ind3]] = i;
tempCounts[ind3]++;
}
PxVec3* verts = collisionMesh.mVertices;
PxU8* tetHint = reinterpret_cast<PxU8*>(collisionData.mGRB_tetraSurfaceHint);
IndTetrahedron32* surfaceTets = PX_ALLOCATE(IndTetrahedron32, nbTetrahedrons, "surfaceTets");
PxU8* surfaceVertsHint = PX_ALLOCATE(PxU8, nbVerts, "surfaceVertsHint");
PxU32* surfaceVertToTetRemap = PX_ALLOCATE(PxU32, nbVerts, "surfaceVertToTetRemap");
PxMemSet(surfaceVertsHint, 0, nbVerts);
PxU32 nbSurfaceTets = 0;
for (PxU32 i = 0; i < nbTetrahedrons; i++)
{
IndTetrahedron32& originalTet = tetra[i];
PxU8 hint = tetHint[i];
//This is a surface triangle
if (hint != 0)
{
IndTetrahedron32& tet = surfaceTets[nbSurfaceTets];
tet.mRef[0] = originalTet.mRef[0];
tet.mRef[1] = originalTet.mRef[1];
tet.mRef[2] = originalTet.mRef[2];
tet.mRef[3] = originalTet.mRef[3];
if (hint & 1) //0111
{
if (surfaceVertsHint[originalTet.mRef[0]] == 0)
{
surfaceVertsHint[originalTet.mRef[0]] = 1;
surfaceVertToTetRemap[originalTet.mRef[0]] = i;
}
if (surfaceVertsHint[originalTet.mRef[1]] == 0)
{
surfaceVertsHint[originalTet.mRef[1]] = 1;
surfaceVertToTetRemap[originalTet.mRef[1]] = i;
}
if (surfaceVertsHint[originalTet.mRef[2]] == 0)
{
surfaceVertsHint[originalTet.mRef[2]] = 1;
surfaceVertToTetRemap[originalTet.mRef[2]] = i;
}
}
if (hint & 2)//1011
{
if (surfaceVertsHint[originalTet.mRef[0]] == 0)
{
surfaceVertsHint[originalTet.mRef[0]] = 1;
surfaceVertToTetRemap[originalTet.mRef[0]] = i;
}
if (surfaceVertsHint[originalTet.mRef[1]] == 0)
{
surfaceVertsHint[originalTet.mRef[1]] = 1;
surfaceVertToTetRemap[originalTet.mRef[1]] = i;
}
if (surfaceVertsHint[originalTet.mRef[3]] == 0)
{
surfaceVertsHint[originalTet.mRef[3]] = 1;
surfaceVertToTetRemap[originalTet.mRef[3]] = i;
}
}
if (hint & 4) //1101
{
if (surfaceVertsHint[originalTet.mRef[0]] == 0)
{
surfaceVertsHint[originalTet.mRef[0]] = 1;
surfaceVertToTetRemap[originalTet.mRef[0]] = i;
}
if (surfaceVertsHint[originalTet.mRef[2]] == 0)
{
surfaceVertsHint[originalTet.mRef[2]] = 1;
surfaceVertToTetRemap[originalTet.mRef[2]] = i;
}
if (surfaceVertsHint[originalTet.mRef[3]] == 0)
{
surfaceVertsHint[originalTet.mRef[3]] = 1;
surfaceVertToTetRemap[originalTet.mRef[3]] = i;
}
}
if (hint & 8)//1110
{
if (surfaceVertsHint[originalTet.mRef[1]] == 0)
{
surfaceVertsHint[originalTet.mRef[1]] = 1;
surfaceVertToTetRemap[originalTet.mRef[1]] = i;
}
if (surfaceVertsHint[originalTet.mRef[2]] == 0)
{
surfaceVertsHint[originalTet.mRef[2]] = 1;
surfaceVertToTetRemap[originalTet.mRef[2]] = i;
}
if (surfaceVertsHint[originalTet.mRef[3]] == 0)
{
surfaceVertsHint[originalTet.mRef[3]] = 1;
surfaceVertToTetRemap[originalTet.mRef[3]] = i;
}
}
nbSurfaceTets++;
}
}
PxU32 numSurfaceVerts = 0;
for (PxU32 i = 0; i < nbVerts; ++i)
{
PxU32 hint = surfaceVertsHint[i];
if (hint)
numSurfaceVerts++;
}
mappingData.mCollisionSurfaceVertsHint = PX_ALLOCATE(PxU8, nbVerts, "mCollisionSurfaceVertsHint");
mappingData.mCollisionSurfaceVertToTetRemap = PX_ALLOCATE(PxU32, nbVerts, "mCollisionSurfaceVertToTetRemap");
PxMemCopy(mappingData.mCollisionSurfaceVertsHint, surfaceVertsHint, sizeof(PxU8)*nbVerts);
PxMemCopy(mappingData.mCollisionSurfaceVertToTetRemap, surfaceVertToTetRemap, sizeof(PxU32)*nbVerts);
//Build the tree based on surface tetra
TetrahedronSourceMesh meshInterface;
// const PxReal gBoxEpsilon = 0.1f;
meshInterface.initRemap();
meshInterface.setNbVertices(collisionMesh.mNbVertices);
meshInterface.setNbTetrahedrons(nbSurfaceTets);
meshInterface.setPointers(surfaceTets, NULL, verts);
const PxU32 nbPrimsPerLeaf = 4;
BV4_AABBTree aabbTree;
if (!aabbTree.buildFromMesh(meshInterface, nbPrimsPerLeaf))
{
PxGetFoundation().error(PxErrorCode::eINTERNAL_ERROR, PX_FL, "BV4_AABBTree tree failed to build.");
return;
}
PX_FREE(tempCounts);
PX_FREE(surfaceTets);
PX_FREE(surfaceVertsHint);
PX_FREE(surfaceVertToTetRemap);
}
/*//Keep for debugging & verification
void writeTets(const char* path, const PxVec3* tetPoints, PxU32 numPoints, const IndTetrahedron32* tets, PxU32 numTets)
{
FILE *fp;
fp = fopen(path, "w+");
fprintf(fp, "# Tetrahedral mesh generated using\n\n");
fprintf(fp, "# %d vertices\n", numPoints);
for (PxU32 i = 0; i < numPoints; ++i)
{
fprintf(fp, "v %f %f %f\n", PxF64(tetPoints[i].x), PxF64(tetPoints[i].y), PxF64(tetPoints[i].z));
}
fprintf(fp, "\n");
fprintf(fp, "# %d tetrahedra\n", numTets);
for (PxU32 i = 0; i < numTets; ++i)
{
fprintf(fp, "t %d %d %d %d\n", tets[i].mRef[0], tets[i].mRef[1], tets[i].mRef[2], tets[i].mRef[3]);
}
fclose(fp);
}*/
// Builds the mapping between the collision mesh and the simulation (grid model)
// mesh: per-collision-vertex barycentric coordinates + containing simulation
// tetrahedron (either taken from the optional vertexToTet table or found via a
// closest-tetrahedron query on an AABB tree), plus a run-sum remap of collision
// tetrahedra to overlapping simulation tetrahedra (used e.g. to propagate
// materials from collision to simulation tets).
// GPU-only data is built when buildGPUData is set.
void TetrahedronMeshBuilder::computeModelsMapping(TetrahedronMeshData& simulationMesh,
	const TetrahedronMeshData& collisionMesh, const SoftBodyCollisionData& collisionData,
	CollisionMeshMappingData& mappingData, bool buildGPUData, const PxBoundedData* vertexToTet)
{
	createCollisionModelMapping(collisionMesh, collisionData, mappingData);

	if (buildGPUData)
	{
		const PxU32 gridModelNbVerts = simulationMesh.mNbVertices;

		// Plain position copy of the simulation vertices for the tree/queries.
		PxVec3* gridModelVertices = PX_ALLOCATE(PxVec3, gridModelNbVerts, "gridModelVertices");

		PxVec3* gridModelVerticesInvMass = simulationMesh.mVertices;

		for (PxU32 i = 0; i < gridModelNbVerts; ++i)
		{
			gridModelVertices[i] = gridModelVerticesInvMass[i];
		}

		PX_ASSERT(!(collisionMesh.mFlags & PxTriangleMeshFlag::e16_BIT_INDICES));

		// Build an AABB tree over the simulation tetrahedra.
		TetrahedronSourceMesh meshInterface;
		//	const PxReal gBoxEpsilon = 0.1f;
		meshInterface.initRemap();
		meshInterface.setNbVertices(simulationMesh.mNbVertices);
		meshInterface.setNbTetrahedrons(simulationMesh.mNbTetrahedrons);

		IndTetrahedron32* tetrahedron32 = reinterpret_cast<IndTetrahedron32*>(simulationMesh.mTetrahedrons);

		meshInterface.setPointers(tetrahedron32, NULL, gridModelVertices);

		//writeTets("C:\\tmp\\grid.tet", gridModelVertices, simulationMesh.mNbVertices, tetrahedron32, simulationMesh.mNbTetrahedrons);
		//writeTets("C:\\tmp\\col.tet", mVertices, mNbVertices, reinterpret_cast<IndTetrahedron32*>(mTetrahedrons), mNbTetrahedrons);

		const PxU32 nbPrimsPerLeaf = 2;

		BV4_AABBTree aabbTree;
		if (!aabbTree.buildFromMesh(meshInterface, nbPrimsPerLeaf))
		{
			PxGetFoundation().error(PxErrorCode::eINTERNAL_ERROR, PX_FL, "BV32 tree failed to build.");

			// Bugfix: gridModelVertices used to leak on this early-out path.
			PX_FREE(gridModelVertices);
			return;
		}

		const PxU32 nbTetModelVerts = collisionMesh.mNbVertices;

		// Per collision vertex: 4 barycentric coords + the containing sim tet.
		mappingData.mVertsBarycentricInGridModel = reinterpret_cast<PxReal*>(PX_ALLOC(nbTetModelVerts * sizeof(PxReal) * 4, "mVertsInfoMapOriginalGridModel"));
		mappingData.mVertsRemapInGridModel = reinterpret_cast<PxU32*>(PX_ALLOC(nbTetModelVerts * sizeof(PxU32), "mVertsRemapInGridModel"));

		PxReal* vertsBarycentricInGridModel = mappingData.mVertsBarycentricInGridModel;
		PxU32* vertsRemapInGridModel = mappingData.mVertsRemapInGridModel;

		if (vertexToTet && vertexToTet->count == nbTetModelVerts)
		{
			// The caller already knows which sim tet embeds each collision vertex.
			for (PxU32 i = 0; i < nbTetModelVerts; ++i)
			{
				vertsRemapInGridModel[i] = vertexToTet->at<PxU32>(i);

				const PxVec3& p = collisionMesh.mVertices[i];

				IndTetrahedron32& tetra = tetrahedron32[vertsRemapInGridModel[i]];

				const PxVec3& a = gridModelVertices[tetra.mRef[0]];
				const PxVec3& b = gridModelVertices[tetra.mRef[1]];
				const PxVec3& c = gridModelVertices[tetra.mRef[2]];
				const PxVec3& d = gridModelVertices[tetra.mRef[3]];

				PxVec4 bary;
				computeBarycentric(a, b, c, d, p, bary);

#if PX_DEBUG
				const PxReal eps = 1e-4f;
				PX_ASSERT((bary.x >= -eps && bary.x <= 1.f + eps) && (bary.y >= -eps && bary.y <= 1.f + eps) &&
					(bary.z >= -eps && bary.z <= 1.f + eps) && (bary.w >= -eps && bary.w <= 1.f + eps));
				PX_ASSERT(vertexToTet->at<PxI32>(i) >= 0);
#endif

				const PxU32 index = i * 4;
				vertsBarycentricInGridModel[index] = bary.x;
				vertsBarycentricInGridModel[index + 1] = bary.y;
				vertsBarycentricInGridModel[index + 2] = bary.z;
				vertsBarycentricInGridModel[index + 3] = bary.w;
			}
		}
		else
		{
			// No table provided: find the closest sim tet with a tree query.
			for (PxU32 i = 0; i < nbTetModelVerts; ++i)
			{
				DistanceCheck result;
				result.mVerts = gridModelVertices;
				result.mTetrahedron32 = tetrahedron32;
				result.mOriginalVert = collisionMesh.mVertices[i];
				result.mDistanceSq = PX_MAX_F32;
				result.mNbPrimsPerLeaf = 2;
				result.mNbPrims = simulationMesh.mNbTetrahedrons;

				aabbTree.walkDistance(gDistanceCheckCallback, gDistanceNodeCheckCallback, &result);

				IndTetrahedron32& tetra = tetrahedron32[result.mTetInd];

				const PxVec3& a = gridModelVertices[tetra.mRef[0]];
				const PxVec3& b = gridModelVertices[tetra.mRef[1]];
				const PxVec3& c = gridModelVertices[tetra.mRef[2]];
				const PxVec3& d = gridModelVertices[tetra.mRef[3]];

				PxVec4 bary;
				computeBarycentric(a, b, c, d, result.mOriginalVert, bary);

#if PX_DEBUG
				const PxReal eps = 1e-4f;
				PX_ASSERT((bary.x >= -eps && bary.x <= 1.f + eps) && (bary.y >= -eps && bary.y <= 1.f + eps) &&
					(bary.z >= -eps && bary.z <= 1.f + eps) && (bary.w >= -eps && bary.w <= 1.f + eps));
#endif

				const PxU32 index = i * 4;
				vertsBarycentricInGridModel[index] = bary.x;
				vertsBarycentricInGridModel[index + 1] = bary.y;
				vertsBarycentricInGridModel[index + 2] = bary.z;
				vertsBarycentricInGridModel[index + 3] = bary.w;

				vertsRemapInGridModel[i] = result.mTetInd;
			}
		}

		PxU16* colMaterials = collisionMesh.mMaterialIndices;

		PxU16* simMaterials = NULL;

		const PxU32 nbSimMeshTetra = simulationMesh.mNbTetrahedrons;
		if (colMaterials)
		{
			// 0xffff marks "not yet assigned" sim-tet materials.
			simMaterials = simulationMesh.allocateMaterials();
			for (PxU32 i = 0; i < nbSimMeshTetra; ++i)
			{
				simMaterials[i] = 0xffff;
			}
		}

		// For each collision tet, collect the overlapping sim tets into a
		// run-sum (runSum) + flat index list (tetIndiceRunSum).
		const PxU32 nbColMeshTetra = collisionMesh.mNbTetrahedrons;

		PxArray<PxU32> tetIndiceRunSum;
		tetIndiceRunSum.reserve(nbColMeshTetra * 4);

		mappingData.mTetsAccumulatedRemapColToSim = reinterpret_cast<PxU32*>( PX_ALLOC(sizeof(PxU32) * nbColMeshTetra, "mTetsAccumulatedRemapColToSim"));
		PxU32* runSum = mappingData.mTetsAccumulatedRemapColToSim;

		PxU32 offset = 0;

		IndTetrahedron32* colTetra = reinterpret_cast<IndTetrahedron32*>(collisionData.mGRB_primIndices);

		OverlapCheck result;
		result.mSimTetraIndices.reserve(100);

		//IndTetrahedron32* simTetra = reinterpret_cast<IndTetrahedron32*>(simulationMesh.mTetrahedrons);

		for (PxU32 i = 0; i < nbColMeshTetra; ++i)
		{
			IndTetrahedron32& tetInd = colTetra[i];

			const PxVec3 a = collisionMesh.mVertices[tetInd.mRef[0]];
			const PxVec3 b = collisionMesh.mVertices[tetInd.mRef[1]];
			const PxVec3 c = collisionMesh.mVertices[tetInd.mRef[2]];
			const PxVec3 d = collisionMesh.mVertices[tetInd.mRef[3]];

			const PxVec3 max = a.maximum(b.maximum(c.maximum(d)));
			const PxVec3 min = a.minimum(b.minimum(c.minimum(d)));

			PxBounds3 bound(min, max);

			result.mSimTetraIndices.forceSize_Unsafe(0);
			result.mColMeshVerts = collisionMesh.mVertices;
			result.mColTetBound = bound;
			result.mColTetrahedron32 = tetInd;
			result.mSimMeshTetra = tetrahedron32;
			result.mSimMeshVerts = gridModelVertices;
			result.mNbPrimsPerLeaf = 2;
			result.mNbPrims = simulationMesh.mNbTetrahedrons;

			aabbTree.walk(gOverlapCallback, &result);

			const PxU32 size = result.mSimTetraIndices.size();
			PX_ASSERT(size > 0);

			for (PxU32 j = 0; j < size; ++j)
			{
				const PxU32 simTetraInd = result.mSimTetraIndices[j];
				// First collision tet touching a sim tet decides its material.
				if (simMaterials && simMaterials[simTetraInd] == 0xffff)
					simMaterials[simTetraInd] = colMaterials[i];
				tetIndiceRunSum.pushBack(simTetraInd);
			}

			offset += size;

			runSum[i] = offset;
		}

		if (simMaterials)
		{
			//loop through all the simMaterials to make sure material indices has valid material index. If not,
			//we will use the first material index for the tet materials
			for (PxU32 i = 0; i < nbSimMeshTetra; ++i)
			{
				if (simMaterials[i] == 0xffff)
					simMaterials[i] = 0;
			}
		}

		mappingData.mTetsRemapSize = tetIndiceRunSum.size();
		mappingData.mTetsRemapColToSim = reinterpret_cast<PxU32*>(PX_ALLOC(sizeof(PxU32) * mappingData.mTetsRemapSize, "mTetsRemapInSimModel"));
		PxMemCopy(mappingData.mTetsRemapColToSim, tetIndiceRunSum.begin(), sizeof(PxU32) * mappingData.mTetsRemapSize);

#if PX_DEBUG
		for (PxU32 i = 0; i < tetIndiceRunSum.size(); ++i)
		{
			PX_ASSERT(tetIndiceRunSum[i] < 0xFFFFFFFF);
		}

		for (PxU32 i = 1; i < collisionMesh.mNbTetrahedrons; ++i)
		{
			PX_ASSERT(runSum[i - 1] < runSum[i]);
		}
#endif

		PX_FREE(gridModelVertices);
	}
}
// Signed volume of tetrahedron (a, b, c, d): -(1/6) * (a-d) . ((b-d) x (c-d)).
// The sign encodes the vertex winding; writeToSimTetraIndice relies on it.
PX_FORCE_INLINE PxF32 tetVolume(const PxVec3& a, const PxVec3& b, const PxVec3& c, const PxVec3& d)
{
	const PxVec3 ad = a - d;
	const PxVec3 bd = b - d;
	const PxVec3 cd = c - d;
	return (-1.0f / 6.0f) * ad.dot(bd.cross(cd));
}
// Copies a tetrahedron's four vertex indices into 'dest', swapping the first two
// indices when the tetrahedron has negative signed volume so that the stored
// winding always yields a non-negative volume.
template<class T>
static void writeToSimTetraIndice(const T* tetIndices, const PxVec3* verts, TetrahedronT<PxU32>* dest)
{
	const bool negativeVolume = tetVolume(verts[tetIndices[0]], verts[tetIndices[1]],
		verts[tetIndices[2]], verts[tetIndices[3]]) < 0.f;

	// Swapping two vertices flips the tetrahedron's orientation.
	dest->v[0] = negativeVolume ? tetIndices[1] : tetIndices[0];
	dest->v[1] = negativeVolume ? tetIndices[0] : tetIndices[1];
	dest->v[2] = tetIndices[2];
	dest->v[3] = tetIndices[3];
}
// Allocates storage for a collision tetrahedron mesh from a descriptor and
// computes the mesh's local bounds and geometric epsilon.
// NOTE(review): mVertices/mTetrahedrons are only allocated here - this function
// does not copy desc.points/desc.tetrahedrons into them (compare computeSimData,
// which gathers the descriptor data). As written, computeLocalBoundsAndGeomEpsilon
// reads uninitialized vertex memory; presumably a caller fills the buffers first -
// confirm against the call sites.
void TetrahedronMeshBuilder::computeTetData(const PxTetrahedronMeshDesc& desc, TetrahedronMeshData& mesh)
{
	const PxU32 tetMeshNbPoints = desc.points.count;
	const PxU32 tetMeshNbTets = desc.tetrahedrons.count;

	mesh.mNbVertices = tetMeshNbPoints;
	mesh.mVertices = PX_ALLOCATE(PxVec3, tetMeshNbPoints, "mVertices");
	mesh.mNbTetrahedrons = tetMeshNbTets;
	mesh.mTetrahedrons = PX_ALLOC(tetMeshNbTets * sizeof(TetrahedronT<PxU32>), "mTetrahedrons");

	mesh.mFlags = desc.flags; //TODO: flags are not of same type...

	computeLocalBoundsAndGeomEpsilon(mesh.mVertices, tetMeshNbPoints, mesh.mAABB, mesh.mGeomEpsilon);
}
// Moves a small amount of mass from the heavier to the lighter of two nodes when
// their mass ratio exceeds maxRatio. The transfer is symmetric, so the total mass
// is preserved. Reads the current masses from 'mass' and accumulates the change
// into 'newMasses'. Returns true if a transfer took place.
bool transferMass(PxI32 a, PxI32 b, PxArray<PxReal>& newMasses, const PxReal* mass, PxReal maxRatio, PxReal smoothingSpeed)
{
	const PxReal massA = mass[a];
	const PxReal massB = mass[b];
	const PxReal lighter = PxMin(massA, massB);
	const PxReal heavier = PxMax(massA, massB);

	if (heavier / lighter > maxRatio)
	{
		// Shift a fraction of the lighter mass towards the lighter node.
		const PxReal delta = smoothingSpeed * lighter;
		const bool aIsHeavier = massA > massB;

		newMasses[aIsHeavier ? a : b] -= delta;
		newMasses[aIsHeavier ? b : a] += delta;
		return true;
	}
	return false;
}
// Iteratively redistributes mass along tetrahedron edges until no neighboring
// pair exceeds maxRatio (or an iteration cap is hit). Each transfer is symmetric,
// so the total mass is preserved. A maxRatio of FLT_MAX disables smoothing.
// tets is a flat array of 4*numTets vertex indices.
void smoothMassRatiosWhilePreservingTotalMass( PxReal* massPerNode, PxU32 numNodes, const PxU32* tets, PxI32 numTets, PxReal maxRatio /*= 2.0f*/, PxReal smoothingSpeed = 0.25f)
{
	if (maxRatio == FLT_MAX)
		return;

	PxArray<PxReal> newMasses;
	newMasses.resize(numNodes);
	for (PxU32 i = 0; i < numNodes; ++i)
		newMasses[i] = massPerNode[i];

	// The 6 edges of a tetrahedron as local vertex index pairs.
	const PxU32 tetEdges[6][2] = { {0, 1}, {0, 2}, {0, 3}, {1, 2}, {1, 3}, {2, 3} };

	PxU32 l = 4 * numTets;

	PxU32 counter = 0;

	bool success = true;
	while (success)
	{
		++counter;
		success = false;
		for (PxU32 i = 0; i < l; i += 4)
		{
			for (PxU32 j = 0; j < 6; ++j)
			{
				// Bugfix: the original `success = success || transferMass(...)`
				// short-circuited, skipping every remaining edge in the sweep once
				// a single transfer had occurred. transferMass must be evaluated
				// unconditionally for each edge.
				if (transferMass(tets[i + tetEdges[j][0]], tets[i + tetEdges[j][1]], newMasses, massPerNode, maxRatio, smoothingSpeed))
					success = true;
			}
		}

		// Commit this sweep's result before the next pass.
		for (PxU32 i = 0; i < numNodes; ++i)
			massPerNode[i] = newMasses[i];

		// Safety cap against non-converging configurations.
		if (counter > 100000)
			break;
	}

	//printf("%i", counter);
}
// Builds the simulation ("grid model") tetrahedron mesh from the descriptor:
// - copies vertices and tetrahedron indices (widening 16-bit input to 32-bit),
// - computes per-tet rest poses and accumulates per-vertex mass,
// - partitions the tetrahedra for parallel solving,
// - smooths extreme mass ratios, then stores the inverse of each vertex mass.
void TetrahedronMeshBuilder::computeSimData(const PxTetrahedronMeshDesc& desc, TetrahedronMeshData& simulationMesh, SoftBodySimulationData& simulationData, const PxCookingParams& params)
{
	const PxU32 simTetMeshNbPoints = desc.points.count;
	const PxU32 simTetMeshNbTets = desc.tetrahedrons.count;
	simulationMesh.mNbVertices = simTetMeshNbPoints;
	simulationMesh.mVertices = reinterpret_cast<PxVec3*>(PX_ALLOC(simTetMeshNbPoints * sizeof(PxVec3), "mGridModelVertices"));
	simulationData.mGridModelInvMass = reinterpret_cast<PxReal*>(PX_ALLOC(simTetMeshNbPoints * sizeof(PxReal), "mGridModelInvMass"));
	simulationMesh.mNbTetrahedrons = simTetMeshNbTets;
	simulationMesh.mTetrahedrons = PX_ALLOC(simTetMeshNbTets * sizeof(TetrahedronT<PxU32>), "mGridModelTetrahedrons");
	simulationData.mNumTetsPerElement = desc.tetsPerElement;
	// Copy the (possibly strided) source points into the contiguous vertex array.
	immediateCooking::gatherStrided(desc.points.data, simulationMesh.mVertices, simTetMeshNbPoints, sizeof(PxVec3), desc.points.stride);
	TetrahedronT<PxU32>* gridModelTetrahedrons = reinterpret_cast<TetrahedronT<PxU32>*>(simulationMesh.mTetrahedrons);
	// NOTE: mGridModelInvMass is used as a plain mass accumulator below; it is
	// only converted into an inverse mass at the very end of this function.
	for (PxU32 i = 0; i < simTetMeshNbPoints; ++i)
		simulationData.mGridModelInvMass[i] = 0;
	// Copy the source tetrahedron indices, widening 16-bit input when the
	// descriptor flags request it.
	TetrahedronT<PxU32>* dest = gridModelTetrahedrons;
	const TetrahedronT<PxU32>* pastLastDest = gridModelTetrahedrons + simTetMeshNbTets;
	const PxU8* source = reinterpret_cast<const PxU8*>(desc.tetrahedrons.data);
	if (desc.flags & PxMeshFlag::e16_BIT_INDICES)
	{
		while (dest < pastLastDest)
		{
			const PxU16* tet16 = reinterpret_cast<const PxU16*>(source);
			writeToSimTetraIndice<PxU16>(tet16, simulationMesh.mVertices, dest);
			dest++;
			source += desc.tetrahedrons.stride;
		}
	}
	else
	{
		while (dest < pastLastDest)
		{
			const PxU32* tet32 = reinterpret_cast<const PxU32*>(source);
			writeToSimTetraIndice<PxU32>(tet32, simulationMesh.mVertices, dest);
			dest++;
			source += desc.tetrahedrons.stride;
		}
	}
	// Per-tet rest poses plus per-vertex mass accumulation.
	simulationData.mGridModelTetraRestPoses = PX_ALLOCATE(PxMat33, desc.tetrahedrons.count, "mGridModelTetraRestPoses");
	computeRestPoseAndPointMass(gridModelTetrahedrons, simulationMesh.mNbTetrahedrons,
		simulationMesh.mVertices, simulationData.mGridModelInvMass, simulationData.mGridModelTetraRestPoses);
	// Partition the tetrahedra; elements are either single tets or groups of
	// tets forming one element (mNumTetsPerElement > 1, e.g. hex elements).
	PxU32* accumulatedTetrahedronPerPartition = computeGridModelTetrahedronPartitions(simulationMesh, simulationData);
	if (simulationData.mNumTetsPerElement == 1)
		combineGridModelPartitions(simulationMesh, simulationData, &accumulatedTetrahedronPerPartition);
	else
		combineGridModelPartitionsHexMesh(simulationMesh, simulationData, &accumulatedTetrahedronPerPartition, simulationData.mNumTetsPerElement);
	// Limit the mass ratio between connected vertices; total mass is preserved.
	smoothMassRatiosWhilePreservingTotalMass(simulationData.mGridModelInvMass, simulationMesh.mNbVertices, reinterpret_cast<PxU32*>(gridModelTetrahedrons), simulationMesh.mNbTetrahedrons, params.maxWeightRatioInTet);
#if PX_DEBUG
	// Debug-only inspection of the resulting mass spread (ratio left for the debugger).
	PxReal max = 0;
	PxReal min = FLT_MAX;
	for (PxU32 i = 0; i < simulationMesh.mNbVertices; ++i)
	{
		PxReal w = simulationData.mGridModelInvMass[i];
		max = PxMax(w, max);
		min = PxMin(w, min);
	}
	PxReal ratio = max / min;
	PX_UNUSED(ratio);
#endif
	// Convert the accumulated masses into the inverse masses the solver expects.
	for (PxU32 i = 0; i < simulationMesh.mNbVertices; ++i)
	{
		simulationData.mGridModelInvMass[i] = 1.0f / simulationData.mGridModelInvMass[i];
	}
	PX_FREE(accumulatedTetrahedronPerPartition);
	// Dead debug-dump code kept for reference:
	//const PxU32 gridModelNbVerts = simulationMesh.mNbVertices;
	//PxVec3* gridModelVertices = reinterpret_cast<PxVec3*>(PX_ALLOC(gridModelNbVerts * sizeof(PxVec3), "gridModelVertices"));
	//PxVec4* gridModelVerticesInvMass = simulationMesh.mVerticesInvMass;
	//for (PxU32 i = 0; i < gridModelNbVerts; ++i)
	//{
	//	gridModelVertices[i] = gridModelVerticesInvMass[i].getXYZ();
	//}
	//writeTets("C:\\tmp\\grid.tet", gridModelVertices, simulationMesh.mNbVertices, reinterpret_cast<IndTetrahedron32*>(simulationMesh.mTetrahedrons), simulationMesh.mNbTetrahedrons);
	//writeTets("C:\\tmp\\col.tet", mVertices, mNbVertices, reinterpret_cast<IndTetrahedron32*>(mTetrahedrons), mNbTetrahedrons);
	//PX_FREE(gridModelVertices);
}
// Cooks the collision side of a softbody: imports and cleans the input mesh,
// builds the CPU midphase (BV4), records the tetrahedron indices, computes the
// local bounds, builds the GPU midphase/data when requested, and finally the
// per-tet rest poses. Returns false if import or either midphase build fails.
// collisionMeshDesc : input mesh descriptor (may be non-indexed)
// collisionMesh     : receives vertices/tets/bounds
// collisionData     : receives midphase trees, remap tables and rest poses
// validateMesh      : forwarded to importMesh for extra input validation
bool TetrahedronMeshBuilder::computeCollisionData(const PxTetrahedronMeshDesc& collisionMeshDesc, TetrahedronMeshData& collisionMesh, SoftBodyCollisionData& collisionData,
	const PxCookingParams& params, bool validateMesh)
{
	const PxU32 originalTetrahedronCount = collisionMeshDesc.tetrahedrons.count;
	// Create a local copy that we can modify
	PxTetrahedronMeshDesc desc = collisionMeshDesc;
	// Save simple params
	{
		// Handle implicit topology
		PxU32* topology = NULL;
		if (!desc.tetrahedrons.data)
		{
			// We'll create 32-bit indices
			desc.flags &= ~PxMeshFlag::e16_BIT_INDICES;
			desc.tetrahedrons.stride = sizeof(PxU32) * 4;
			{
				// Non-indexed mesh => create implicit topology
				desc.tetrahedrons.count = desc.points.count / 4;
				// Create default implicit topology
				topology = PX_ALLOCATE(PxU32, desc.points.count, "topology");
				for (PxU32 i = 0; i < desc.points.count; i++)
					topology[i] = i;
				desc.tetrahedrons.data = topology;
			}
		}
		// Continue as usual using our new descriptor.
		// Convert and clean the input mesh. Import the (possibly patched) local
		// copy "desc": importing "collisionMeshDesc" would discard the implicit
		// topology generated above for non-indexed input.
		if (!importMesh(desc, params, collisionMesh, collisionData, validateMesh))
		{
			PX_FREE(topology);
			return false;
		}
		// Cleanup if needed
		PX_FREE(topology);
	}
	//copy the original tetrahedron indices to grb tetrahedron indices if buildGRBData is true
	if(!createMidPhaseStructure(collisionMesh, collisionData, params))
		return false;
	recordTetrahedronIndices(collisionMesh, collisionData, params.buildGPUData);
	// Compute local bounds
	computeLocalBoundsAndGeomEpsilon(collisionMesh.mVertices, collisionMesh.mNbVertices, collisionMesh.mAABB, collisionMesh.mGeomEpsilon);
	if(!createGRBMidPhaseAndData(originalTetrahedronCount, collisionMesh, collisionData, params))
		return false;
	// Use collisionData.mGRB_primIndices rather than collisionMesh.mTetrahedrons: we want rest poses for the topology-remapped mesh, which is the actual one in simulation.
	computeRestPoseAndPointMass(reinterpret_cast<TetrahedronT<PxU32>*>(collisionData.mGRB_primIndices), collisionMesh.mNbTetrahedrons, collisionMesh.mVertices, NULL, collisionData.mTetraRestPoses);
	return true;
}
// Cooks a complete softbody from descriptors: collision mesh first, then the
// simulation mesh, then the mapping that embeds the collision vertices in the
// simulation tetrahedra. Returns false (after reporting an error) on invalid
// descriptors/params or if the collision data cannot be built.
bool TetrahedronMeshBuilder::loadFromDesc(const PxTetrahedronMeshDesc& simulationMeshDesc, const PxTetrahedronMeshDesc& collisionMeshDesc,
	PxSoftBodySimulationDataDesc softbodyDataDesc, TetrahedronMeshData& simulationMesh, SoftBodySimulationData& simulationData,
	TetrahedronMeshData& collisionMesh, SoftBodyCollisionData& collisionData, CollisionMeshMappingData& mappingData, const PxCookingParams& params, bool validateMesh)
{
	// Reject invalid input before any cooking work happens.
	if (!simulationMeshDesc.isValid() || !collisionMeshDesc.isValid() || !softbodyDataDesc.isValid())
		return PxGetFoundation().error(PxErrorCode::eINVALID_PARAMETER, PX_FL, "TetrahedronMesh::loadFromDesc: desc.isValid() failed!");
	// verify the mesh params
	if (!params.midphaseDesc.isValid())
		return PxGetFoundation().error(PxErrorCode::eINVALID_PARAMETER, PX_FL, "TetrahedronMesh::loadFromDesc: mParams.midphaseDesc.isValid() failed!");
	if (!computeCollisionData(collisionMeshDesc, collisionMesh, collisionData, params, validateMesh))
		return false;
	// These two return void - no failure to propagate.
	computeSimData(simulationMeshDesc, simulationMesh, simulationData, params);
	computeModelsMapping(simulationMesh, collisionMesh, collisionData, mappingData, params.buildGPUData, &softbodyDataDesc.vertexToTet);
#if PX_DEBUG
	// Every collision vertex must map to a valid simulation-mesh tetrahedron index.
	for (PxU32 i = 0; i < collisionMesh.mNbVertices; ++i) {
		PX_ASSERT(mappingData.mVertsRemapInGridModel[i] < simulationMesh.mNbTetrahedrons);
	}
#endif
	return true;
}
static void writeIndice(const PxU32 serialFlags, const PxU32* indices, const PxU32 nbIndices,
const bool platformMismatch, PxOutputStream& stream)
{
//write out tetrahedron indices
if (serialFlags & IMSF_8BIT_INDICES)
{
for (PxU32 i = 0; i < nbIndices; i++)
{
PxI8 data = PxI8(indices[i]);
stream.write(&data, sizeof(PxU8));
}
}
else if (serialFlags & IMSF_16BIT_INDICES)
{
for (PxU32 i = 0; i < nbIndices; i++)
writeWord(PxTo16(indices[i]), platformMismatch, stream);
}
else
{
writeIntBuffer(indices, nbIndices, platformMismatch, stream);
}
}
// Serializes a cooked tetrahedron mesh ('TEME' chunk): serialization flags,
// vertex/tet counts, vertex positions, tetrahedron indices (in the narrowest
// index width that fits), and the local bounds. Returns false only if the
// header cannot be written.
bool TetrahedronMeshBuilder::saveTetrahedronMeshData(PxOutputStream& stream, bool platformMismatch, const PxCookingParams& params, const TetrahedronMeshData& mesh)
{
	// Export header
	if (!writeHeader('T', 'E', 'M', 'E', PX_TET_MESH_VERSION, platformMismatch, stream))
		return false;

	// Find the largest vertex index so the narrowest sufficient index format
	// can be chosen (unless the cooking params force full 32-bit indices).
	const TetrahedronT<PxU32>* tets = reinterpret_cast<const TetrahedronT<PxU32>*>(mesh.mTetrahedrons);
	PxU32 maxIndex = 0;
	for (PxU32 i = 0; i < mesh.mNbTetrahedrons; i++)
	{
		maxIndex = PxMax(maxIndex, tets[i].v[0]);
		maxIndex = PxMax(maxIndex, tets[i].v[1]);
		maxIndex = PxMax(maxIndex, tets[i].v[2]);
		maxIndex = PxMax(maxIndex, tets[i].v[3]);
	}

	// Export serialization flags
	PxU32 serialFlags = 0;
	const bool force32 = (params.meshPreprocessParams & PxMeshPreprocessingFlag::eFORCE_32BIT_INDICES);
	if (maxIndex <= 0xFFFF && !force32)
		serialFlags |= (maxIndex <= 0xFF ? IMSF_8BIT_INDICES : IMSF_16BIT_INDICES);
	writeDword(serialFlags, platformMismatch, stream);

	// Export mesh: counts, vertex positions, tetrahedron indices.
	writeDword(mesh.mNbVertices, platformMismatch, stream);
	writeDword(mesh.mNbTetrahedrons, platformMismatch, stream);
	writeFloatBuffer(&mesh.mVertices->x, mesh.mNbVertices * 3, platformMismatch, stream);
	writeIndice(serialFlags, tets->v, mesh.mNbTetrahedrons * 4, platformMismatch, stream);

	// Export local bounds
	writeFloat(mesh.mGeomEpsilon, platformMismatch, stream);
	const auto& bounds = mesh.mAABB;
	writeFloat(bounds.minimum.x, platformMismatch, stream);
	writeFloat(bounds.minimum.y, platformMismatch, stream);
	writeFloat(bounds.minimum.z, platformMismatch, stream);
	writeFloat(bounds.maximum.x, platformMismatch, stream);
	writeFloat(bounds.maximum.y, platformMismatch, stream);
	writeFloat(bounds.maximum.z, platformMismatch, stream);
	return true;
}
// Serializes a complete cooked softbody ('SOME' chunk): flags, collision mesh,
// CPU midphase, local bounds, and - when GPU data was built - the GRB tables,
// GPU midphase, simulation mesh, partitioning data and the collision<->sim
// mapping tables. The read side must consume fields in exactly this order.
// Returns false only if the header cannot be written.
bool TetrahedronMeshBuilder::saveSoftBodyMeshData(PxOutputStream& stream, bool platformMismatch, const PxCookingParams& params,
	const TetrahedronMeshData& simulationMesh, const SoftBodySimulationData& simulationData, const TetrahedronMeshData& collisionMesh,
	const SoftBodyCollisionData& collisionData, const CollisionMeshMappingData& mappingData)
{
	// Export header
	if (!writeHeader('S', 'O', 'M', 'E', PX_SOFTBODY_MESH_VERSION, platformMismatch, stream))
		return false;
	// Export serialization flags
	PxU32 serialFlags = 0;
	if (collisionMesh.mMaterialIndices) serialFlags |= IMSF_MATERIALS;
	if (collisionData.mFaceRemap) serialFlags |= IMSF_FACE_REMAP;
	//if (mTetraSurfaceHint) serialFlags |= IMSF_ADJACENCIES; // using IMSF_ADJACENCIES to represent surfaceHint for tetrahedron mesh
	//if (mAdjacencies) serialFlags |= IMSF_ADJACENCIES;
	if (params.buildGPUData) serialFlags |= IMSF_GRB_DATA;
	// Compute serialization flags for indices.
	// A narrow index format is only valid if it can hold the largest vertex
	// index of BOTH meshes, since the same serialFlags drive every index block
	// written below (collision, GRB and simulation indices).
	PxU32 maxIndex = 0;
	const TetrahedronT<PxU32>* tets = reinterpret_cast<const TetrahedronT<PxU32>*>(collisionMesh.mTetrahedrons);
	for (PxU32 i = 0; i < collisionMesh.mNbTetrahedrons; i++)
	{
		if (tets[i].v[0] > maxIndex) maxIndex = tets[i].v[0];
		if (tets[i].v[1] > maxIndex) maxIndex = tets[i].v[1];
		if (tets[i].v[2] > maxIndex) maxIndex = tets[i].v[2];
		if (tets[i].v[3] > maxIndex) maxIndex = tets[i].v[3];
	}
	const TetrahedronT<PxU32>* gridModelTets = reinterpret_cast<const TetrahedronT<PxU32>*>(simulationMesh.mTetrahedrons);
	for (PxU32 i = 0; i < simulationMesh.mNbTetrahedrons; i++)
	{
		if (gridModelTets[i].v[0] > maxIndex) maxIndex = gridModelTets[i].v[0];
		if (gridModelTets[i].v[1] > maxIndex) maxIndex = gridModelTets[i].v[1];
		if (gridModelTets[i].v[2] > maxIndex) maxIndex = gridModelTets[i].v[2];
		if (gridModelTets[i].v[3] > maxIndex) maxIndex = gridModelTets[i].v[3];
	}
	bool force32 = (params.meshPreprocessParams & PxMeshPreprocessingFlag::eFORCE_32BIT_INDICES);
	if (maxIndex <= 0xFFFF && !force32)
		serialFlags |= (maxIndex <= 0xFF ? IMSF_8BIT_INDICES : IMSF_16BIT_INDICES);
	writeDword(serialFlags, platformMismatch, stream);
	// Export mesh: counts, collision vertices, collision tetrahedron indices.
	writeDword(collisionMesh.mNbVertices, platformMismatch, stream);
	//writeDword(collisionData.mNbSurfaceTriangles, platformMismatch, stream);
	writeDword(collisionMesh.mNbTetrahedrons, platformMismatch, stream);
	writeFloatBuffer(&collisionMesh.mVertices->x, collisionMesh.mNbVertices * 3, platformMismatch, stream);
	const PxU32 nbTetIndices = collisionMesh.mNbTetrahedrons * 4;
	//write out tetrahedron indices
	writeIndice(serialFlags, tets->v, nbTetIndices, platformMismatch, stream);
	//const PxU32 nbSurfaceTriangleIndices = collisionData.mNbSurfaceTriangles * 3;
	//const IndexedTriangle32* surfaceTriangles = reinterpret_cast<const IndexedTriangle32*>(collisionData.mSurfaceTriangles);
	//write out surface triangle indices
	//writeIndice(serialFlags, surfaceTriangles->v, nbSurfaceTriangleIndices, platformMismatch, stream);
	// Optional blocks, present iff the matching serialFlags bits were set above.
	if (collisionMesh.mMaterialIndices)
		writeWordBuffer(collisionMesh.mMaterialIndices, collisionMesh.mNbTetrahedrons, platformMismatch, stream);
	if (collisionData.mFaceRemap)
	{
		PxU32 maxId = computeMaxIndex(collisionData.mFaceRemap, collisionMesh.mNbTetrahedrons);
		writeDword(maxId, platformMismatch, stream);
		storeIndices(maxId, collisionMesh.mNbTetrahedrons, collisionData.mFaceRemap, stream, platformMismatch);
	//	writeIntBuffer(mMeshData.mFaceRemap, mMeshData.mNbTriangles, platformMismatch, stream);
	}
	/*	if (mAdjacencies)
	writeIntBuffer(mAdjacencies, mNbTetrahedrons * 4, platformMismatch, stream);*/
	// Export midphase structure
	saveMidPhaseStructure(stream, platformMismatch, collisionData);
	// Export local bounds
	writeFloat(collisionMesh.mGeomEpsilon, platformMismatch, stream);
	writeFloat(collisionMesh.mAABB.minimum.x, platformMismatch, stream);
	writeFloat(collisionMesh.mAABB.minimum.y, platformMismatch, stream);
	writeFloat(collisionMesh.mAABB.minimum.z, platformMismatch, stream);
	writeFloat(collisionMesh.mAABB.maximum.x, platformMismatch, stream);
	writeFloat(collisionMesh.mAABB.maximum.y, platformMismatch, stream);
	writeFloat(collisionMesh.mAABB.maximum.z, platformMismatch, stream);
	// GRB write -----------------------------------------------------------------
	if (params.buildGPUData)
	{
		// Topology-remapped tetrahedron indices used on the GPU.
		const PxU32* tetIndices = reinterpret_cast<PxU32*>(collisionData.mGRB_primIndices);
		writeIndice(serialFlags, tetIndices, nbTetIndices, platformMismatch, stream);
		//writeIntBuffer(reinterpret_cast<PxU32*>(mMeshData.mGRB_triIndices), , mMeshData.mNbTriangles*3, platformMismatch, stream);
		//writeIntBuffer(reinterpret_cast<PxU32 *>(mGRB_surfaceTriIndices), mNbTriangles*3, platformMismatch, stream);
		stream.write(collisionData.mGRB_tetraSurfaceHint, collisionMesh.mNbTetrahedrons * sizeof(PxU8));
		//writeIntBuffer(reinterpret_cast<PxU32 *>(mGRB_primAdjacencies), mNbTetrahedrons * 4, platformMismatch, stream);
		writeIntBuffer(collisionData.mGRB_faceRemap, collisionMesh.mNbTetrahedrons, platformMismatch, stream);
		writeIntBuffer(collisionData.mGRB_faceRemapInverse, collisionMesh.mNbTetrahedrons, platformMismatch, stream);
		stream.write(collisionData.mTetraRestPoses, collisionMesh.mNbTetrahedrons * sizeof(PxMat33));
		//Export GPU midphase structure
		BV32TriangleMeshBuilder::saveMidPhaseStructure(collisionData.mGRB_BV32Tree, stream, platformMismatch);
		// Simulation-mesh ("grid model") counts and partitioning metadata.
		writeDword(simulationMesh.mNbTetrahedrons, platformMismatch, stream);
		writeDword(simulationMesh.mNbVertices, platformMismatch, stream);
		writeDword(simulationData.mGridModelNbPartitions, platformMismatch, stream);
		writeDword(simulationData.mGridModelMaxTetsPerPartitions, platformMismatch, stream);
		writeDword(simulationData.mGMRemapOutputSize, platformMismatch, stream);
		writeDword(simulationData.mNumTetsPerElement, platformMismatch, stream);
		writeDword(mappingData.mCollisionNbTetrahedronsReferences, platformMismatch, stream);
		writeDword(mappingData.mTetsRemapSize, platformMismatch, stream);
		const PxU32 nbGridModeIndices = 4 * simulationMesh.mNbTetrahedrons;
		const PxU32* gridModelTetIndices = reinterpret_cast<PxU32*>(simulationMesh.mTetrahedrons);
		writeIndice(serialFlags, gridModelTetIndices, nbGridModeIndices, platformMismatch, stream);
		// Elements of 5 or 6 tets are hexahedra (8 verts); otherwise single tets (4 verts).
		const PxU32 numVertsPerElement = (simulationData.mNumTetsPerElement == 5 || simulationData.mNumTetsPerElement == 6) ? 8 : 4;
		const PxU32 numElements = simulationMesh.mNbTetrahedrons / simulationData.mNumTetsPerElement;
		writeFloatBuffer(&simulationMesh.mVertices->x, simulationMesh.mNbVertices * 3, platformMismatch, stream);
		if (simulationMesh.mMaterialIndices)
			writeWordBuffer(simulationMesh.mMaterialIndices, simulationMesh.mNbTetrahedrons, platformMismatch, stream);
		writeFloatBuffer(simulationData.mGridModelInvMass, simulationMesh.mNbVertices * 1, platformMismatch, stream);
		stream.write(simulationData.mGridModelTetraRestPoses, simulationMesh.mNbTetrahedrons * sizeof(PxMat33));
		stream.write(simulationData.mGridModelOrderedTetrahedrons, numElements * sizeof(PxU32));
		stream.write(simulationData.mGMRemapOutputCP, numElements * numVertsPerElement * sizeof(PxU32));
		stream.write(simulationData.mGMAccumulatedPartitionsCP, simulationData.mGridModelNbPartitions * sizeof(PxU32));
		stream.write(simulationData.mGMAccumulatedCopiesCP, simulationMesh.mNbVertices * sizeof(PxU32));
		// Collision<->simulation mapping tables.
		stream.write(mappingData.mCollisionAccumulatedTetrahedronsRef, collisionMesh.mNbVertices * sizeof(PxU32));
		stream.write(mappingData.mCollisionTetrahedronsReferences, mappingData.mCollisionNbTetrahedronsReferences * sizeof(PxU32));
		stream.write(mappingData.mCollisionSurfaceVertsHint, collisionMesh.mNbVertices * sizeof(PxU8));
		stream.write(mappingData.mCollisionSurfaceVertToTetRemap, collisionMesh.mNbVertices * sizeof(PxU32));
		stream.write(simulationData.mGMPullIndices, numElements * numVertsPerElement *sizeof(PxU32));
		writeFloatBuffer(mappingData.mVertsBarycentricInGridModel, collisionMesh.mNbVertices * 4, platformMismatch, stream);
		writeIntBuffer(mappingData.mVertsRemapInGridModel, collisionMesh.mNbVertices, platformMismatch, stream);
		writeIntBuffer(mappingData.mTetsRemapColToSim, mappingData.mTetsRemapSize, platformMismatch, stream);
		writeIntBuffer(mappingData.mTetsAccumulatedRemapColToSim, collisionMesh.mNbTetrahedrons, platformMismatch, stream);
	}
	// End of GRB write ----------------------------------------------------------
	return true;
}
// Builds the CPU midphase acceleration structure (BV4) over the collision
// tetrahedra and permutes the face remap table to match the tetrahedron order
// chosen by the tree builder. Returns false (with a reported error) if the
// BV4 build fails.
bool TetrahedronMeshBuilder::createMidPhaseStructure(TetrahedronMeshData& collisionMesh, SoftBodyCollisionData& collisionData, const PxCookingParams& params)
{
	const PxReal gBoxEpsilon = 2e-4f;
//	const PxReal gBoxEpsilon = 0.1f;

	TetrahedronSourceMesh& meshInterface = collisionData.mMeshInterface;
	meshInterface.initRemap();
	meshInterface.setNbVertices(collisionMesh.mNbVertices);
	meshInterface.setNbTetrahedrons(collisionMesh.mNbTetrahedrons);

	// Hand over the tetrahedron indices in whichever width they are stored.
	IndTetrahedron16* tets16 = NULL;
	IndTetrahedron32* tets32 = NULL;
	if (collisionMesh.mFlags & PxTriangleMeshFlag::e16_BIT_INDICES)
		tets16 = reinterpret_cast<IndTetrahedron16*>(collisionMesh.mTetrahedrons);
	else
		tets32 = reinterpret_cast<IndTetrahedron32*>(collisionMesh.mTetrahedrons);
	meshInterface.setPointers(tets32, tets16, collisionMesh.mVertices);

	const PxU32 nbTetsPerLeaf = 15;
	if (!BuildBV4Ex(collisionData.mBV4Tree, meshInterface, gBoxEpsilon, nbTetsPerLeaf, false))
		return PxGetFoundation().error(PxErrorCode::eINTERNAL_ERROR, PX_FL, "BV4 tree failed to build.");

	// The builder reordered the tetrahedrons; keep mFaceRemap consistent with
	// the new order.
	const PxU32* order = meshInterface.getRemap();
	if (!params.suppressTriangleMeshRemapTable || params.buildGPUData)
	{
		PxU32* remapped = PX_ALLOCATE(PxU32, collisionMesh.mNbTetrahedrons, "mFaceRemap");
		for (PxU32 i = 0; i < collisionMesh.mNbTetrahedrons; i++)
			remapped[i] = collisionData.mFaceRemap ? collisionData.mFaceRemap[order[i]] : order[i];
		PX_FREE(collisionData.mFaceRemap);
		collisionData.mFaceRemap = remapped;
	}

	meshInterface.releaseRemap();
	return true;
}
// Serializes the BV4 midphase tree ('BV4 ' chunk): local bounds, init data,
// quantization coefficients and the raw node array.
void TetrahedronMeshBuilder::saveMidPhaseStructure(PxOutputStream& stream, bool mismatch, const SoftBodyCollisionData& collisionData)
{
	// PT: in version 1 we defined "mismatch" as:
	// const bool mismatch = (littleEndian() == 1);
	// i.e. the data was *always* saved to file in big-endian format no matter what.
	// In version>1 we now do the same as for other structures in the SDK: the data is
	// exported either as little or big-endian depending on the passed parameter.
	const PxU32 bv4StructureVersion = 3;
	writeChunk('B', 'V', '4', ' ', stream);
	writeDword(bv4StructureVersion, mismatch, stream);
	writeFloat(collisionData.mBV4Tree.mLocalBounds.mCenter.x, mismatch, stream);
	writeFloat(collisionData.mBV4Tree.mLocalBounds.mCenter.y, mismatch, stream);
	writeFloat(collisionData.mBV4Tree.mLocalBounds.mCenter.z, mismatch, stream);
	writeFloat(collisionData.mBV4Tree.mLocalBounds.mExtentsMagnitude, mismatch, stream);
	writeDword(collisionData.mBV4Tree.mInitData, mismatch, stream);
	// Quantization coefficients (center/extents or min/max, depending on mode).
	writeFloat(collisionData.mBV4Tree.mCenterOrMinCoeff.x, mismatch, stream);
	writeFloat(collisionData.mBV4Tree.mCenterOrMinCoeff.y, mismatch, stream);
	writeFloat(collisionData.mBV4Tree.mCenterOrMinCoeff.z, mismatch, stream);
	writeFloat(collisionData.mBV4Tree.mExtentsOrMaxCoeff.x, mismatch, stream);
	writeFloat(collisionData.mBV4Tree.mExtentsOrMaxCoeff.y, mismatch, stream);
	writeFloat(collisionData.mBV4Tree.mExtentsOrMaxCoeff.z, mismatch, stream);
	// PT: version 3
	writeDword(PxU32(collisionData.mBV4Tree.mQuantized), mismatch, stream);
	writeDword(collisionData.mBV4Tree.mNbNodes, mismatch, stream);
#ifdef GU_BV4_USE_SLABS
	// PT: we use BVDataPacked to get the size computation right, but we're dealing with BVDataSwizzled here!
	const PxU32 NodeSize = collisionData.mBV4Tree.mQuantized ? sizeof(BVDataPackedQ) : sizeof(BVDataPackedNQ);
	// Raw memory dump of the nodes - only valid without an endian mismatch.
	stream.write(collisionData.mBV4Tree.mNodes, NodeSize*collisionData.mBV4Tree.mNbNodes);
	PX_ASSERT(!mismatch);
#else
	#error Not implemented
#endif
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
bool BV32TetrahedronMeshBuilder::createMidPhaseStructure(const PxCookingParams& params, TetrahedronMeshData& collisionMesh, BV32Tree& bv32Tree, SoftBodyCollisionData& collisionData)
{
PX_UNUSED(params);
PX_UNUSED(collisionMesh);
PX_UNUSED(bv32Tree);
const PxReal gBoxEpsilon = 2e-4f;
TetrahedronSourceMesh meshInterface;
// const PxReal gBoxEpsilon = 0.1f;
meshInterface.initRemap();
meshInterface.setNbVertices(collisionMesh.mNbVertices);
meshInterface.setNbTetrahedrons(collisionMesh.mNbTetrahedrons);
//meshInterface.setNbVertices(meshData.mNbVertices);
//meshInterface.setNbTriangles(meshData.mNbTriangles);
PX_ASSERT(!(collisionMesh.mFlags & PxTriangleMeshFlag::e16_BIT_INDICES));
IndTetrahedron32* tetrahedron32 = reinterpret_cast<IndTetrahedron32*>(collisionData.mGRB_primIndices);
meshInterface.setPointers(tetrahedron32, NULL, collisionMesh.mVertices);
PxU32 nbTetrahedronPerLeaf = 32;
if (!BuildBV32Ex(bv32Tree, meshInterface, gBoxEpsilon, nbTetrahedronPerLeaf))
return PxGetFoundation().error(PxErrorCode::eINTERNAL_ERROR, PX_FL, "BV32 tree failed to build.");
const PxU32* order = meshInterface.getRemap();
if (collisionMesh.mMaterialIndices)
{
PxFEMMaterialTableIndex* newMat = PX_ALLOCATE(PxFEMMaterialTableIndex, collisionMesh.mNbTetrahedrons, "mMaterialIndices");
for (PxU32 i = 0; i < collisionMesh.mNbTetrahedrons; i++)
newMat[i] = collisionMesh.mMaterialIndices[order[i]];
PX_FREE(collisionMesh.mMaterialIndices);
collisionMesh.mMaterialIndices = newMat;
}
//suppressTriangleMeshRemapTable can use for tetrahedron mesh remap table
if (!params.suppressTriangleMeshRemapTable || params.buildGPUData)
{
PxU32* newMap = PX_ALLOCATE(PxU32, collisionMesh.mNbTetrahedrons, "mGRB_faceRemap");
for (PxU32 i = 0; i<collisionMesh.mNbTetrahedrons; i++)
newMap[i] = collisionData.mGRB_faceRemap ? collisionData.mGRB_faceRemap[order[i]] : order[i];
PX_FREE(collisionData.mGRB_faceRemap);
collisionData.mGRB_faceRemap = newMap;
PxU32* newMapInverse = PX_ALLOCATE(PxU32, collisionMesh.mNbTetrahedrons, "mGRB_faceRemapInverse");
for (PxU32 i = 0; i < collisionMesh.mNbTetrahedrons; ++i)
newMapInverse[collisionData.mGRB_faceRemap[i]] = i;
PX_FREE(collisionData.mGRB_faceRemapInverse);
collisionData.mGRB_faceRemapInverse = newMapInverse;
}
meshInterface.releaseRemap();
return true;
}
// Serializes the BV32 midphase tree ('BV32' chunk): local bounds, init data,
// then every packed node (child count, depth, child data and AABB min/max).
void BV32TetrahedronMeshBuilder::saveMidPhaseStructure(BV32Tree* bv32Tree, PxOutputStream& stream, bool mismatch)
{
	// PT: in version 1 we defined "mismatch" as:
	// const bool mismatch = (littleEndian() == 1);
	// i.e. the data was *always* saved to file in big-endian format no matter what.
	// In version>1 we now do the same as for other structures in the SDK: the data is
	// exported either as little or big-endian depending on the passed parameter.
	const PxU32 bv32StructureVersion = 2;
	writeChunk('B', 'V', '3', '2', stream);
	writeDword(bv32StructureVersion, mismatch, stream);
	writeFloat(bv32Tree->mLocalBounds.mCenter.x, mismatch, stream);
	writeFloat(bv32Tree->mLocalBounds.mCenter.y, mismatch, stream);
	writeFloat(bv32Tree->mLocalBounds.mCenter.z, mismatch, stream);
	writeFloat(bv32Tree->mLocalBounds.mExtentsMagnitude, mismatch, stream);
	writeDword(bv32Tree->mInitData, mismatch, stream);
	writeDword(bv32Tree->mNbPackedNodes, mismatch, stream);
	PX_ASSERT(bv32Tree->mNbPackedNodes > 0);
	for (PxU32 i = 0; i < bv32Tree->mNbPackedNodes; ++i)
	{
		BV32DataPacked& node = bv32Tree->mPackedNodes[i];
		// Each child contributes a 4-float slot to the min/max arrays.
		const PxU32 nbElements = node.mNbNodes * 4;
		writeDword(node.mNbNodes, mismatch, stream);
		writeDword(node.mDepth, mismatch, stream);
		WriteDwordBuffer(node.mData, node.mNbNodes, mismatch, stream);
		writeFloatBuffer(&node.mMin[0].x, nbElements, mismatch, stream);
		writeFloatBuffer(&node.mMax[0].x, nbElements, mismatch, stream);
	}
}
///////////////////////////////////////////////////////////////////////////////
bool immediateCooking::cookTetrahedronMesh(const PxCookingParams& params, const PxTetrahedronMeshDesc& meshDesc, PxOutputStream& stream)
{
TetrahedronMeshData data;
TetrahedronMeshBuilder::computeTetData(meshDesc, data);
TetrahedronMeshBuilder::saveTetrahedronMeshData(stream, platformMismatch(), params, data);
return true;
}
// Cooks a tetrahedron mesh from the descriptor and inserts it directly into
// the SDK through the insertion callback, without streaming it out first.
// Returns the created mesh (cast from the insertion callback's result).
PxTetrahedronMesh* immediateCooking::createTetrahedronMesh(const PxCookingParams& /*params*/, const PxTetrahedronMeshDesc& meshDesc, PxInsertionCallback& insertionCallback)
{
	// cooking code does lots of float bitwise reinterpretation that generates exceptions
	PX_FPU_GUARD;

	TetrahedronMeshData meshData;
	TetrahedronMeshBuilder::computeTetData(meshDesc, meshData);

	const PxConcreteType::Enum type = PxConcreteType::eTETRAHEDRON_MESH;
	return static_cast<PxTetrahedronMesh*>(insertionCallback.buildObjectFromData(type, &meshData));
}
// Cooks a complete softbody (simulation mesh + collision mesh + mapping) and
// streams it out. Returns false if cooking or serialization fails.
bool immediateCooking::cookSoftBodyMesh(const PxCookingParams& params, const PxTetrahedronMeshDesc& simulationMeshDesc, const PxTetrahedronMeshDesc& collisionMeshDesc,
	const PxSoftBodySimulationDataDesc& softbodyDataDesc, PxOutputStream& stream)
{
	// cooking code does lots of float bitwise reinterpretation that generates exceptions
	PX_FPU_GUARD;

	TetrahedronMeshData simulationMesh;
	SoftBodySimulationData simulationData;
	TetrahedronMeshData collisionMesh;
	SoftBodyCollisionData collisionData;
	CollisionMeshMappingData mappingData;
	SoftBodyMeshData data(simulationMesh, simulationData, collisionMesh, collisionData, mappingData);
	if(!TetrahedronMeshBuilder::loadFromDesc(simulationMeshDesc, collisionMeshDesc, softbodyDataDesc, data.mSimulationMesh, data.mSimulationData, data.mCollisionMesh, data.mCollisionData, data.mMappingData, params, false))
		return false;

	// Propagate the save result instead of unconditionally reporting success.
	return TetrahedronMeshBuilder::saveSoftBodyMeshData(stream, platformMismatch(), params, data.mSimulationMesh, data.mSimulationData, data.mCollisionMesh, data.mCollisionData, data.mMappingData);
}
// Cooks a complete softbody and inserts it directly into the SDK through the
// insertion callback, without streaming it out first. Returns NULL if cooking
// fails. Note: the previous PX_UNUSED markers were removed - every parameter
// is used below.
PxSoftBodyMesh* immediateCooking::createSoftBodyMesh(const PxCookingParams& params, const PxTetrahedronMeshDesc& simulationMeshDesc, const PxTetrahedronMeshDesc& collisionMeshDesc,
	const PxSoftBodySimulationDataDesc& softbodyDataDesc, PxInsertionCallback& insertionCallback)
{
	// cooking code does lots of float bitwise reinterpretation that generates exceptions
	PX_FPU_GUARD;

	TetrahedronMeshData simulationMesh;
	SoftBodySimulationData simulationData;
	TetrahedronMeshData collisionMesh;
	SoftBodyCollisionData collisionData;
	CollisionMeshMappingData mappingData;
	SoftBodyMeshData data(simulationMesh, simulationData, collisionMesh, collisionData, mappingData);
	if(!TetrahedronMeshBuilder::loadFromDesc(simulationMeshDesc, collisionMeshDesc, softbodyDataDesc, data.mSimulationMesh, data.mSimulationData, data.mCollisionMesh, data.mCollisionData, data.mMappingData, params, false))
		return NULL;

	const PxConcreteType::Enum type = PxConcreteType::eSOFTBODY_MESH;
	return static_cast<PxSoftBodyMesh*>(insertionCallback.buildObjectFromData(type, &data));
}
// Computes the mapping that embeds the collision mesh in the simulation mesh
// (used to drive the collision vertices from the simulated tetrahedra).
// The caller owns the returned object.
PxCollisionMeshMappingData* immediateCooking::computeModelsMapping(const PxCookingParams& params, PxTetrahedronMeshData& simulationMesh, const PxTetrahedronMeshData& collisionMesh,
	const PxSoftBodyCollisionData& collisionData, const PxBoundedData* vertexToTet)
{
	CollisionMeshMappingData* mappingData = PX_NEW(CollisionMeshMappingData);
	// Downcast the public interface types to the internal implementations.
	TetrahedronMeshBuilder::computeModelsMapping(
		static_cast<TetrahedronMeshData&>(simulationMesh),
		static_cast<const TetrahedronMeshData&>(collisionMesh),
		static_cast<const SoftBodyCollisionData&>(collisionData),
		*mappingData, params.buildGPUData, vertexToTet);
	return mappingData;
}
// Cooks only the collision part of a softbody. Returns NULL on failure.
// The caller owns the returned object (and, through it, mesh/collisionData).
PxCollisionTetrahedronMeshData* immediateCooking::computeCollisionData(const PxCookingParams& params, const PxTetrahedronMeshDesc& collisionMeshDesc)
{
	TetrahedronMeshData* mesh = PX_NEW(TetrahedronMeshData);
	SoftBodyCollisionData* collisionData = PX_NEW(SoftBodyCollisionData);
	if(!TetrahedronMeshBuilder::computeCollisionData(collisionMeshDesc, *mesh, *collisionData, params, false)) {
		// PX_NEW'd objects must be released with PX_DELETE so their destructors
		// run; PX_FREE would release the memory without destructing the members.
		PX_DELETE(mesh);
		PX_DELETE(collisionData);
		return NULL;
	}
	CollisionTetrahedronMeshData* data = PX_NEW(CollisionTetrahedronMeshData);
	data->mMesh = mesh;
	data->mCollisionData = collisionData;
	return data;
}
// Cooks only the simulation-mesh part of a softbody (vertices, tets, masses,
// partitions). The caller owns the returned object.
PxSimulationTetrahedronMeshData* immediateCooking::computeSimulationData(const PxCookingParams& params, const PxTetrahedronMeshDesc& simulationMeshDesc)
{
	//KS - This really needs the collision mesh as well.
	TetrahedronMeshData* simMesh = PX_NEW(TetrahedronMeshData);
	SoftBodySimulationData* simData = PX_NEW(SoftBodySimulationData);
	TetrahedronMeshBuilder::computeSimData(simulationMeshDesc, *simMesh, *simData, params);

	SimulationTetrahedronMeshData* result = PX_NEW(SimulationTetrahedronMeshData);
	result->mMesh = simMesh;
	result->mSimulationData = simData;
	return result;
}
// Bundles already-cooked softbody components into a SoftBodyMeshData and
// inserts it into the SDK through the insertion callback. Returns the created
// mesh (cast from the insertion callback's result).
PxSoftBodyMesh* immediateCooking::assembleSoftBodyMesh(PxTetrahedronMeshData& simulationMesh, PxSoftBodySimulationData& simulationData, PxTetrahedronMeshData& collisionMesh,
	PxSoftBodyCollisionData& collisionData, PxCollisionMeshMappingData& mappingData, PxInsertionCallback& insertionCallback)
{
	// Downcast the public interface types to the internal implementations.
	TetrahedronMeshData& simMesh = static_cast<TetrahedronMeshData&>(simulationMesh);
	SoftBodySimulationData& simData = static_cast<SoftBodySimulationData&>(simulationData);
	TetrahedronMeshData& colMesh = static_cast<TetrahedronMeshData&>(collisionMesh);
	SoftBodyCollisionData& colData = static_cast<SoftBodyCollisionData&>(collisionData);
	CollisionMeshMappingData& mapping = static_cast<CollisionMeshMappingData&>(mappingData);

	SoftBodyMeshData data(simMesh, simData, colMesh, colData, mapping);
	const PxConcreteType::Enum type = PxConcreteType::eSOFTBODY_MESH;
	return static_cast<PxSoftBodyMesh*>(insertionCallback.buildObjectFromData(type, &data));
}
// Convenience overload: unpacks the mesh/data pairs from the two container
// objects and forwards to assembleSoftBodyMesh().
PxSoftBodyMesh* immediateCooking::assembleSoftBodyMesh_Sim(PxSimulationTetrahedronMeshData& simulationMesh, PxCollisionTetrahedronMeshData& collisionMesh,
	PxCollisionMeshMappingData& mappingData, PxInsertionCallback& insertionCallback)
{
	return assembleSoftBodyMesh(*simulationMesh.getMesh(), *simulationMesh.getData(), *collisionMesh.getMesh(), *collisionMesh.getData(), mappingData, insertionCallback);
}
| 103,596 | C++ | 34.588114 | 219 | 0.735192 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/cooking/GuCookingConvexHullLib.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_COOKING_CONVEX_HULL_LIB_H
#define GU_COOKING_CONVEX_HULL_LIB_H
#include "cooking/PxConvexMeshDesc.h"
#include "cooking/PxCooking.h"
namespace physx
{
//////////////////////////////////////////////////////////////////////////
// base class for the convex hull libraries - inflation based and quickhull
// Abstract base for the convex hull cooking backends. Owns the shared
// pre-processing steps (vertex cleanup, origin shifting) and declares the
// interface the cooking pipeline uses to produce hull data.
class ConvexHullLib
{
PX_NOCOPY(ConvexHullLib)
public:
// functions
// Keeps references to the input descriptor and cooking parameters; both must
// outlive this object (no copies are made).
ConvexHullLib(const PxConvexMeshDesc& desc, const PxCookingParams& params)
: mConvexMeshDesc(desc), mCookingParams(params), mSwappedIndices(NULL),
mShiftedVerts(NULL)
{
}
virtual ~ConvexHullLib();
// computes the convex hull from provided points
virtual PxConvexMeshCookingResult::Enum createConvexHull() = 0;
// fills the PxConvexMeshDesc with computed hull data
virtual void fillConvexMeshDesc(PxConvexMeshDesc& desc) = 0;
// compute the edge list information if possible
virtual bool createEdgeList(const PxU32 nbIndices, const PxU8* indices, PxU8** hullDataFacesByEdges8, PxU16** edgeData16, PxU16** edges) = 0;
// maximum number of vertices per face for GPU-compatible hulls
static const PxU32 gpuMaxVertsPerFace = 31;
protected:
// clean input vertices from duplicates, normalize etc.
bool cleanupVertices(PxU32 svcount, // input vertex count
const PxVec3* svertices, // vertices
PxU32 stride, // stride
PxU32& vcount, // output number of vertices
PxVec3* vertices); // location to store the results.
// shift vertices around origin and clean input vertices from duplicates, normalize etc.
bool shiftAndcleanupVertices(PxU32 svcount, // input vertex count
const PxVec3* svertices, // vertices
PxU32 stride, // stride
PxU32& vcount, // output number of vertices
PxVec3* vertices); // location to store the results.
// NOTE(review): presumably reorders the descriptor so the largest face comes first
// (filling mSwappedIndices) — confirm against the .cpp implementation.
void swapLargestFace(PxConvexMeshDesc& desc);
// Undoes the origin shift applied by shiftAndcleanupVertices on the produced hull data.
void shiftConvexMeshDesc(PxConvexMeshDesc& desc);
protected:
const PxConvexMeshDesc& mConvexMeshDesc; // input mesh descriptor (referenced, not owned)
const PxCookingParams& mCookingParams; // cooking settings (referenced, not owned)
PxU32* mSwappedIndices; // index remap produced by swapLargestFace
PxVec3 mOriginShift; // translation applied to the shifted vertex copy
PxVec3* mShiftedVerts; // shifted copy of the input vertices
};
}
#endif
| 3,729 | C | 39.107526 | 143 | 0.73934 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/cooking/GuCookingQuickHullConvexHullLib.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_COOKING_QUICKHULL_CONVEXHULLLIB_H
#define GU_COOKING_QUICKHULL_CONVEXHULLLIB_H
#include "GuCookingConvexHullLib.h"
#include "foundation/PxArray.h"
#include "foundation/PxUserAllocated.h"
namespace local
{
class QuickHull;
struct QuickHullVertex;
}
namespace physx
{
class ConvexHull;
//////////////////////////////////////////////////////////////////////////
// Quickhull lib constructs the hull from given input points. The resulting hull
// will only contain a subset of the input points. The algorithm does incrementally
// adds most furthest vertices to the starting simplex. The produced hulls are build with high precision
// and produce more stable and correct results, than the legacy algorithm.
// Quickhull-based implementation of ConvexHullLib. Builds the hull incrementally
// and provides two vertex-limit fallbacks (OBB slicing and plane shifting).
class QuickHullConvexHullLib: public ConvexHullLib, public PxUserAllocated
{
PX_NOCOPY(QuickHullConvexHullLib)
public:
// functions
QuickHullConvexHullLib(const PxConvexMeshDesc& desc, const PxCookingParams& params);
~QuickHullConvexHullLib();
// computes the convex hull from provided points
virtual PxConvexMeshCookingResult::Enum createConvexHull();
// fills the convexmeshdesc with computed hull data
virtual void fillConvexMeshDesc(PxConvexMeshDesc& desc);
// provide the edge list information
// (params: nbIndices, indices, facesByEdges out, edgeData out, edges out — see base class)
virtual bool createEdgeList(const PxU32, const PxU8* , PxU8** , PxU16** , PxU16** );
protected:
// if vertex limit reached we need to expand the hull using the OBB slicing
PxConvexMeshCookingResult::Enum expandHullOBB();
// if vertex limit reached we need to expand the hull using the plane shifting
PxConvexMeshCookingResult::Enum expandHull();
// checks for collinearity and co planarity
// returns true if the simplex was ok, we can reuse the computed tolerances and min/max values
bool cleanupForSimplex(PxVec3* vertices, PxU32 vertexCount, local::QuickHullVertex* minimumVertex,
local::QuickHullVertex* maximumVertex, float& tolerance, float& planeTolerance);
// fill the result desc from quick hull convex
void fillConvexMeshDescFromQuickHull(PxConvexMeshDesc& desc);
// fill the result desc from cropped hull convex
void fillConvexMeshDescFromCroppedHull(PxConvexMeshDesc& desc);
private:
local::QuickHull* mQuickHull; // the internal quick hull representation
ConvexHull* mCropedConvexHull; //the hull cropped from OBB, used for vertex limit path
PxU8* mOutMemoryBuffer; // memory buffer used for output data
PxU16* mFaceTranslateTable; // translation table mapping output faces to internal quick hull table
};
}
#endif
| 4,242 | C | 42.295918 | 105 | 0.759547 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/hf/GuOverlapTestsHF.cpp | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#include "GuOverlapTests.h"
#include "GuHeightFieldUtil.h"
#include "GuBoxConversion.h"
#include "GuInternal.h"
#include "GuVecConvexHull.h"
#include "GuEntityReport.h"
#include "GuDistancePointTriangle.h"
#include "GuIntersectionCapsuleTriangle.h"
#include "GuDistanceSegmentTriangle.h"
#include "GuBounds.h"
#include "GuBV4_Common.h"
#include "GuVecTriangle.h"
#include "GuConvexMesh.h"
#include "GuGJK.h"
#include "geometry/PxSphereGeometry.h"
using namespace physx;
using namespace Gu;
using namespace aos;
///////////////////////////////////////////////////////////////////////////////
namespace
{
// Common base for the per-shape heightfield overlap callbacks below: bundles
// the heightfield accessor, the heightfield pose and the overlap result flag.
struct HeightfieldOverlapReport : Gu::OverlapReport
{
PX_NOCOPY(HeightfieldOverlapReport)
public:
HeightfieldOverlapReport(const PxHeightFieldGeometry& hfGeom, const PxTransform& hfPose) : mHfUtil(hfGeom), mHFPose(hfPose), mOverlap(PxIntFalse) {}
const HeightFieldUtil mHfUtil; // heightfield accessor built from the geometry
const PxTransform& mHFPose; // heightfield pose (referenced — must outlive the report)
PxIntBool mOverlap; // set to PxIntTrue as soon as a touched triangle overlaps
};
}
// Sphere-vs-heightfield overlap: gathers the heightfield triangles touching the
// sphere's bounds and tests each one with a squared point-triangle distance check.
bool GeomOverlapCallback_SphereHeightfield(GU_OVERLAP_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::eSPHERE);
PX_ASSERT(geom1.getType()==PxGeometryType::eHEIGHTFIELD);
PX_UNUSED(cache);
PX_UNUSED(threadContext);
const PxSphereGeometry& sphereGeom = static_cast<const PxSphereGeometry&>(geom0);
const PxHeightFieldGeometry& hfGeom = static_cast<const PxHeightFieldGeometry&>(geom1);
struct SphereOverlapReport : HeightfieldOverlapReport
{
// Beware: the radius member stores the SQUARED radius (see ctor).
Sphere mLocalSphere;
SphereOverlapReport(const PxHeightFieldGeometry& hfGeom_, const PxTransform& hfPose, const PxVec3& localSphereCenter, float sphereRadius) : HeightfieldOverlapReport(hfGeom_, hfPose)
{
mLocalSphere.center = localSphereCenter;
// Pre-square the radius so the per-triangle test can compare squared distances.
mLocalSphere.radius = sphereRadius * sphereRadius;
}
// Called by overlapAABBTriangles with batches of candidate triangle indices.
// Returns false to abort the traversal once an overlap has been found.
virtual bool reportTouchedTris(PxU32 nb, const PxU32* indices)
{
while(nb--)
{
const PxU32 triangleIndex = *indices++;
PxTriangle currentTriangle;
mHfUtil.getTriangle(mHFPose, currentTriangle, NULL, NULL, triangleIndex, false, false);
const PxVec3& p0 = currentTriangle.verts[0];
const PxVec3& p1 = currentTriangle.verts[1];
const PxVec3& p2 = currentTriangle.verts[2];
const PxVec3 edge10 = p1 - p0;
const PxVec3 edge20 = p2 - p0;
// Closest point on the triangle to the sphere center.
const PxVec3 cp = closestPtPointTriangle2(mLocalSphere.center, p0, p1, p2, edge10, edge20);
const float sqrDist = (cp - mLocalSphere.center).magnitudeSquared();
if(sqrDist <= mLocalSphere.radius) // mLocalSphere.radius has been pre-squared in the ctor
{
mOverlap = PxIntTrue;
return false;
}
}
return true;
}
};
PxBounds3 localBounds;
// Sphere center and bounds expressed relative to the heightfield (see getLocalSphereData).
const PxVec3 localSphereCenter = getLocalSphereData(localBounds, pose0, pose1, sphereGeom.radius);
SphereOverlapReport report(hfGeom, pose1, localSphereCenter, sphereGeom.radius);
report.mHfUtil.overlapAABBTriangles(localBounds, report, 4);
return report.mOverlap!=PxIntFalse;
}
///////////////////////////////////////////////////////////////////////////////
// Capsule-vs-heightfield overlap: gathers the heightfield triangles touching the
// capsule's bounds (computed with the relative pose) and tests each triangle
// against the capsule with a dedicated capsule-triangle intersection routine.
bool GeomOverlapCallback_CapsuleHeightfield(GU_OVERLAP_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::eCAPSULE);
PX_ASSERT(geom1.getType()==PxGeometryType::eHEIGHTFIELD);
PX_UNUSED(cache);
PX_UNUSED(threadContext);
const PxCapsuleGeometry& capsuleGeom = static_cast<const PxCapsuleGeometry&>(geom0);
const PxHeightFieldGeometry& hfGeom = static_cast<const PxHeightFieldGeometry&>(geom1);
struct CapsuleOverlapReport : HeightfieldOverlapReport
{
Capsule mLocalCapsule; // capsule segment + radius, relative to the heightfield
CapsuleTriangleOverlapData mData; // precomputed data for intersectCapsuleTriangle
CapsuleOverlapReport(const PxHeightFieldGeometry& hfGeom_, const PxTransform& hfPose) : HeightfieldOverlapReport(hfGeom_, hfPose) {}
// Returns false to abort traversal as soon as one triangle overlaps the capsule.
virtual bool reportTouchedTris(PxU32 nb, const PxU32* indices)
{
while(nb--)
{
const PxU32 triangleIndex = *indices++;
PxTriangle currentTriangle;
mHfUtil.getTriangle(mHFPose, currentTriangle, NULL, NULL, triangleIndex, false, false);
const PxVec3& p0 = currentTriangle.verts[0];
const PxVec3& p1 = currentTriangle.verts[1];
const PxVec3& p2 = currentTriangle.verts[2];
// Dead branch (if(0)): squared segment-triangle distance variant, never executed;
// presumably kept as a reference/debug path — the else branch below is the live one.
if(0)
{
PxReal t,u,v;
const PxVec3 p1_p0 = p1 - p0;
const PxVec3 p2_p0 = p2 - p0;
const PxReal sqrDist = distanceSegmentTriangleSquared(mLocalCapsule, p0, p1_p0, p2_p0, &t, &u, &v);
if(sqrDist <= mLocalCapsule.radius*mLocalCapsule.radius)
{
mOverlap = PxIntTrue;
return false;
}
}
else
{
// Active path: capsule-triangle intersection using the (unnormalized) triangle normal.
const PxVec3 normal = (p0 - p1).cross(p0 - p2);
if(intersectCapsuleTriangle(normal, p0, p1, p2, mLocalCapsule, mData))
{
mOverlap = PxIntTrue;
return false;
}
}
}
return true;
}
};
CapsuleOverlapReport report(hfGeom, pose1);
// PT: TODO: move away from internal header
const PxVec3 tmp = getCapsuleHalfHeightVector(pose0, capsuleGeom);
// PT: TODO: refactor - but might be difficult because we reuse relPose for two tasks here
const PxTransform relPose = pose1.transformInv(pose0);
const PxVec3 localDelta = pose1.rotateInv(tmp);
// Capsule segment endpoints relative to the heightfield: relative position +/- half-height vector.
report.mLocalCapsule.p0 = relPose.p + localDelta;
report.mLocalCapsule.p1 = relPose.p - localDelta;
report.mLocalCapsule.radius = capsuleGeom.radius;
report.mData.init(report.mLocalCapsule);
PxBounds3 localBounds;
computeCapsuleBounds(localBounds, capsuleGeom, relPose);
report.mHfUtil.overlapAABBTriangles(localBounds, report, 4);
//hfUtil.overlapAABBTriangles(pose0, pose1, getLocalCapsuleBounds(capsuleGeom.radius, capsuleGeom.halfHeight), report, 4);
return report.mOverlap!=PxIntFalse;
}
///////////////////////////////////////////////////////////////////////////////
PxIntBool intersectTriangleBoxBV4(const PxVec3& p0, const PxVec3& p1, const PxVec3& p2,
const PxMat33& rotModelToBox, const PxVec3& transModelToBox, const PxVec3& extents);
// Box-vs-heightfield overlap: each candidate heightfield triangle is transformed
// into box space (mRModelToBox/mTModelToBox) and tested by intersectTriangleBoxBV4.
bool GeomOverlapCallback_BoxHeightfield(GU_OVERLAP_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::eBOX);
PX_ASSERT(geom1.getType()==PxGeometryType::eHEIGHTFIELD);
PX_UNUSED(cache);
PX_UNUSED(threadContext);
const PxBoxGeometry& boxGeom = static_cast<const PxBoxGeometry&>(geom0);
const PxHeightFieldGeometry& hfGeom = static_cast<const PxHeightFieldGeometry&>(geom1);
struct BoxOverlapReport : HeightfieldOverlapReport
{
PxMat33 mRModelToBox; // rotation part of the model-to-box transform
PxVec3p mTModelToBox; // translation part of the model-to-box transform
PxVec3p mBoxExtents; // box half-extents
BoxOverlapReport(const PxHeightFieldGeometry& hfGeom_, const PxTransform& hfPose) : HeightfieldOverlapReport(hfGeom_, hfPose) {}
// Returns false to abort traversal as soon as one triangle intersects the box.
virtual bool reportTouchedTris(PxU32 nb, const PxU32* indices)
{
while(nb--)
{
const PxU32 triangleIndex = *indices++;
PxTrianglePadded currentTriangle;
mHfUtil.getTriangle(mHFPose, currentTriangle, NULL, NULL, triangleIndex, false, false);
if(intersectTriangleBoxBV4(currentTriangle.verts[0], currentTriangle.verts[1], currentTriangle.verts[2], mRModelToBox, mTModelToBox, mBoxExtents))
{
mOverlap = PxIntTrue;
return false;
}
}
return true;
}
};
BoxOverlapReport report(hfGeom, pose1);
// PT: TODO: revisit / refactor all this code
const PxTransform relPose = pose1.transformInv(pose0);
Box localBox;
buildFrom(localBox, relPose.p, boxGeom.halfExtents, relPose.q);
// The triangle test works in box space, so precompute the inverse box transform.
invertBoxMatrix(report.mRModelToBox, report.mTModelToBox, localBox);
report.mBoxExtents = localBox.extents;
PxBounds3 localBounds;
{
// PT: TODO: refactor with bounds code?
// AABB of the oriented box (SIMD): per-axis extent is the sum of the
// absolute values of the scaled basis vectors, centered on the box origin.
const PxMat33& basis = localBox.rot;
// extended basis vectors
const Vec4V c0V = V4Scale(V4LoadU(&basis.column0.x), FLoad(localBox.extents.x));
const Vec4V c1V = V4Scale(V4LoadU(&basis.column1.x), FLoad(localBox.extents.y));
const Vec4V c2V = V4Scale(V4LoadU(&basis.column2.x), FLoad(localBox.extents.z));
// find combination of base vectors that produces max. distance for each component = sum of abs()
Vec4V extentsV = V4Add(V4Abs(c0V), V4Abs(c1V));
extentsV = V4Add(extentsV, V4Abs(c2V));
const PxVec3p origin(localBox.center);
const Vec4V originV = V4LoadU(&origin.x);
const Vec4V minV = V4Sub(originV, extentsV);
const Vec4V maxV = V4Add(originV, extentsV);
StoreBounds(localBounds, minV, maxV);
}
report.mHfUtil.overlapAABBTriangles(localBounds, report, 4);
return report.mOverlap!=PxIntFalse;
}
///////////////////////////////////////////////////////////////////////////////
// Convex-vs-heightfield overlap: each candidate heightfield triangle is tested
// against the convex hull with GJK (contact or near-contact counts as overlap).
bool GeomOverlapCallback_ConvexHeightfield(GU_OVERLAP_FUNC_PARAMS)
{
PX_ASSERT(geom0.getType()==PxGeometryType::eCONVEXMESH);
PX_ASSERT(geom1.getType()==PxGeometryType::eHEIGHTFIELD);
PX_UNUSED(cache);
PX_UNUSED(threadContext);
const PxConvexMeshGeometry& convexGeom = static_cast<const PxConvexMeshGeometry&>(geom0);
const PxHeightFieldGeometry& hfGeom = static_cast<const PxHeightFieldGeometry&>(geom1);
struct ConvexOverlapReport : HeightfieldOverlapReport
{
ConvexHullV mConvex; // convex hull in GJK-friendly (SIMD) form
PxMatTransformV aToB; // triangle-to-convex relative transform used by GJK
ConvexOverlapReport(const PxHeightFieldGeometry& hfGeom_, const PxTransform& hfPose) : HeightfieldOverlapReport(hfGeom_, hfPose) {}
// Returns false to abort traversal as soon as one triangle touches the convex.
virtual bool reportTouchedTris(PxU32 nb, const PxU32* indices)
{
while(nb--)
{
const PxU32 triangleIndex = *indices++;
PxTrianglePadded currentTriangle;
mHfUtil.getTriangle(mHFPose, currentTriangle, NULL, NULL, triangleIndex, false, false);
const PxVec3& p0 = currentTriangle.verts[0];
const PxVec3& p1 = currentTriangle.verts[1];
const PxVec3& p2 = currentTriangle.verts[2];
// PT: TODO: consider adding an extra triangle-vs-box culling test here
// PT: TODO: optimize
const Vec3V v0 = V3LoadU(p0);
const Vec3V v1 = V3LoadU(p1);
const Vec3V v2 = V3LoadU(p2);
// PT: TODO: refactor with ConvexVsMeshOverlapCallback
TriangleV triangle(v0, v1, v2);
Vec3V contactA, contactB, normal;
FloatV dist;
const RelativeConvex<TriangleV> convexA(triangle, aToB);
const LocalConvex<ConvexHullV> convexB(mConvex);
// GJK_CLOSE is also accepted as an overlap (conservative).
const GjkStatus status = gjk(convexA, convexB, aToB.p, FZero(), contactA, contactB, normal, dist);
if(status == GJK_CONTACT || status == GJK_CLOSE)// || FAllGrtrOrEq(mSqTolerance, sqDist))
{
mOverlap = PxIntTrue;
return false;
}
}
return true;
}
};
ConvexOverlapReport report(hfGeom, pose1);
const ConvexMesh* cm = static_cast<const ConvexMesh*>(convexGeom.convexMesh);
const bool idtScaleConvex = convexGeom.scale.isIdentity();
{
const ConvexHullData* hullData = &cm->getHull();
const Vec3V vScale0 = V3LoadU_SafeReadW(convexGeom.scale.scale); // PT: safe because 'rotation' follows 'scale' in PxMeshScale
const QuatV vQuat0 = QuatVLoadU(&convexGeom.scale.rotation.x);
report.mConvex = ConvexHullV(hullData, V3Zero(), vScale0, vQuat0, idtScaleConvex);
// PT: TODO: is that transform correct? It looks like the opposite of what we do for other prims?
report.aToB = PxMatTransformV(pose0.transformInv(pose1));
//report.aToB = PxMatTransformV(pose1.transformInv(pose0));
}
const PxTransform relPose = pose1.transformInv(pose0);
PxBounds3 localBounds;
computeBounds(localBounds, convexGeom, relPose, 0.0f, 1.0f);
report.mHfUtil.overlapAABBTriangles(localBounds, report, 4);
return report.mOverlap!=PxIntFalse;
}
| 12,744 | C++ | 34.207182 | 183 | 0.726381 |
NVIDIA-Omniverse/PhysX/physx/source/geomutils/src/hf/GuHeightField.h | // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ''AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Copyright (c) 2008-2023 NVIDIA Corporation. All rights reserved.
// Copyright (c) 2004-2008 AGEIA Technologies, Inc. All rights reserved.
// Copyright (c) 2001-2004 NovodeX AG. All rights reserved.
#ifndef GU_HEIGHTFIELD_H
#define GU_HEIGHTFIELD_H
#include "geometry/PxHeightFieldSample.h"
#include "geometry/PxHeightFieldDesc.h"
#include "geometry/PxHeightField.h"
#include "geometry/PxHeightFieldGeometry.h"
#include "foundation/PxUserAllocated.h"
#include "CmRefCountable.h"
#include "GuSphere.h"
#include "GuHeightFieldData.h"
//#define PX_HEIGHTFIELD_VERSION 0
//#define PX_HEIGHTFIELD_VERSION 1 // tiled version that was needed for PS3 only has been removed
#define PX_HEIGHTFIELD_VERSION 2 // some floats are now integers
namespace physx
{
class PxHeightFieldDesc;
namespace Gu
{
class MeshFactory;
// Concrete heightfield implementation: owns the sample grid (HeightFieldData)
// and provides the vertex/edge/triangle accessors used by collision queries.
// Samples are stored row-major with mData.columns samples per row; each grid
// cell is split into two triangles (see isFirstTriangle / getTriangleMaterial).
class HeightField : public PxHeightField, public PxUserAllocated
{
public:
// PX_SERIALIZATION
HeightField(PxBaseFlags baseFlags) : PxHeightField(baseFlags), mData(PxEmpty), mModifyCount(0) {}
void preExportDataReset() { Cm::RefCountable_preExportDataReset(*this); }
virtual void exportExtraData(PxSerializationContext& context);
void importExtraData(PxDeserializationContext& context);
PX_FORCE_INLINE void setMeshFactory(MeshFactory* f) { mMeshFactory = f; }
PX_PHYSX_COMMON_API static HeightField* createObject(PxU8*& address, PxDeserializationContext& context);
PX_PHYSX_COMMON_API static void getBinaryMetaData(PxOutputStream& stream);
void resolveReferences(PxDeserializationContext&) {}
virtual void requiresObjects(PxProcessPxBaseCallback&){}
//~PX_SERIALIZATION
// Runtime construction (non-serialization paths).
HeightField(MeshFactory* factory);
HeightField(MeshFactory* factory, Gu::HeightFieldData& data);
// PxHeightField
virtual void release();
virtual PxU32 saveCells(void* destBuffer, PxU32 destBufferSize) const;
virtual bool modifySamples(PxI32 startCol, PxI32 startRow, const PxHeightFieldDesc& subfieldDesc, bool shrinkBounds);
virtual PxU32 getNbRows() const { return mData.rows; }
virtual PxU32 getNbColumns() const { return mData.columns; }
virtual PxHeightFieldFormat::Enum getFormat() const { return mData.format; }
virtual PxU32 getSampleStride() const { return sizeof(PxHeightFieldSample); }
virtual PxReal getConvexEdgeThreshold() const { return mData.convexEdgeThreshold; }
virtual PxHeightFieldFlags getFlags() const { return mData.flags; }
virtual PxReal getHeight(PxReal x, PxReal z) const { return getHeightInternal(x, z); }
virtual PxMaterialTableIndex getTriangleMaterialIndex(PxTriangleID triangleIndex) const
{
return getTriangleMaterial(triangleIndex);
}
virtual PxVec3 getTriangleNormal(PxTriangleID triangleIndex) const
{
return getTriangleNormalInternal(triangleIndex);
}
virtual const PxHeightFieldSample& getSample(PxU32 row, PxU32 column) const
{
// Row-major indexing: one sample per (row, column) cell corner.
const PxU32 cell = row * getNbColumnsFast() + column;
return getSample(cell);
}
// mModifyCount is bumped by sample modifications, so it doubles as a timestamp.
virtual PxU32 getTimestamp() const { return mModifyCount; }
//~PxHeightField
// PxRefCounted
virtual void acquireReference();
virtual PxU32 getReferenceCount() const;
//~PxRefCounted
// PxBase
virtual void onRefCountZero();
//~PxBase
bool loadFromDesc(const PxHeightFieldDesc&);
bool load(PxInputStream&);
bool save(PxOutputStream& stream, bool endianSwap);
// Non-virtual fast-path accessors (also usable from CUDA code).
PX_CUDA_CALLABLE PX_FORCE_INLINE PxU32 getNbRowsFast() const { return mData.rows; }
PX_CUDA_CALLABLE PX_FORCE_INLINE PxU32 getNbColumnsFast() const { return mData.columns; }
PX_FORCE_INLINE PxHeightFieldFormat::Enum getFormatFast() const { return mData.format; }
PX_FORCE_INLINE PxU32 getFlagsFast() const { return mData.flags; }
PX_CUDA_CALLABLE PX_FORCE_INLINE bool isZerothVertexShared(PxU32 vertexIndex) const
{
// return (getSample(vertexIndex).tessFlag & PxHeightFieldTessFlag::e0TH_VERTEX_SHARED);
return getSample(vertexIndex).tessFlag() != 0;
}
PX_CUDA_CALLABLE PX_FORCE_INLINE PxU16 getMaterialIndex0(PxU32 vertexIndex) const { return getSample(vertexIndex).materialIndex0; }
PX_CUDA_CALLABLE PX_FORCE_INLINE PxU16 getMaterialIndex1(PxU32 vertexIndex) const { return getSample(vertexIndex).materialIndex1; }
// Both material indices packed into one 32-bit value (index0 in the low 16 bits).
PX_CUDA_CALLABLE PX_FORCE_INLINE PxU32 getMaterialIndex01(PxU32 vertexIndex) const
{
const PxHeightFieldSample& sample = getSample(vertexIndex);
return PxU32(sample.materialIndex0 | (sample.materialIndex1 << 16));
}
PX_CUDA_CALLABLE PX_FORCE_INLINE PxReal getHeight(PxU32 vertexIndex) const
{
return PxReal(getSample(vertexIndex).height);
}
PX_INLINE PxReal getHeightInternal2(PxU32 vertexIndex, PxReal fracX, PxReal fracZ) const;
// Height lookup at heightfield-local (x, z); the cell and in-cell fractions are
// computed first, then getHeightInternal2 evaluates the height within the cell.
PX_FORCE_INLINE PxReal getHeightInternal(PxReal x, PxReal z) const
{
PxReal fracX, fracZ;
const PxU32 vertexIndex = computeCellCoordinates(x, z, fracX, fracZ);
return getHeightInternal2(vertexIndex, fracX, fracZ);
}
PX_FORCE_INLINE bool isValidVertex(PxU32 vertexIndex) const { return vertexIndex < mData.rows*mData.columns; }
PX_INLINE PxVec3 getVertex(PxU32 vertexIndex) const;
PX_INLINE bool isConvexVertex(PxU32 vertexIndex, PxU32 row, PxU32 column) const;
PX_INLINE bool isValidEdge(PxU32 edgeIndex) const;
PX_INLINE PxU32 getEdgeTriangleIndices(PxU32 edgeIndex, PxU32 triangleIndices[2]) const;
PX_INLINE PxU32 getEdgeTriangleIndices(PxU32 edgeIndex, PxU32 triangleIndices[2], PxU32 cell, PxU32 row, PxU32 column) const;
PX_INLINE void getEdgeVertexIndices(PxU32 edgeIndex, PxU32& vertexIndex0, PxU32& vertexIndex1) const;
// PX_INLINE bool isConvexEdge(PxU32 edgeIndex) const;
PX_INLINE bool isConvexEdge(PxU32 edgeIndex, PxU32 cell, PxU32 row, PxU32 column) const;
// Convenience overload: derives cell/row/column from the edge index (3 edges per cell).
PX_FORCE_INLINE bool isConvexEdge(PxU32 edgeIndex) const
{
const PxU32 cell = edgeIndex / 3;
const PxU32 row = cell / mData.columns;
const PxU32 column = cell % mData.columns;
return isConvexEdge(edgeIndex, cell, row, column);
}
PxU32 computeCellCoordinates(PxReal x, PxReal z, PxReal& fracX, PxReal& fracZ) const;
// Clamps x to [0, nb] and returns floor(x) — used to clamp query coordinates to the grid.
PX_CUDA_CALLABLE PX_FORCE_INLINE PxU32 getMin(PxReal x, PxU32 nb) const
{
if(x<0.0f)
return 0;
if(x>PxReal(nb))
return nb;
const PxReal cx = PxFloor(x);
const PxU32 icx = PxU32(cx);
return icx;
}
// Clamps x to [0, nb] and returns ceil(x).
PX_CUDA_CALLABLE PX_FORCE_INLINE PxU32 getMax(PxReal x, PxU32 nb) const
{
if(x<0.0f)
return 0;
if(x>PxReal(nb))
return nb;
const PxReal cx = PxCeil(x);
const PxU32 icx = PxU32(cx);
return icx;
}
PX_CUDA_CALLABLE PX_FORCE_INLINE PxU32 getMinRow(PxReal x) const { return getMin(x, mData.rows-2); }
PX_CUDA_CALLABLE PX_FORCE_INLINE PxU32 getMaxRow(PxReal x) const { return getMax(x, mData.rows-1); }
PX_CUDA_CALLABLE PX_FORCE_INLINE PxU32 getMinColumn(PxReal z) const { return getMin(z, mData.columns-2); }
PX_CUDA_CALLABLE PX_FORCE_INLINE PxU32 getMaxColumn(PxReal z) const { return getMax(z, mData.columns-1); }
PX_CUDA_CALLABLE PX_INLINE bool isValidTriangle(PxU32 triangleIndex) const;
// Each cell holds two triangles; even triangle indices are the first of the pair.
PX_CUDA_CALLABLE PX_FORCE_INLINE bool isFirstTriangle(PxU32 triangleIndex) const { return ((triangleIndex & 0x1) == 0); }
// Material comes from the cell's sample: materialIndex0 for the first triangle,
// materialIndex1 for the second (cell index = triangleIndex >> 1).
PX_CUDA_CALLABLE PX_FORCE_INLINE PxU16 getTriangleMaterial(PxU32 triangleIndex) const
{
return isFirstTriangle(triangleIndex) ? getMaterialIndex0(triangleIndex >> 1) : getMaterialIndex1(triangleIndex >> 1);
}
PX_CUDA_CALLABLE PX_INLINE void getTriangleVertexIndices(PxU32 triangleIndex, PxU32& vertexIndex0, PxU32& vertexIndex1, PxU32& vertexIndex2) const;
PX_CUDA_CALLABLE PX_INLINE PxVec3 getTriangleNormalInternal(PxU32 triangleIndex) const;
PX_INLINE void getTriangleAdjacencyIndices(PxU32 triangleIndex,PxU32 vertexIndex0, PxU32 vertexIndex1, PxU32 vertexIndex2, PxU32& adjacencyIndex0, PxU32& adjacencyIndex1, PxU32& adjacencyIndex2) const;
PX_INLINE PxVec3 getNormal_2(PxU32 vertexIndex, PxReal fracX, PxReal fracZ, PxReal xcoeff, PxReal ycoeff, PxReal zcoeff) const;
// Normal at heightfield-local (x, z), scaled by the given per-axis coefficients.
PX_FORCE_INLINE PxVec3 getNormal_(PxReal x, PxReal z, PxReal xcoeff, PxReal ycoeff, PxReal zcoeff) const
{
PxReal fracX, fracZ;
const PxU32 vertexIndex = computeCellCoordinates(x, z, fracX, fracZ);
return getNormal_2(vertexIndex, fracX, fracZ, xcoeff, ycoeff, zcoeff);
}
PX_INLINE PxU32 getTriangleIndex(PxReal x, PxReal z) const;
PX_INLINE PxU32 getTriangleIndex2(PxU32 cell, PxReal fracX, PxReal fracZ) const;
PX_FORCE_INLINE PxU16 getMaterial(PxReal x, PxReal z) const
{
return getTriangleMaterial(getTriangleIndex(x, z));
}
PX_FORCE_INLINE PxReal getMinHeight() const { return mMinHeight; }
PX_FORCE_INLINE PxReal getMaxHeight() const { return mMaxHeight; }
PX_FORCE_INLINE const Gu::HeightFieldData& getData() const { return mData; }
PX_CUDA_CALLABLE PX_FORCE_INLINE void getTriangleVertices(PxU32 triangleIndex, PxU32 row, PxU32 column, PxVec3& v0, PxVec3& v1, PxVec3& v2) const;
// checks if current vertex is solid or not
bool isSolidVertex(PxU32 vertexIndex, PxU32 row, PxU32 coloumn, PxU16 holeMaterialIndex, bool& nbSolid) const;
// PT: TODO: I think we could drop that whole precomputation thing now
// if precomputed bitmap define is used, the collision vertex information
// is precomputed during create height field and stored as a bit in materialIndex1
PX_PHYSX_COMMON_API bool isCollisionVertexPreca(PxU32 vertexIndex, PxU32 row, PxU32 column, PxU16 holeMaterialIndex) const;
void parseTrianglesForCollisionVertices(PxU16 holeMaterialIndex);
// Raw sample access; asserts the index is in range (debug builds).
PX_CUDA_CALLABLE PX_FORCE_INLINE const PxHeightFieldSample& getSample(PxU32 vertexIndex) const
{
PX_ASSERT(isValidVertex(vertexIndex));
return mData.samples[vertexIndex];
}
#ifdef __CUDACC__
PX_CUDA_CALLABLE void setSamplePtr(PxHeightFieldSample* s) { mData.samples = s; }
#endif
Gu::HeightFieldData mData;
PxU32 mSampleStride;
PxU32 mNbSamples; // PT: added for platform conversion. Try to remove later.
PxReal mMinHeight;
PxReal mMaxHeight;
PxU32 mModifyCount; // incremented by modifications; exposed via getTimestamp()
void releaseMemory();
virtual ~HeightField();
private:
MeshFactory* mMeshFactory; // PT: changed to pointer for serialization
};
} // namespace Gu
// Reconstructs the local-space position of a heightfield vertex from its linear
// index: x = row, y = stored sample height, z = column (row-major grid layout).
PX_INLINE PxVec3 Gu::HeightField::getVertex(PxU32 vertexIndex) const
{
const PxU32 nbColumns = mData.columns;
const PxReal x = PxReal(vertexIndex / nbColumns); // row
const PxReal z = PxReal(vertexIndex % nbColumns); // column
return PxVec3(x, getHeight(vertexIndex), z);
}
// PT: only called from "isCollisionVertex", should move
// Tests whether the surface is convex at a vertex by looking at the discrete
// second differences of the integer sample heights along the row (X) and
// column (Z) axes, then comparing their sum against convexEdgeThreshold.
PX_INLINE bool Gu::HeightField::isConvexVertex(PxU32 vertexIndex, PxU32 row, PxU32 column) const
{
#ifdef PX_HEIGHTFIELD_DEBUG
PX_ASSERT(isValidVertex(vertexIndex));
#endif
PX_ASSERT((vertexIndex / mData.columns)==row);
PX_ASSERT((vertexIndex % mData.columns)==column);
// PxReal h0 = PxReal(2) * getHeight(vertexIndex);
// Work on raw integer sample heights: h0 = 2 * height(center vertex).
PxI32 h0 = getSample(vertexIndex).height;
h0 += h0;
bool definedInX, definedInZ;
PxI32 convexityX, convexityZ;
// Second difference along X: 2*h(center) - h(next row) - h(previous row).
// Only defined for interior rows.
if ((row > 0) && (row < mData.rows - 1))
{
// convexityX = h0 - getHeight(vertexIndex + mData.columns) - getHeight(vertexIndex - mData.columns);
convexityX = h0 - getSample(vertexIndex + mData.columns).height - getSample(vertexIndex - mData.columns).height;
definedInX = true;
}
else
{
convexityX = 0;
definedInX = false;
}
// Second difference along Z: 2*h(center) - h(next column) - h(previous column).
// Only defined for interior columns.
if ((column > 0) && (column < mData.columns - 1))
{
// convexityZ = h0 - getHeight(vertexIndex + 1) - getHeight(vertexIndex - 1);
convexityZ = h0 - getSample(vertexIndex + 1).height - getSample(vertexIndex - 1).height;
definedInZ = true;
}
else
{
convexityZ = 0;
definedInZ = false;
}
if(definedInX || definedInZ)
{
// PT: use XOR here
// saddle points
/* if ((convexityX > 0) && (convexityZ < 0))
return false;
if ((convexityX < 0) && (convexityZ > 0))
return false;*/
// NOTE(review): the XOR test below returns false when convexityX and convexityZ
// have the SAME sign bit (XOR of the sign bits is 0), which is the opposite of
// the commented-out saddle-point rejection above (which rejected DIFFERING signs).
// It also rejects the case where one term is zero (undefined axis) and the other
// is positive. Confirm this inversion is intentional before modifying.
if(((convexityX ^ convexityZ) & 0x80000000)==0)
return false;
const PxReal value = PxReal(convexityX + convexityZ);
return value > mData.convexEdgeThreshold;
}
// this has to be one of the two corner vertices
return true;
}
// Validates an edge index. Each cell owns 3 edges:
// type 0 = horizontal (along columns), type 1 = cell diagonal, type 2 = vertical (along rows).
// Edges that would stick out past the last row/column of the grid are invalid.
PX_INLINE bool Gu::HeightField::isValidEdge(PxU32 edgeIndex) const
{
	const PxU32 cell = edgeIndex / 3;
	const PxU32 row = cell / mData.columns;
	const PxU32 column = cell % mData.columns;
	const PxU32 edgeType = edgeIndex - cell*3;	// same as edgeIndex % 3, reuses the division above

	if(edgeType == 0)
	{
		// Horizontal edge: exists on every row (including the last), but not in the last column.
		return (row <= mData.rows - 1) && (column < mData.columns - 1);
	}

	if(edgeType == 1)
	{
		// Diagonal edge: lives strictly inside a cell, so neither last row nor last column.
		return (row < mData.rows - 1) && (column < mData.columns - 1);
	}

	// edgeType == 2: vertical edge: exists in every column (including the last), but not in the last row.
	return (row < mData.rows - 1) && (column <= mData.columns - 1);
}
// Collects the triangle indices (at most 2) sharing the given edge and returns how
// many were written to triangleIndices. Edges on the heightfield border have fewer
// neighbors. Triangle index = (cell << 1) + {0,1}.
PX_INLINE PxU32 Gu::HeightField::getEdgeTriangleIndices(PxU32 edgeIndex, PxU32 triangleIndices[2]) const
{
	const PxU32 cell = edgeIndex / 3;
	const PxU32 row = cell / mData.columns;
	const PxU32 column = cell % mData.columns;
	PxU32 count = 0;
//	switch (edgeIndex % 3)
	switch (edgeIndex - cell*3)	// same as edgeIndex % 3, reuses the division above
	{
	case 0:
		// Horizontal edge: one triangle from the cell above, one from this cell.
		if (column < mData.columns - 1)
		{
			if (row > 0)
			{
/*				if (isZerothVertexShared(cell - mData.columns))
					triangleIndices[count++] = ((cell - mData.columns) << 1);
				else
					triangleIndices[count++] = ((cell - mData.columns) << 1) + 1;*/
				// branchless form of the commented-out code above (bool is 0 or 1)
				triangleIndices[count++] = ((cell - mData.columns) << 1) + 1 - isZerothVertexShared(cell - mData.columns);
			}
			if (row < mData.rows - 1)
			{
/*				if (isZerothVertexShared(cell))
					triangleIndices[count++] = (cell << 1) + 1;
				else
					triangleIndices[count++] = cell << 1;*/
				triangleIndices[count++] = (cell << 1) + isZerothVertexShared(cell);
			}
		}
		break;
	case 1:
		// Diagonal edge: shared by both triangles of this cell.
		if ((row < mData.rows - 1) && (column < mData.columns - 1))
		{
			triangleIndices[count++] = cell << 1;
			triangleIndices[count++] = (cell << 1) + 1;
		}
		break;
	case 2:
		// Vertical edge: one triangle from the cell to the left, one from this cell.
		if (row < mData.rows - 1)
		{
			if (column > 0)
				triangleIndices[count++] = ((cell - 1) << 1) + 1;
			if (column < mData.columns - 1)
				triangleIndices[count++] = cell << 1;
		}
		break;
	}

	return count;
}
// Same as the overload above, but takes precomputed cell/row/column to avoid the
// divisions on hot paths. No asserts: the caller is trusted to pass matching values.
PX_INLINE PxU32 Gu::HeightField::getEdgeTriangleIndices(PxU32 edgeIndex, PxU32 triangleIndices[2], PxU32 cell, PxU32 row, PxU32 column) const
{
//	const PxU32 cell = edgeIndex / 3;
//	const PxU32 row = cell / mData.columns;
//	const PxU32 column = cell % mData.columns;
	PxU32 count = 0;
//	switch (edgeIndex % 3)
	switch (edgeIndex - cell*3)	// same as edgeIndex % 3, avoids a second division
	{
	case 0:
		// Horizontal edge: one triangle from the cell above, one from this cell.
		if (column < mData.columns - 1)
		{
			if (row > 0)
			{
/*				if (isZerothVertexShared(cell - mData.columns))
					triangleIndices[count++] = ((cell - mData.columns) << 1);
				else
					triangleIndices[count++] = ((cell - mData.columns) << 1) + 1;*/
				// branchless form of the commented-out code above (bool is 0 or 1)
				triangleIndices[count++] = ((cell - mData.columns) << 1) + 1 - isZerothVertexShared(cell - mData.columns);
			}
			if (row < mData.rows - 1)
			{
/*				if (isZerothVertexShared(cell))
					triangleIndices[count++] = (cell << 1) + 1;
				else
					triangleIndices[count++] = cell << 1;*/
				triangleIndices[count++] = (cell << 1) + isZerothVertexShared(cell);
			}
		}
		break;
	case 1:
		// Diagonal edge: shared by both triangles of this cell.
		if ((row < mData.rows - 1) && (column < mData.columns - 1))
		{
			triangleIndices[count++] = cell << 1;
			triangleIndices[count++] = (cell << 1) + 1;
		}
		break;
	case 2:
		// Vertical edge: one triangle from the cell to the left, one from this cell.
		if (row < mData.rows - 1)
		{
			if (column > 0)
				triangleIndices[count++] = ((cell - 1) << 1) + 1;
			if (column < mData.columns - 1)
				triangleIndices[count++] = cell << 1;
		}
		break;
	}

	return count;
}
// Outputs the two vertex indices of an edge. Edge types per cell:
// 0 = horizontal (along columns), 1 = cell diagonal, 2 = vertical (along rows).
PX_INLINE void Gu::HeightField::getEdgeVertexIndices(PxU32 edgeIndex, PxU32& vertexIndex0, PxU32& vertexIndex1) const
{
	const PxU32 cell = edgeIndex / 3;
	const PxU32 edgeType = edgeIndex - cell*3;	// same as edgeIndex % 3, reuses the division above

	if(edgeType == 0)
	{
		// Horizontal edge to the next column.
		vertexIndex0 = cell;
		vertexIndex1 = cell + 1;
	}
	else if(edgeType == 1)
	{
		// Diagonal edge: its orientation depends on which vertex the cell's two triangles share.
		if(isZerothVertexShared(cell))
		{
			vertexIndex0 = cell;
			vertexIndex1 = cell + mData.columns + 1;
		}
		else
		{
			vertexIndex0 = cell + 1;
			vertexIndex1 = cell + mData.columns;
		}
	}
	else
	{
		// Vertical edge to the next row.
		vertexIndex0 = cell;
		vertexIndex1 = cell + mData.columns;
	}
}
// Tests whether an edge is convex: compares the surface slopes on either side of the
// edge (integer second differences of raw sample heights) against convexEdgeThreshold.
// (cell, row, column) must match edgeIndex; they are passed in to avoid recomputing
// the divisions and are checked by asserts. Border edges return false.
PX_INLINE bool Gu::HeightField::isConvexEdge(PxU32 edgeIndex, PxU32 cell, PxU32 row, PxU32 column) const
{
//	const PxU32 cell = edgeIndex / 3;
	PX_ASSERT(cell == edgeIndex / 3);

//	const PxU32 row = cell / mData.columns;
	PX_ASSERT(row == cell / mData.columns);
	if (row > mData.rows-2) return false;

//	const PxU32 column = cell % mData.columns;
	PX_ASSERT(column == cell % mData.columns);
	if (column > mData.columns-2) return false;

//	PxReal h0 = 0, h1 = 0, h2 = 0, h3 = 0;
//	PxReal convexity = 0;
	// Integer math on raw sample heights; convexity > 0 means the edge bends outwards.
	PxI32 h0 = 0, h1 = 0, h2 = 0, h3 = 0;
	PxI32 convexity = 0;

//	switch (edgeIndex % 3)
	switch (edgeIndex - cell*3)	// same as edgeIndex % 3, avoids a second division
	{
	case 0:
		// Horizontal edge: compare the slope of the cell above (h0->h1) with this cell (h2->h3).
		{
			if (row < 1) return false;
/*			if(isZerothVertexShared(cell - mData.columns))
			{
				//      <------ COL
				//      +----+  0  R
				//      |   /  /|  O
				//      |  /  / #  W
				//      | /  /  #  |
				//      |/  /   #  |
				//      +  +====1  |
				//                 |
				//                 |
				//                 V
				//
//				h0 = getHeight(cell - mData.columns);
//				h1 = getHeight(cell);
				h0 = getSample(cell - mData.columns).height;
				h1 = getSample(cell).height;
			}
			else
			{
				//      <------ COL
				//      0  +----+  R
				//      #\  \   |  O
				//      # \  \  |  W
				//      #  \  \ |  |
				//      #   \  \|  |
				//      1====+  +  |
				//                 |
				//                 |
				//                 V
				//
//				h0 = getHeight(cell - mData.columns + 1);
//				h1 = getHeight(cell + 1);
				h0 = getSample(cell - mData.columns + 1).height;
				h1 = getSample(cell + 1).height;
			}*/
			// branchless form of the commented-out code above (bool is 0 or 1)
			const bool b0 = !isZerothVertexShared(cell - mData.columns);
			h0 = getSample(cell - mData.columns + b0).height;
			h1 = getSample(cell + b0).height;

/*			if(isZerothVertexShared(cell))
			{
				//      <------ COL
				//                 R
				//                 O
				//                 W
				//                 |
				//      2====+  0  |
				//      #   /  /|  |
				//      #  /  / |  |
				//      # /  /  |  |
				//      #/  /   |  |
				//      3  +----+  |
				//                 V
				//
//				h2 = getHeight(cell + 1);
//				h3 = getHeight(cell + mData.columns + 1);
				h2 = getSample(cell + 1).height;
				h3 = getSample(cell + mData.columns + 1).height;
			}
			else
			{
				//      <------ COL
				//                 R
				//                 O
				//                 W
				//                 |
				//      +  +====2  |
				//      |\  \   #  |
				//      | \  \  #  |
				//      |  \  \ #  |
				//      |   \  \#  |
				//      +----+  3  |
				//                 V
				//
//				h2 = getHeight(cell);
//				h3 = getHeight(cell + mData.columns);
				h2 = getSample(cell).height;
				h3 = getSample(cell + mData.columns).height;
			}*/
			const bool b1 = isZerothVertexShared(cell);
			h2 = getSample(cell + b1).height;
			h3 = getSample(cell + mData.columns + b1).height;

			//convex = (h3-h2) < (h1-h0);
			convexity = (h1-h0) - (h3-h2);
		}
		break;
	case 1:
		// Diagonal edge: compare the heights across the cell's own diagonal.
//		h0 = getHeight(cell);
//		h1 = getHeight(cell + 1);
//		h2 = getHeight(cell + mData.columns);
//		h3 = getHeight(cell + mData.columns + 1);
		h0 = getSample(cell).height;
		h1 = getSample(cell + 1).height;
		h2 = getSample(cell + mData.columns).height;
		h3 = getSample(cell + mData.columns + 1).height;
		if (isZerothVertexShared(cell))
			//convex = (h0 + h3) > (h1 + h2);
			convexity = (h0 + h3) - (h1 + h2);
		else
			//convex = (h2 + h1) > (h0 + h3);
			convexity = (h2 + h1) - (h0 + h3);
		break;
	case 2:
		// Vertical edge: compare the slope of the cell to the left (h0->h1) with this cell (h2->h3).
		{
			if (column < 1) return false;
/*			if(isZerothVertexShared(cell-1))
			{
				// <-------------- COL
				//  1====0  +  R
				//  +   /  /|  O
				//  +  /  / |  W
				//  + /  /  |  |
				//  +/  /   |  |
				//  +  +----+  V
				//
//				h0 = getHeight(cell - 1);
//				h1 = getHeight(cell);
				h0 = getSample(cell - 1).height;
				h1 = getSample(cell).height;
			}
			else
			{
				// <-------------- COL
				//  +  +----+  R
				//  +\  \   |  O
				//  + \  \  |  W
				//  +  \  \ |  |
				//  +   \  \|  |
				//  1====0  +  V
				//
//				h0 = getHeight(cell - 1 + mData.columns);
//				h1 = getHeight(cell + mData.columns);
				h0 = getSample(cell - 1 + mData.columns).height;
				h1 = getSample(cell + mData.columns).height;
			}*/
			const PxU32 offset0 = isZerothVertexShared(cell-1) ? 0 : mData.columns;
			h0 = getSample(cell - 1 + offset0).height;
			h1 = getSample(cell + offset0).height;

/*			if(isZerothVertexShared(cell))
			{
				// <-------------- COL
				//  +----+  +  R
				//  |   /  /+  O
				//  |  /  / +  W
				//  | /  /  +  |
				//  |/  /   +  |
				//  +  3====2  V
				//
//				h2 = getHeight(cell + mData.columns);
//				h3 = getHeight(cell + mData.columns + 1);
				h2 = getSample(cell + mData.columns).height;
				h3 = getSample(cell + mData.columns + 1).height;
			}
			else
			{
				// <-------------- COL
				//  +  3====2  R
				//  |\  \   +  O
				//  | \  \  +  W
				//  |  \  \ +  |
				//  |   \  \+  |
				//  +----+  +  V
				//
//				h2 = getHeight(cell);
//				h3 = getHeight(cell + 1);
				h2 = getSample(cell).height;
				h3 = getSample(cell + 1).height;
			}*/
			const PxU32 offset1 = isZerothVertexShared(cell) ? mData.columns : 0;
			h2 = getSample(cell + offset1).height;
			h3 = getSample(cell + offset1 + 1).height;

			//convex = (h3-h2) < (h1-h0);
			convexity = (h1-h0) - (h3-h2);
		}
		break;
	}

	const PxI32 threshold = PxI32(mData.convexEdgeThreshold);
	return convexity > threshold;
}
// Validates a triangle index. Two triangles live in each cell (triangleIndex >> 1),
// and cells on the last row/column of the grid hold no triangles.
PX_INLINE bool Gu::HeightField::isValidTriangle(PxU32 triangleIndex) const
{
	const PxU32 cell = triangleIndex >> 1;
	const PxU32 row = cell / mData.columns;
	const PxU32 column = cell % mData.columns;
	return (row < mData.rows - 1) && (column < mData.columns - 1);
}
// Outputs the three vertex indices of a triangle, in the winding order shown in the
// diagrams below. The cell's tessellation flag (isZerothVertexShared) selects which
// diagonal splits the cell into its two triangles.
PX_INLINE void Gu::HeightField::getTriangleVertexIndices(PxU32 triangleIndex, PxU32& vertexIndex0, PxU32& vertexIndex1, PxU32& vertexIndex2) const
{
	const PxU32 cell = triangleIndex >> 1;
	if (isZerothVertexShared(cell))
	{
		//      <---- COL
		//      0----2  1   R
		//      | 1 /  /|   O
		//      |  /  / |   W
		//      | /  /  |   |
		//      |/  / 0 |   |
		//      1  2----0   V
		//
		if (isFirstTriangle(triangleIndex))
		{
			vertexIndex0 = cell + mData.columns;
			vertexIndex1 = cell;
			vertexIndex2 = cell + mData.columns + 1;
		}
		else
		{
			vertexIndex0 = cell + 1;
			vertexIndex1 = cell + mData.columns + 1;
			vertexIndex2 = cell;
		}
	}
	else
	{
		//      <---- COL
		//      2  1----0   R
		//      |\  \ 0 |   O
		//      | \  \  |   W
		//      |  \  \ |   |
		//      | 1 \  \|   |
		//      0----1  2   V
		//
		if (isFirstTriangle(triangleIndex))
		{
			vertexIndex0 = cell;
			vertexIndex1 = cell + 1;
			vertexIndex2 = cell + mData.columns;
		}
		else
		{
			vertexIndex0 = cell + mData.columns + 1;
			vertexIndex1 = cell + mData.columns;
			vertexIndex2 = cell + 1;
		}
	}
}
// Outputs the indices of the triangles adjacent to the three edges of the given
// triangle, or 0xFFFFFFFF when the edge lies on the heightfield border. The
// vertexIndex parameters are unused; kept for interface compatibility.
PX_INLINE void Gu::HeightField::getTriangleAdjacencyIndices(PxU32 triangleIndex, PxU32 vertexIndex0, PxU32 vertexIndex1, PxU32 vertexIndex2, PxU32& adjacencyIndex0, PxU32& adjacencyIndex1, PxU32& adjacencyIndex2) const
{
	PX_UNUSED(vertexIndex0);
	PX_UNUSED(vertexIndex1);
	PX_UNUSED(vertexIndex2);

	const PxU32 cell = triangleIndex >> 1;
	if (isZerothVertexShared(cell))
	{
		//      <---- COL
		//      0----2  1   R
		//      | 1 /  /|   O
		//      |  /  / |   W
		//      | /  /  |   |
		//      |/  / 0 |   |
		//      1  2----0   V
		//
		if (isFirstTriangle(triangleIndex))
		{
			// Diagonal neighbor is always the cell's other triangle; the two outer
			// edges depend on border position and on the neighbor cell's tessellation.
			adjacencyIndex0 = 0xFFFFFFFF;
			adjacencyIndex1 = triangleIndex + 1;
			adjacencyIndex2 = 0xFFFFFFFF;

			if((cell % (mData.columns) != 0))
			{
				adjacencyIndex0 = triangleIndex - 1;
			}

			if((cell / mData.columns != mData.rows - 2))
			{
				const PxU32 tMod = isZerothVertexShared(cell + mData.columns) ? 1u : 0u;
				adjacencyIndex2 = ((cell + mData.columns) * 2) + tMod;
			}
		}
		else
		{
			adjacencyIndex0 = 0xFFFFFFFF;
			adjacencyIndex1 = triangleIndex - 1;
			adjacencyIndex2 = 0xFFFFFFFF;

			if(cell % (mData.columns) < (mData.columns - 2))
			{
				adjacencyIndex0 = triangleIndex + 1;
			}

			if(cell >= mData.columns - 1)
			{
				const PxU32 tMod = isZerothVertexShared(cell - mData.columns) ? 0u : 1u;
				adjacencyIndex2 = ((cell - mData.columns) * 2) + tMod;
			}
		}
	}
	else
	{
		//      <---- COL
		//      2  1----0   R
		//      |\  \ 0 |   O
		//      | \  \  |   W
		//      |  \  \ |   |
		//      | 1 \  \|   |
		//      0----1  2   V
		//
		if (isFirstTriangle(triangleIndex))
		{
			adjacencyIndex0 = 0xFFFFFFFF;
			adjacencyIndex1 = triangleIndex + 1;
			adjacencyIndex2 = 0xFFFFFFFF;

			if(cell >= mData.columns - 1)
			{
				const PxU32 tMod = isZerothVertexShared(cell - mData.columns) ? 0u : 1u;
				adjacencyIndex0 = ((cell - (mData.columns)) * 2) + tMod;
			}

			if((cell % (mData.columns) != 0))
			{
				adjacencyIndex2 = triangleIndex - 1;
			}
		}
		else
		{
			adjacencyIndex0 = 0xFFFFFFFF;
			adjacencyIndex1 = triangleIndex - 1;
			adjacencyIndex2 = 0xFFFFFFFF;

			if((cell / mData.columns != mData.rows - 2))
			{
				const PxU32 tMod = isZerothVertexShared(cell + mData.columns) ? 1u : 0u;
				adjacencyIndex0 = (cell + (mData.columns)) * 2 + tMod;
			}

			if(cell % (mData.columns) < (mData.columns - 2))
			{
				adjacencyIndex2 = triangleIndex + 1;
			}
		}
	}
}
// Computes a (non-normalized) triangle normal from the integer height differences of
// the triangle's three vertices, in heightfield-local space (X = row, Z = column).
PX_INLINE PxVec3 Gu::HeightField::getTriangleNormalInternal(PxU32 triangleIndex) const
{
	PxU32 v0, v1, v2;
	getTriangleVertexIndices(triangleIndex, v0, v1, v2);

//	const PxReal h0 = getHeight(v0);
//	const PxReal h1 = getHeight(v1);
//	const PxReal h2 = getHeight(v2);
	const PxI32 h0 = getSample(v0).height;
	const PxI32 h1 = getSample(v1).height;
	const PxI32 h2 = getSample(v2).height;

	const float thickness = 0.0f;
	// NOTE(review): with thickness fixed at 0, fsel(0, -1, 1) presumably selects -1.0f,
	// making coeff constant -- looks like a leftover of the removed heightfield
	// thickness feature; confirm the intended normal orientation.
	const PxReal coeff = physx::intrinsics::fsel(thickness, -1.0f, 1.0f);

//	PxVec3 n(0,1,0);
	const PxU32 cell = triangleIndex >> 1;
	if (isZerothVertexShared(cell))
	{
		//      <---- COL
		//      0----2  1   R
		//      | 1 /  /|   O
		//      |  /  / |   W
		//      | /  /  |   |
		//      |/  / 0 |   |
		//      1  2----0   V
		//
		if (isFirstTriangle(triangleIndex))
		{
//			n.x = -(h0-h1);
//			n.z = -(h2-h0);
			return PxVec3(coeff*PxReal(h1-h0), coeff, coeff*PxReal(h0-h2));
		}
		else
		{
//			n.x = -(h1-h0);
//			n.z = -(h0-h2);
			return PxVec3(coeff*PxReal(h0-h1), coeff, coeff*PxReal(h2-h0));
		}
	}
	else
	{
		//      <---- COL
		//      2  1----0   R
		//      |\  \ 0 |   O
		//      | \  \  |   W
		//      |  \  \ |   |
		//      | 1 \  \|   |
		//      0----1  2   V
		//
		if (isFirstTriangle(triangleIndex))
		{
//			n.x = -(h2-h0);
//			n.z = -(h1-h0);
			return PxVec3(coeff*PxReal(h0-h2), coeff, coeff*PxReal(h0-h1));
		}
		else
		{
//			n.x = -(h0-h2);
//			n.z = -(h0-h1);
			return PxVec3(coeff*PxReal(h2-h0), coeff, coeff*PxReal(h1-h0));
		}
	}
//	return n;
}
// Interpolates the height at fractional coordinates (fracX, fracZ) inside the cell
// whose min vertex is vertexIndex. The cell's tessellation flag selects the split
// diagonal, and (fracX, fracZ) selects which of the two triangles to interpolate on.
PX_INLINE PxReal Gu::HeightField::getHeightInternal2(PxU32 vertexIndex, PxReal fracX, PxReal fracZ) const
{
	if (isZerothVertexShared(vertexIndex))
	{
		//    <----Z---+
		//      +----+ |
		//      |   /| |
		//      |  / | X
		//      | /  | |
		//      |/   | |
		//      +----+ |
		//             V
		const PxReal h0 = getHeight(vertexIndex);
		const PxReal h2 = getHeight(vertexIndex + mData.columns + 1);
		if (fracZ > fracX)
		{
			// Triangle on the Z side of the (0,0)-(1,1) diagonal.
			//    <----Z---+
			//      1----0 |
			//      |   /  |
			//      |  /   X
			//      | /    |
			//      |/     |
			//      2      |
			//             V
			const PxReal h1 = getHeight(vertexIndex + 1);
			return h0 + fracZ*(h1-h0) + fracX*(h2-h1);
		}
		else
		{
			// Triangle on the X side of the (0,0)-(1,1) diagonal.
			//    <----Z---+
			//           0 |
			//          /| |
			//         / | X
			//        /  | |
			//       /   | |
			//      2----1 |
			//             V
			const PxReal h1 = getHeight(vertexIndex + mData.columns);
			return h0 + fracX*(h1-h0) + fracZ*(h2-h1);
		}
	}
	else
	{
		//    <----Z---+
		//      +----+ |
		//      |\   | |
		//      | \  | X
		//      |  \ | |
		//      |   \| |
		//      +----+ |
		//             V
		const PxReal h2 = getHeight(vertexIndex + mData.columns);
		const PxReal h1 = getHeight(vertexIndex + 1);
		if (fracX + fracZ < 1.0f)
		{
			// Triangle touching the cell's min corner.
			//    <----Z---+
			//      1----0 |
			//       \   | |
			//        \  | X
			//         \ | |
			//          \| |
			//           2 |
			//             V
			const PxReal h0 = getHeight(vertexIndex);
			return h0 + fracZ*(h1-h0) + fracX*(h2-h0);
		}
		else
		{
			// Triangle touching the cell's max corner.
			//    <----Z---+
			//      1      |
			//      |\     |
			//      | \    X
			//      |  \   |
			//      |   \  |
			//      0----2 |
			//             V
			//
			// Note that we need to flip fracX and fracZ since we are moving the origin
			const PxReal h0 = getHeight(vertexIndex + mData.columns + 1);
			return h0 + (1.0f - fracZ)*(h2-h0) + (1.0f - fracX)*(h1-h0);
		}
	}
}
// Computes a (non-normalized) normal of the triangle containing the point at
// (fracX, fracZ) inside the cell whose min vertex is vertexIndex. The x/y/z
// coefficients scale the corresponding components of the result. Uses raw integer
// sample heights to avoid per-sample int->float conversions.
PX_INLINE PxVec3 Gu::HeightField::getNormal_2(PxU32 vertexIndex, PxReal fracX, PxReal fracZ, PxReal xcoeff, PxReal ycoeff, PxReal zcoeff) const
{
	PxVec3 normal;
	if (isZerothVertexShared(vertexIndex))
	{
		//    <----Z---+
		//      +----+ |
		//      |   /| |
		//      |  / | X
		//      | /  | |
		//      |/   | |
		//      +----+ |
		//             V
//		const PxReal h0 = getHeight(vertexIndex);
//		const PxReal h2 = getHeight(vertexIndex + mData.columns + 1);
		const PxI32 ih0 = getSample(vertexIndex).height;
		const PxI32 ih2 = getSample(vertexIndex + mData.columns + 1).height;
		if (fracZ >= fracX)
		{
			// Triangle on the Z side of the (0,0)-(1,1) diagonal.
			//    <----Z---+
			//      1----0 |
			//      |   /  |
			//      |  /   X
			//      | /    |
			//      |/     |
			//      2      |
			//             V
//			const PxReal h0 = getHeight(vertexIndex);
//			const PxReal h1 = getHeight(vertexIndex + 1);
//			const PxReal h2 = getHeight(vertexIndex + mData.columns + 1);
//			normal.set(-(h2-h1), 1.0f, -(h1-h0));
			const PxI32 ih1 = getSample(vertexIndex + 1).height;
			normal = PxVec3(PxReal(ih1 - ih2)*xcoeff, ycoeff, PxReal(ih0 - ih1)*zcoeff);
		}
		else
		{
			// Triangle on the X side of the (0,0)-(1,1) diagonal.
			//    <----Z---+
			//           0 |
			//          /| |
			//         / | X
			//        /  | |
			//       /   | |
			//      2----1 |
			//             V
//			const PxReal h0 = getHeight(vertexIndex);
//			const PxReal h1 = getHeight(vertexIndex + mData.columns);
//			const PxReal h2 = getHeight(vertexIndex + mData.columns + 1);
//			normal.set(-(h1-h0), 1.0f, -(h2-h1));
			const PxI32 ih1 = getSample(vertexIndex + mData.columns).height;
			normal = PxVec3(PxReal(ih0 - ih1)*xcoeff, ycoeff, PxReal(ih1 - ih2)*zcoeff);
		}
	}
	else
	{
		//    <----Z---+
		//      +----+ |
		//      |\   | |
		//      | \  | X
		//      |  \ | |
		//      |   \| |
		//      +----+ |
		//             V
		const PxI32 ih1 = getSample(vertexIndex + 1).height;
		const PxI32 ih2 = getSample(vertexIndex + mData.columns).height;
		if (fracX + fracZ <= PxReal(1))
		{
			// Triangle touching the cell's min corner.
			//    <----Z---+
			//      1----0 |
			//       \   | |
			//        \  | X
			//         \ | |
			//          \| |
			//           2 |
			//             V
//			const PxReal h0 = getHeight(vertexIndex);
//			const PxReal h1 = getHeight(vertexIndex + 1);
//			const PxReal h2 = getHeight(vertexIndex + mData.columns);
//			normal.set(-(h2-h0), 1.0f, -(h1-h0));
			const PxI32 ih0 = getSample(vertexIndex).height;
//			const PxI32 ih1 = getSample(vertexIndex + 1).height;
//			const PxI32 ih2 = getSample(vertexIndex + mData.columns).height;
			normal = PxVec3(PxReal(ih0 - ih2)*xcoeff, ycoeff, PxReal(ih0 - ih1)*zcoeff);
		}
		else
		{
			// Triangle touching the cell's max corner.
			//    <----Z---+
			//      2      |
			//      |\     |
			//      | \    X
			//      |  \   |
			//      |   \  |
			//      0----1 |
			//             V
			//
			// Note that we need to flip fracX and fracZ since we are moving the origin
//			const PxReal h2 = getHeight(vertexIndex + 1);
//			const PxReal h1 = getHeight(vertexIndex + mData.columns);
//			const PxReal h0 = getHeight(vertexIndex + mData.columns + 1);
//			normal.set(-(h0-h2), 1.0f, -(h0-h1));
//			const PxI32 ih2 = getSample(vertexIndex + 1).height;
//			const PxI32 ih1 = getSample(vertexIndex + mData.columns).height;
			const PxI32 ih0 = getSample(vertexIndex + mData.columns + 1).height;
//			normal.set(PxReal(ih2 - ih0), 1.0f, PxReal(ih1b - ih0));
			normal = PxVec3(PxReal(ih1 - ih0)*xcoeff, ycoeff, PxReal(ih2 - ih0)*zcoeff);
		}
	}
	return normal;
}
// Picks one of the cell's two triangles from the fractional in-cell coordinates.
// The cell's tessellation flag decides which diagonal separates the triangles.
PX_INLINE PxU32 Gu::HeightField::getTriangleIndex2(PxU32 cell, PxReal fracX, PxReal fracZ) const
{
	const PxU32 firstTriangle = cell << 1;

	bool isSecondTriangle;
	if(isZerothVertexShared(cell))
		isSecondTriangle = fracZ > fracX;		// split along the (0,0)-(1,1) diagonal
	else
		isSecondTriangle = fracX + fracZ > 1;	// split along the (1,0)-(0,1) diagonal

	return isSecondTriangle ? firstTriangle + 1 : firstTriangle;
}
// Returns the index of the triangle containing the heightfield-space point (x, z):
// locates the cell and the fractional in-cell coordinates, then picks the triangle.
PX_INLINE PxU32 Gu::HeightField::getTriangleIndex(PxReal x, PxReal z) const
{
	PxReal fracX, fracZ;
	const PxU32 cell = computeCellCoordinates(x, z, fracX, fracZ);
	return getTriangleIndex2(cell, fracX, fracZ);
}
// Outputs the three vertex positions of a triangle in heightfield-local space
// (X = row, Y = height, Z = column). (row, column) must match the triangle's cell;
// they are passed in to avoid recomputing the division (checked by the assert).
PX_FORCE_INLINE void Gu::HeightField::getTriangleVertices(PxU32 triangleIndex, PxU32 row, PxU32 column, PxVec3& v0, PxVec3& v1, PxVec3& v2) const
{
	PxU32 cell = triangleIndex >> 1;
	PX_ASSERT(row * getNbColumnsFast() + column == cell);

	// Heights of the cell's four corners.
	PxReal h0 = getHeight(cell);
	PxReal h1 = getHeight(cell + 1);
	PxReal h2 = getHeight(cell + getNbColumnsFast());
	PxReal h3 = getHeight(cell + getNbColumnsFast() + 1);

	if (isFirstTriangle(triangleIndex))
	{
		if (isZerothVertexShared(cell))
		{
			//      <---- COL
			//           1  R
			//          /|  O
			//         / |  W
			//        /  |  |
			//       / 0 |  |
			//      2----0  V
			//
			v0 = PxVec3(PxReal(row + 1), h2, PxReal(column    ));
			v1 = PxVec3(PxReal(row    ), h0, PxReal(column    ));
			v2 = PxVec3(PxReal(row + 1), h3, PxReal(column + 1));
		}
		else
		{
			//      <---- COL
			//      1----0  R
			//       \ 0 |  O
			//        \  |  W
			//         \ |  |
			//          \|  |
			//           2  V
			//
			v0 = PxVec3(PxReal(row    ), h0, PxReal(column    ));
			v1 = PxVec3(PxReal(row    ), h1, PxReal(column + 1));
			v2 = PxVec3(PxReal(row + 1), h2, PxReal(column    ));
		}
	}
	else
	{
		if (isZerothVertexShared(cell))
		{
			//      <---- COL
			//      0----2  R
			//      | 1 /   O
			//      |  /    W
			//      | /     |
			//      |/      |
			//      1       V
			//
			v0 = PxVec3(PxReal(row    ), h1, PxReal(column + 1));
			v1 = PxVec3(PxReal(row + 1), h3, PxReal(column + 1));
			v2 = PxVec3(PxReal(row    ), h0, PxReal(column    ));
		}
		else
		{
			//      <---- COL
			//      2       R
			//      |\      O
			//      | \     W
			//      |  \    |
			//      | 1 \   |
			//      0----1  V
			//
			v0 = PxVec3(PxReal(row + 1), h3, PxReal(column + 1));
			v1 = PxVec3(PxReal(row + 1), h2, PxReal(column    ));
			v2 = PxVec3(PxReal(row    ), h1, PxReal(column + 1));
		}
	}
}
// Reaches the internal heightfield data through the public geometry's
// PxHeightField pointer (downcast to the internal Gu::HeightField type).
PX_FORCE_INLINE const Gu::HeightFieldData* _getHFData(const PxHeightFieldGeometry& hfGeom)
{
	const Gu::HeightField* internalHF = static_cast<const Gu::HeightField*>(hfGeom.heightField);
	return &internalHF->getData();
}
}
#endif
| 39,101 | C | 30.893964 | 218 | 0.550753 |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.